Add QAT quantization support

Author: lhr
Date: 2026-01-08 15:12:27 +08:00
Parent: f4b1f341fc
Commit: 546a510eb2
8 changed files with 862 additions and 25 deletions
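
The inference script changed below consumes the checkpoint this commit's QAT pipeline writes out (my_yolo_result_qat/best_fp32_converted.pth). The training side lives in the other changed files, which are not shown on this page; for orientation only, a minimal eager-mode QAT round trip with torch.ao.quantization might look like the sketch below. The fine-tuning loop and the strip_fake_quant helper are hypothetical placeholders, and the commit's actual pipeline may differ.

import torch
import torch.ao.quantization as tq
from yolo11_standalone import YOLO11

# Sketch only: the commit's real QAT pipeline is in files not shown here.
model = YOLO11(nc=80, scale='s')
model.load_weights("yolo11s.pth")   # start from the FP32 checkpoint
model.train()

# Attach fake-quantization observers ("fbgemm" targets x86, "qnnpack" ARM).
model.qconfig = tq.get_default_qat_qconfig("fbgemm")
tq.prepare_qat(model, inplace=True)

for epoch in range(10):                 # hypothetical fine-tuning loop
    train_one_epoch(model, dataloader)  # placeholder, not defined in this repo

# Fold the fake-quant modules back into plain FP32 weights so the unmodified
# YOLO11 graph can load them (assumption: that is what "fp32_converted" means).
fp32_state = strip_fake_quant(model.state_dict())  # hypothetical helper
torch.save(fp32_state, "my_yolo_result_qat/best_fp32_converted.pth")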


@@ -2,7 +2,7 @@ import torch
 import cv2
 import numpy as np
 import torchvision
-from yolo11_standalone import YOLO11, YOLOPostProcessor
+from yolo11_standalone import YOLO11, YOLOPostProcessor, YOLOPostProcessorNumpy
 
 CLASSES = [
     "person", "bicycle", "car", "motorcycle", "airplane", "bus", "train", "truck", "boat", "traffic light",
@@ -26,7 +26,7 @@ def letterbox(im, new_shape=(640, 640), color=(114, 114, 114)):
     r = min(new_shape[0] / shape[0], new_shape[1] / shape[1])
     new_unpad = int(round(shape[1] * r)), int(round(shape[0] * r))
-    dw, dh = new_shape[1] - new_unpad[0], new_shape[0] - new_unpad[1]  # wh padding
+    dw, dh = new_shape[1] - new_unpad[0], new_shape[0] - new_unpad[1]
     dw, dh = dw / 2, dh / 2
 
     if shape[::-1] != new_unpad:
@@ -40,13 +40,13 @@ def letterbox(im, new_shape=(640, 640), color=(114, 114, 114)):
 def xywh2xyxy(x):
     y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x)
-    y[..., 0] = x[..., 0] - x[..., 2] / 2  # top left x
-    y[..., 1] = x[..., 1] - x[..., 3] / 2  # top left y
-    y[..., 2] = x[..., 0] + x[..., 2] / 2  # bottom right x
-    y[..., 3] = x[..., 1] + x[..., 3] / 2  # bottom right y
+    y[..., 0] = x[..., 0] - x[..., 2] / 2
+    y[..., 1] = x[..., 1] - x[..., 3] / 2
+    y[..., 2] = x[..., 0] + x[..., 2] / 2
+    y[..., 3] = x[..., 1] + x[..., 3] / 2
     return y
 
-def non_max_suppression(prediction, conf_thres=0.25, iou_thres=0.45, max_det=300):
+def non_max_suppression(prediction, conf_thres=0.01, iou_thres=0.45, max_det=300):
     prediction = prediction.transpose(1, 2)
     bs = prediction.shape[0]
@@ -75,12 +75,63 @@ def non_max_suppression(prediction, conf_thres=0.25, iou_thres=0.45, max_det=300
     return output
 
+def non_max_suppression_numpy(prediction, conf_thres=0.25, iou_thres=0.45, max_det=300):
+    bs = prediction.shape[0]
+    output = [np.zeros((0, 6), dtype=np.float32)] * bs
+
+    for xi, x in enumerate(prediction):
+        bbox_xywh = x[:, :4]
+        class_probs = x[:, 4:]
+        class_ids = np.argmax(class_probs, axis=1)
+        confidences = np.max(class_probs, axis=1)
+
+        mask = confidences > conf_thres
+        bbox_xywh = bbox_xywh[mask]
+        confidences = confidences[mask]
+        class_ids = class_ids[mask]
+
+        if len(confidences) == 0:
+            continue
+
+        bbox_tlwh = np.copy(bbox_xywh)
+        bbox_tlwh[:, 0] = bbox_xywh[:, 0] - bbox_xywh[:, 2] / 2
+        bbox_tlwh[:, 1] = bbox_xywh[:, 1] - bbox_xywh[:, 3] / 2
+
+        indices = cv2.dnn.NMSBoxes(
+            bboxes=bbox_tlwh.tolist(),
+            scores=confidences.tolist(),
+            score_threshold=conf_thres,
+            nms_threshold=iou_thres
+        )
+
+        if len(indices) > 0:
+            indices = indices.flatten()
+            if len(indices) > max_det:
+                indices = indices[:max_det]
+
+            final_boxes_xywh = bbox_xywh[indices]
+            final_boxes_xyxy = xywh2xyxy(final_boxes_xywh)
+            final_scores = confidences[indices]
+            final_classes = class_ids[indices]
+
+            out_tensor = np.concatenate([
+                final_boxes_xyxy,
+                final_scores[:, None],
+                final_classes[:, None]
+            ], axis=1)
+            output[xi] = out_tensor
+
+    return output
+
 def main():
     device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
     print(f"Using device: {device}")
 
     model = YOLO11(nc=80, scale='s')
-    model.load_weights("yolo11s.pth")
+    model.load_weights("my_yolo_result_qat/best_fp32_converted.pth")
     model.to(device)
     model.eval()
 
     post_std = YOLOPostProcessor(model.model[-1], use_segmentation=False)
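
The non_max_suppression_numpy added above mirrors the torch path but runs entirely on NumPy arrays via cv2.dnn.NMSBoxes, so it also works on outputs of a quantized or exported model. A quick standalone sanity check, assuming the input layout main() feeds it, i.e. (batch, num_boxes, 4 + num_classes) with boxes as center-x, center-y, width, height:

import numpy as np

pred = np.zeros((1, 3, 4 + 80), dtype=np.float32)
pred[0, 0, :4] = [100, 100, 50, 50]   # box A
pred[0, 1, :4] = [102, 101, 50, 50]   # near-duplicate of A, should be suppressed
pred[0, 2, :4] = [300, 300, 40, 40]   # box B, far from A
pred[0, 0, 4] = 0.9                   # class 0 score for box A
pred[0, 1, 4] = 0.8                   # class 0 score for the duplicate
pred[0, 2, 6] = 0.7                   # class 2 score for box B

out = non_max_suppression_numpy(pred, conf_thres=0.25, iou_thres=0.45)
print(out[0])   # expect two rows of [x1, y1, x2, y2, conf, class_id]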
@@ -104,20 +155,28 @@ def main():
     with torch.no_grad():
         pred = model(img_tensor)
 
-        pred = post_std(pred)
-        pred = non_max_suppression(pred, conf_thres=0.25, iou_thres=0.45)
-        det = pred[0]
+        # pred = post_std(pred)
+        # pred = non_max_suppression(pred, conf_thres=0.25, iou_thres=0.45)
+        # det = pred[0]
+
+        preds_raw_numpy = [p.cpu().numpy() for p in pred]
+        post_numpy = YOLOPostProcessorNumpy(strides=[8, 16, 32], reg_max=16, use_segmentation=False)
+        pred_numpy_decoded = post_numpy(preds_raw_numpy)
+        pred_results = non_max_suppression_numpy(pred_numpy_decoded, conf_thres=0.25, iou_thres=0.45)
+        det = pred_results[0]
 
     if len(det):
         det[:, [0, 2]] -= dw  # x padding
         det[:, [1, 3]] -= dh  # y padding
         det[:, :4] /= ratio
-        det[:, 0].clamp_(0, img0.shape[1])
-        det[:, 1].clamp_(0, img0.shape[0])
-        det[:, 2].clamp_(0, img0.shape[1])
-        det[:, 3].clamp_(0, img0.shape[0])
+        # det[:, 0].clamp_(0, img0.shape[1])
+        # det[:, 1].clamp_(0, img0.shape[0])
+        # det[:, 2].clamp_(0, img0.shape[1])
+        # det[:, 3].clamp_(0, img0.shape[0])
+        det[:, 0] = np.clip(det[:, 0], 0, img0.shape[1])
+        det[:, 1] = np.clip(det[:, 1], 0, img0.shape[0])
+        det[:, 2] = np.clip(det[:, 2], 0, img0.shape[1])
+        det[:, 3] = np.clip(det[:, 3], 0, img0.shape[0])
 
         print(f"Detected {len(det)} objects")