Unverified commit a820b43a, authored by Glenn Jocher, committed by GitHub

Automatic Chinese fonts plotting (#4951)

* Automatic Chinese fonts plotting * Default PIL=False
Parent commit: c5ba2abb
...@@ -23,7 +23,7 @@ if str(ROOT) not in sys.path: ...@@ -23,7 +23,7 @@ if str(ROOT) not in sys.path:
from models.experimental import attempt_load from models.experimental import attempt_load
from utils.datasets import LoadImages, LoadStreams from utils.datasets import LoadImages, LoadStreams
from utils.general import apply_classifier, check_img_size, check_imshow, check_requirements, check_suffix, colorstr, \ from utils.general import apply_classifier, check_img_size, check_imshow, check_requirements, check_suffix, colorstr, \
increment_path, is_ascii, non_max_suppression, print_args, save_one_box, scale_coords, set_logging, \ increment_path, non_max_suppression, print_args, save_one_box, scale_coords, set_logging, \
strip_optimizer, xyxy2xywh strip_optimizer, xyxy2xywh
from utils.plots import Annotator, colors from utils.plots import Annotator, colors
from utils.torch_utils import load_classifier, select_device, time_sync from utils.torch_utils import load_classifier, select_device, time_sync
...@@ -108,7 +108,6 @@ def run(weights='yolov5s.pt', # model.pt path(s) ...@@ -108,7 +108,6 @@ def run(weights='yolov5s.pt', # model.pt path(s)
output_details = interpreter.get_output_details() # outputs output_details = interpreter.get_output_details() # outputs
int8 = input_details[0]['dtype'] == np.uint8 # is TFLite quantized uint8 model int8 = input_details[0]['dtype'] == np.uint8 # is TFLite quantized uint8 model
imgsz = check_img_size(imgsz, s=stride) # check image size imgsz = check_img_size(imgsz, s=stride) # check image size
ascii = is_ascii(names) # names are ascii (use PIL for UTF-8)
# Dataloader # Dataloader
if webcam: if webcam:
...@@ -190,7 +189,7 @@ def run(weights='yolov5s.pt', # model.pt path(s) ...@@ -190,7 +189,7 @@ def run(weights='yolov5s.pt', # model.pt path(s)
s += '%gx%g ' % img.shape[2:] # print string s += '%gx%g ' % img.shape[2:] # print string
gn = torch.tensor(im0.shape)[[1, 0, 1, 0]] # normalization gain whwh gn = torch.tensor(im0.shape)[[1, 0, 1, 0]] # normalization gain whwh
imc = im0.copy() if save_crop else im0 # for save_crop imc = im0.copy() if save_crop else im0 # for save_crop
annotator = Annotator(im0, line_width=line_thickness, pil=not ascii) annotator = Annotator(im0, line_width=line_thickness, example=str(names))
if len(det): if len(det):
# Rescale boxes from img_size to im0 size # Rescale boxes from img_size to im0 size
det[:, :4] = scale_coords(img.shape[2:], det[:, :4], im0.shape).round() det[:, :4] = scale_coords(img.shape[2:], det[:, :4], im0.shape).round()
......
...@@ -18,7 +18,7 @@ from PIL import Image ...@@ -18,7 +18,7 @@ from PIL import Image
from torch.cuda import amp from torch.cuda import amp
from utils.datasets import exif_transpose, letterbox from utils.datasets import exif_transpose, letterbox
from utils.general import colorstr, increment_path, is_ascii, make_divisible, non_max_suppression, save_one_box, \ from utils.general import colorstr, increment_path, make_divisible, non_max_suppression, save_one_box, \
scale_coords, xyxy2xywh scale_coords, xyxy2xywh
from utils.plots import Annotator, colors from utils.plots import Annotator, colors
from utils.torch_utils import time_sync from utils.torch_utils import time_sync
...@@ -356,7 +356,6 @@ class Detections: ...@@ -356,7 +356,6 @@ class Detections:
self.imgs = imgs # list of images as numpy arrays self.imgs = imgs # list of images as numpy arrays
self.pred = pred # list of tensors pred[0] = (xyxy, conf, cls) self.pred = pred # list of tensors pred[0] = (xyxy, conf, cls)
self.names = names # class names self.names = names # class names
self.ascii = is_ascii(names) # names are ascii (use PIL for UTF-8)
self.files = files # image filenames self.files = files # image filenames
self.xyxy = pred # xyxy pixels self.xyxy = pred # xyxy pixels
self.xywh = [xyxy2xywh(x) for x in pred] # xywh pixels self.xywh = [xyxy2xywh(x) for x in pred] # xywh pixels
...@@ -369,13 +368,13 @@ class Detections: ...@@ -369,13 +368,13 @@ class Detections:
def display(self, pprint=False, show=False, save=False, crop=False, render=False, save_dir=Path('')): def display(self, pprint=False, show=False, save=False, crop=False, render=False, save_dir=Path('')):
crops = [] crops = []
for i, (im, pred) in enumerate(zip(self.imgs, self.pred)): for i, (im, pred) in enumerate(zip(self.imgs, self.pred)):
str = f'image {i + 1}/{len(self.pred)}: {im.shape[0]}x{im.shape[1]} ' s = f'image {i + 1}/{len(self.pred)}: {im.shape[0]}x{im.shape[1]} ' # string
if pred.shape[0]: if pred.shape[0]:
for c in pred[:, -1].unique(): for c in pred[:, -1].unique():
n = (pred[:, -1] == c).sum() # detections per class n = (pred[:, -1] == c).sum() # detections per class
str += f"{n} {self.names[int(c)]}{'s' * (n > 1)}, " # add to string s += f"{n} {self.names[int(c)]}{'s' * (n > 1)}, " # add to string
if show or save or render or crop: if show or save or render or crop:
annotator = Annotator(im, pil=not self.ascii) annotator = Annotator(im, example=str(self.names))
for *box, conf, cls in reversed(pred): # xyxy, confidence, class for *box, conf, cls in reversed(pred): # xyxy, confidence, class
label = f'{self.names[int(cls)]} {conf:.2f}' label = f'{self.names[int(cls)]} {conf:.2f}'
if crop: if crop:
...@@ -386,11 +385,11 @@ class Detections: ...@@ -386,11 +385,11 @@ class Detections:
annotator.box_label(box, label, color=colors(cls)) annotator.box_label(box, label, color=colors(cls))
im = annotator.im im = annotator.im
else: else:
str += '(no detections)' s += '(no detections)'
im = Image.fromarray(im.astype(np.uint8)) if isinstance(im, np.ndarray) else im # from np im = Image.fromarray(im.astype(np.uint8)) if isinstance(im, np.ndarray) else im # from np
if pprint: if pprint:
LOGGER.info(str.rstrip(', ')) LOGGER.info(s.rstrip(', '))
if show: if show:
im.show(self.files[i]) # show im.show(self.files[i]) # show
if save: if save:
......
...@@ -161,10 +161,9 @@ def is_pip(): ...@@ -161,10 +161,9 @@ def is_pip():
return 'site-packages' in Path(__file__).resolve().parts return 'site-packages' in Path(__file__).resolve().parts
def is_chinese(s='人工智能'):
    """Return True if the string *s* contains any Chinese (CJK Unified Ideographs) characters.

    Used to decide whether a Unicode-capable font (e.g. Arial.Unicode.ttf) is needed for plotting
    labels. Checks the basic CJK block U+4E00–U+9FFF only; extension blocks are not covered.

    Args:
        s (str): string to test (default is a Chinese sample so the function is self-demonstrating).

    Returns:
        bool: True if at least one Chinese character is present, else False.
    """
    # bool() so callers get a clean predicate instead of a re.Match | None
    return bool(re.search('[\u4e00-\u9fff]', s))
def emojis(str=''): def emojis(str=''):
......
...@@ -17,7 +17,7 @@ import seaborn as sn ...@@ -17,7 +17,7 @@ import seaborn as sn
import torch import torch
from PIL import Image, ImageDraw, ImageFont from PIL import Image, ImageDraw, ImageFont
from utils.general import user_config_dir, is_ascii, xywh2xyxy, xyxy2xywh from utils.general import user_config_dir, is_chinese, xywh2xyxy, xyxy2xywh
from utils.metrics import fitness from utils.metrics import fitness
# Settings # Settings
...@@ -66,20 +66,21 @@ class Annotator: ...@@ -66,20 +66,21 @@ class Annotator:
check_font() # download TTF if necessary check_font() # download TTF if necessary
# YOLOv5 Annotator for train/val mosaics and jpgs and detect/hub inference annotations # YOLOv5 Annotator for train/val mosaics and jpgs and detect/hub inference annotations
def __init__(self, im, line_width=None, font_size=None, font='Arial.ttf', pil=True): def __init__(self, im, line_width=None, font_size=None, font='Arial.ttf', pil=False, example='abc'):
assert im.data.contiguous, 'Image not contiguous. Apply np.ascontiguousarray(im) to Annotator() input images.' assert im.data.contiguous, 'Image not contiguous. Apply np.ascontiguousarray(im) to Annotator() input images.'
self.pil = pil self.pil = pil or not example.isascii() or is_chinese(example)
if self.pil: # use PIL if self.pil: # use PIL
self.im = im if isinstance(im, Image.Image) else Image.fromarray(im) self.im = im if isinstance(im, Image.Image) else Image.fromarray(im)
self.draw = ImageDraw.Draw(self.im) self.draw = ImageDraw.Draw(self.im)
self.font = check_font(font, size=font_size or max(round(sum(self.im.size) / 2 * 0.035), 12)) self.font = check_font(font='Arial.Unicode.ttf' if is_chinese(example) else font,
size=font_size or max(round(sum(self.im.size) / 2 * 0.035), 12))
else: # use cv2 else: # use cv2
self.im = im self.im = im
self.lw = line_width or max(round(sum(im.shape) / 2 * 0.003), 2) # line width self.lw = line_width or max(round(sum(im.shape) / 2 * 0.003), 2) # line width
def box_label(self, box, label='', color=(128, 128, 128), txt_color=(255, 255, 255)): def box_label(self, box, label='', color=(128, 128, 128), txt_color=(255, 255, 255)):
# Add one xyxy box to image with label # Add one xyxy box to image with label
if self.pil or not is_ascii(label): if self.pil or not label.isascii():
self.draw.rectangle(box, width=self.lw, outline=color) # box self.draw.rectangle(box, width=self.lw, outline=color) # box
if label: if label:
w, h = self.font.getsize(label) # text width, height w, h = self.font.getsize(label) # text width, height
...@@ -177,7 +178,7 @@ def plot_images(images, targets, paths=None, fname='images.jpg', names=None, max ...@@ -177,7 +178,7 @@ def plot_images(images, targets, paths=None, fname='images.jpg', names=None, max
# Annotate # Annotate
fs = int((h + w) * ns * 0.01) # font size fs = int((h + w) * ns * 0.01) # font size
annotator = Annotator(mosaic, line_width=round(fs / 10), font_size=fs) annotator = Annotator(mosaic, line_width=round(fs / 10), font_size=fs, pil=True)
for i in range(i + 1): for i in range(i + 1):
x, y = int(w * (i // ns)), int(h * (i % ns)) # block origin x, y = int(w * (i // ns)), int(h * (i % ns)) # block origin
annotator.rectangle([x, y, x + w, y + h], None, (255, 255, 255), width=2) # borders annotator.rectangle([x, y, x + w, y + h], None, (255, 255, 255), width=2) # borders
......
Markdown is supported
0%
You are adding 0 people to this discussion. Please proceed with caution.
Please finish editing this comment first!
Register or sign in to comment