what.examples.yolov3_pcb_attack_demo
import os

import cv2
import numpy as np

from what.models.detection.datasets.coco import COCO_CLASS_NAMES
from what.models.detection.utils.box_utils import draw_bounding_boxes
from what.models.detection.yolo.utils.yolo_utils import yolo_process_output, yolov3_anchors, yolov3_tiny_anchors

from what.attacks.detection.yolo.PCB import PCBAttack
from what.utils.resize import bilinear_resize

from what.cli.model import *
from what.utils.file import get_file

def yolov3_pcb_attack_demo():

    # Target Model
    what_yolov3_model_list = what_model_list[0:4]

    classes = COCO_CLASS_NAMES

    colors = np.random.uniform(0, 255, size=(len(classes), 3))

    # Check what_model_list for all supported models
    index = 3

    # Download the model first if it does not exist
    if not os.path.isfile(os.path.join(WHAT_MODEL_PATH, what_yolov3_model_list[index][WHAT_MODEL_FILE_INDEX])):
        get_file(what_yolov3_model_list[index][WHAT_MODEL_FILE_INDEX],
                 WHAT_MODEL_PATH,
                 what_yolov3_model_list[index][WHAT_MODEL_URL_INDEX],
                 what_yolov3_model_list[index][WHAT_MODEL_HASH_INDEX])

    # Adversarial Attack
    model_path = os.path.join(WHAT_MODEL_PATH, what_yolov3_model_list[index][WHAT_MODEL_FILE_INDEX])
    attack = PCBAttack(model_path, "multi_untargeted", classes, decay=0.99)
    attack.fixed = False

    last_outs = None
    last_boxes = None
    last_probs = None

    # Initialize the camera
    video = input("Please input the OpenCV capture device (e.g. 0, 1, 2): ")

    while not video.isdigit():
        video = input("Please input the OpenCV capture device (e.g. 0, 1, 2): ")

    # Capture from camera
    cap = cv2.VideoCapture(int(video))
    # cap.set(3, 1920)
    # cap.set(4, 1080)

    while True:
        # Capture the video frame
        success, origin_cv_image = cap.read()
        if not success:
            break

        # For YOLO, the input pixel values are normalized to [0, 1]
        input_cv_image = cv2.resize(origin_cv_image, (416, 416))
        input_cv_image = np.array(input_cv_image).astype(np.float32) / 255.0

        # Image preprocessing
        input_cv_image = cv2.cvtColor(input_cv_image, cv2.COLOR_BGR2RGB)

        # YOLO inference (the attack perturbs the input and returns the raw outputs)
        input_cv_image, outs = attack.attack(input_cv_image)

        if last_outs is not None:
            # Track the mean change in objectness score relative to the previous frame
            res_list = []
            for out, last_out in zip(outs, last_outs):
                out = out.reshape((-1, 5 + len(classes)))
                last_out = last_out.reshape((-1, 5 + len(classes)))

                res = np.mean(out[:, 4] - last_out[:, 4])
                res_list.append(res)
        else:
            last_outs = outs

        boxes, labels, probs = yolo_process_output(outs, yolov3_tiny_anchors, len(classes))

        if last_boxes is not None:
            # Eliminate the boxes with low confidence and overlapping boxes
            # (the surviving indexes are computed but not applied further)
            if last_boxes.size > 0 and boxes.size > 0:
                indexes = cv2.dnn.NMSBoxes(np.vstack((boxes, last_boxes)).tolist(),
                                           np.hstack((np.array(probs), np.array(last_probs))),
                                           0.5, 0.4)
                if len(indexes) > 0:
                    indexes = indexes.flatten()

        last_boxes = np.copy(boxes)
        last_probs = np.copy(probs)

        # Draw bounding boxes
        out_img = cv2.cvtColor(origin_cv_image, cv2.COLOR_BGR2RGB)
        out_img = out_img.astype(np.float32) / 255.0
        height, width, _ = out_img.shape
        noise = attack.noise

        # Resize the noise to the same shape as the input image
        # noise_r = bilinear_resize(noise[:, :, 0], height, width)
        # noise_g = bilinear_resize(noise[:, :, 1], height, width)
        # noise_b = bilinear_resize(noise[:, :, 2], height, width)
        # noise = np.dstack((noise_r, noise_g, noise_b))

        # out_img = out_img + noise
        out_img = np.clip(out_img, 0, 1)

        out_img = (out_img * 255.0).astype(np.uint8)

        # for i in range(boxes.shape[0]):
        #     logger.info(f"{classes[labels[i]]}: {probs[i]:.2f}")

        out_img = cv2.cvtColor(out_img, cv2.COLOR_RGB2BGR)
        if len(boxes) > 0:
            out_img = draw_bounding_boxes(out_img, boxes, labels, classes, probs)

        cv2.namedWindow("result", cv2.WINDOW_NORMAL)
        cv2.imshow("result", out_img)

        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
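Since the demo is a plain, zero-argument function, it can be launched from a short driver script once the what package and its model weights are available. The sketch below is illustrative only; the script name and the __main__ wiring are not part of the package.

# run_pcb_demo.py -- illustrative driver, not part of the package.
from what.examples.yolov3_pcb_attack_demo import yolov3_pcb_attack_demo

if __name__ == "__main__":
    # Prompts for an OpenCV capture device index, then runs the live
    # attack loop; press 'q' in the "result" window to stop.
    yolov3_pcb_attack_demo()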
def yolov3_pcb_attack_demo()
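Inside the loop above, the overlay of the adversarial noise onto the full-resolution frame is left commented out; only the 416x416 network input is actually perturbed by attack.attack(). If you want to visualize the perturbation on the original frame, the commented lines suggest the pattern below. This is a sketch under the assumption that attack.noise is a 416x416x3 float array and that bilinear_resize(channel, height, width) upsamples a single channel to the given size, as the commented code implies; overlay_noise is a hypothetical helper name.

import numpy as np

from what.utils.resize import bilinear_resize

def overlay_noise(frame_rgb01, noise):
    # frame_rgb01: H x W x 3 float32 image in [0, 1] (RGB order).
    # noise: the attack's perturbation at network resolution (assumed 416 x 416 x 3).
    height, width, _ = frame_rgb01.shape
    noise_r = bilinear_resize(noise[:, :, 0], height, width)
    noise_g = bilinear_resize(noise[:, :, 1], height, width)
    noise_b = bilinear_resize(noise[:, :, 2], height, width)
    full_noise = np.dstack((noise_r, noise_g, noise_b))
    # Add the upsampled perturbation and keep pixel values in [0, 1].
    return np.clip(frame_rgb01 + full_noise, 0.0, 1.0)

With this helper, the drawing block would call out_img = overlay_noise(out_img, attack.noise) before converting the frame back to uint8.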
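The loop also runs cv2.dnn.NMSBoxes over the boxes of the current and previous frame but never applies the resulting indexes. If you want to keep only the surviving boxes, one possible helper is sketched below; filter_boxes_across_frames is a hypothetical name, and the 0.5 / 0.4 thresholds mirror the values used in the demo.

import cv2
import numpy as np

def filter_boxes_across_frames(boxes, probs, last_boxes, last_probs,
                               score_thresh=0.5, nms_thresh=0.4):
    # Stack detections from two consecutive frames and keep only the boxes
    # that survive OpenCV's non-maximum suppression, as in the demo's call.
    # cv2.dnn.NMSBoxes expects boxes as [x, y, w, h] and one score per box.
    all_boxes = np.vstack((boxes, last_boxes))
    all_probs = np.hstack((np.array(probs), np.array(last_probs)))
    indexes = cv2.dnn.NMSBoxes(all_boxes.tolist(), all_probs, score_thresh, nms_thresh)
    if len(indexes) == 0:
        return np.empty((0, 4)), np.empty((0,))
    keep = np.array(indexes).flatten()
    return all_boxes[keep], all_probs[keep]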