Commit 00a44831 authored by Pavlo Beylin

Implement tapering transformations. Fix noise-induced transparency issues.

parent feeb0bf1
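The taper itself lives further down in `PatchTransformer`: a randomized 2x3 shear matrix (`grid_taper`) is turned into a sampling grid with `F.affine_grid` and applied with `F.grid_sample` whenever `do_taper` is set. Below is a minimal sketch of the same idea, with fixed shear coefficients standing in for the commit's `random.choice([-1, 1]) * random.random()` draws; the helper name `taper_patch` is chosen here for illustration only.

```python
import torch
import torch.nn.functional as F

def taper_patch(patch, shear_y=0.4, shear_x=0.02):
    """Shear-style 'taper' for a (C, H, W) patch, mirroring the grid_taper path:
    build a 2x3 affine matrix, turn it into a sampling grid, resample the patch."""
    batch = patch.unsqueeze(0)  # (1, C, H, W)
    taper = torch.zeros(1, 2, 3, dtype=batch.dtype, device=batch.device)
    taper[:, 0, 0] = 1
    taper[:, 1, 1] = 1
    taper[:, 1, 0] = shear_y   # randomized in the commit: random.choice([-1, 1]) * random.random()
    taper[:, 0, 1] = shear_x   # randomized in the commit, scaled by 0.1
    grid = F.affine_grid(taper, batch.shape, align_corners=False)
    return F.grid_sample(batch, grid, align_corners=False).squeeze(0)

# e.g. on a random PATCH_SIZE x PATCH_SIZE patch (PATCH_SIZE is 300 in the script):
tapered = taper_patch(torch.rand(3, 300, 300))
```

Since a 2x3 matrix is affine, this "taper" is a skew of the patch rather than a true perspective foreshortening, which matches the matrices built in the diff.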
@@ -42,7 +42,7 @@ classes = ["person", "bicycle", "car", "motorbike", "aeroplane", "bus",
"keyboard", "cell phone", "microwave", "oven", "toaster", "sink", "refrigerator",
"book", "clock", "vase", "scissors", "teddy bear", "hair drier", "toothbrush"]
PATH = "cat_patch0.jpg"
PATH = "saved_patches/fatcat.jpg"
PATCH_SIZE = 300
total_variation = TotalVariation()
@@ -171,25 +171,40 @@ if __name__ == "__main__":
raise IOError("We cannot open webcam")
patch = read_image(PATH)
patch = torch.rand_like(patch)
# patch = torch.rand_like(patch)
patch.requires_grad = True
optimizer = optim.Adam([patch], lr=0.0001, amsgrad=True)
gradient_sum = 0
img_size_x = 640
# img_size_x = 640
img_size_x = 480
img_size_y = 480
ctr = -1
pred = -1
# Launch Settings
# move = True
# rotate = True
# taper = True
# resize = True
# squeeze = True
# gauss = True
# obfuscate = True
# stretch = True
move = False
rotate = False
taper = False
resize = False
squeeze = False
gauss = True
gauss = False
obfuscate = False
stretch = False
transform_interval = 1
angle_step = 5
tv_factor = 1
ctr = -1
pred = -1
frame_read = False
fix_frame = False
patch_transformer.maxangle = 5/180 * math.pi
@@ -199,6 +214,9 @@ if __name__ == "__main__":
if not (fix_frame and frame_read):
ret, frame = cap.read()
# cut image
frame = frame[:, :img_size_x, :]
with torch.set_grad_enabled(True):
# with torch.autograd.detect_anomaly():
@@ -217,10 +235,14 @@ if __name__ == "__main__":
if ctr % 1 == 0:
trans_patch = patch_transformer(patch.cuda(), torch.ones([1, 14, 5]).cuda(), img_size_x, img_size_y,
do_rotate=rotate, rand_loc=move, rand_size=resize,
rand_squeeze=squeeze, gauss=gauss)
rand_squeeze=squeeze, gauss=gauss, obfuscate=obfuscate,
stretch=stretch, do_taper=taper)
# extract bounding box (x1, y1, x2, y2)
bounding_box = extract_bounding_box(trans_patch)
try:
bounding_box = extract_bounding_box(trans_patch)
except Exception:
print("zero-sized patch ... ")
# apply patch
frame = patch_applier(frame_original, trans_patch)
@@ -233,9 +255,11 @@ if __name__ == "__main__":
# debug_preds()
pass
# iou, pred = get_best_prediction(bounding_box, raw_results, 15) # get cats
# iou, pred = get_best_prediction(bounding_box, raw_results, 12) # get parking meters
iou, pred = get_best_prediction(bounding_box, raw_results, 11) # get stop signs
iou, pred = get_best_prediction(bounding_box, raw_results, 15) # get cat
# iou, pred = get_best_prediction(bounding_box, raw_results, 12) # get parking meter
# iou, pred = get_best_prediction(bounding_box, raw_results, 11) # get stop sign
# iou, pred = get_best_prediction(bounding_box, raw_results, 8) # get boat
# iou, pred = get_best_prediction(bounding_box, raw_results, 62) # get tv
# pred = get_best_prediction(bounding_box, raw_results, 42) # get forked
# pred = get_avg_prediction(raw_results, 15) # make everything cats
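`get_best_prediction` is not part of this diff; going by its arguments (the patch bounding box, the raw detector output, a COCO class id) and its `(iou, pred)` return value, it presumably picks the detection of the requested class that overlaps the patch the most. A hedged sketch of that idea, assuming detections arrive as `(x1, y1, x2, y2, conf, class_id)` rows, which may not match the repo's actual output format:

```python
def iou_xyxy(a, b):
    """IoU of two (x1, y1, x2, y2) boxes."""
    ix1, iy1 = max(a[0], b[0]), max(a[1], b[1])
    ix2, iy2 = min(a[2], b[2]), min(a[3], b[3])
    inter = max(ix2 - ix1, 0) * max(iy2 - iy1, 0)
    area_a = (a[2] - a[0]) * (a[3] - a[1])
    area_b = (b[2] - b[0]) * (b[3] - b[1])
    return inter / (area_a + area_b - inter + 1e-9)

def best_prediction_for_class(bounding_box, detections, class_id):
    """Return (best_iou, best_detection) among detections of class_id, or (0.0, None)."""
    best_iou, best_det = 0.0, None
    for det in detections:
        if int(det[5]) != class_id:
            continue
        iou = iou_xyxy(bounding_box, det[:4])
        if iou > best_iou:
            best_iou, best_det = iou, det
    return best_iou, best_det
```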
@@ -299,6 +323,15 @@ if __name__ == "__main__":
if key == ord("g"):
gauss = not gauss
print("Gauss: {}".format(gauss))
if key == ord("p"):
taper = not taper
print("Taper: {}".format(taper))
if key == ord("h"):
obfuscate = not obfuscate
print(f"Obfuscate: {obfuscate}")
if key == ord("e"):
stretch = not stretch
print(f"Obfuscate: {obfuscate}")
if key == ord("+"):
transform_interval += 1
print("Transform Interval: {}".format(transform_interval))
......
import math
import random
import cv2
import torchvision
from torch.nn.modules.utils import _pair, _quadruple
import torch.nn.functional as F
import numpy as np
import torch
import matplotlib.pyplot as plt
from torch import nn, Tensor
def show(img):
plt.imshow(img.detach().cpu())
plt.show()
class PatchApplier(nn.Module):
"""PatchApplier: applies adversarial patches to images.
@@ -103,7 +108,7 @@ class PatchTransformer(nn.Module):
def forward(self, adv_patch, lab_batch, img_size_x, img_size_y,
do_rotate=True, rand_loc=True, rand_size=True, gauss=True, max_sigma=6,
obfuscate=True, max_obs_part=1, rand_squeeze=True):
obfuscate=True, max_obs_part=1, rand_squeeze=True, stretch=True, do_taper=True):
# Obfuscation
if obfuscate:
@@ -111,8 +116,8 @@ class PatchTransformer(nn.Module):
max_y = adv_patch.shape[2]
obfuscation_shape = [int(random.random() * max_obs_part * max_x),
int(random.random() * max_obs_part * max_y)]
obfuscation_root = [int(random.random() * (max_x-obfuscation_shape[0])),
int(random.random() * (max_y-obfuscation_shape[1]))]
obfuscation_root = [int(random.random() * (max_x - obfuscation_shape[0])),
int(random.random() * (max_y - obfuscation_shape[1]))]
adv_patch[:, obfuscation_root[0]:obfuscation_root[0] + obfuscation_shape[0],
obfuscation_root[1]:obfuscation_root[1] + obfuscation_shape[1]] = 0.001
@@ -145,9 +150,9 @@ class PatchTransformer(nn.Module):
noise = torch.cuda.FloatTensor(adv_batch.size()).uniform_(-1, 1) * self.noise_factor
# Apply contrast/brightness/noise, clamp
adv_batch = adv_batch * contrast + brightness + noise
adv_batch = adv_batch * contrast + brightness + noise
adv_batch = torch.clamp(adv_batch, 0.000001, 0.99999)
adv_batch = torch.clamp(adv_batch, 0.01, 0.99999)
# TODO should that not be class 0 if we do not want to cover person? cls=1 would be bicycle ...
# Where the label class_id is 1 we don't want a patch (padding) --> fill mask with zeros
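The clamp floor in this hunk is raised from 0.000001 to 0.01, which is the "noise-induced transparency" fix from the commit message: after the additive noise, patch pixels could previously sit arbitrarily close to zero. A small sketch of the augmentation step shown above; the contrast/brightness ranges and `noise_factor` value are assumptions, since those attributes are set elsewhere in the class and are not part of this diff.

```python
import torch

# Illustrative shapes and ranges (assumed), matching the structure of the diff above.
adv_batch = torch.rand(1, 14, 3, 300, 300)
contrast = torch.empty(1).uniform_(0.8, 1.2)
brightness = torch.empty(1).uniform_(-0.1, 0.1)
noise = torch.empty_like(adv_batch).uniform_(-1, 1) * 0.10  # uniform_(-1, 1) * noise_factor

adv_batch = adv_batch * contrast + brightness + noise
# Old floor 0.000001 let noisy pixels land arbitrarily close to zero; the commit
# raises it to 0.01, keeping every patch pixel clearly away from the values the
# commit message associates with "noise-induced transparency".
adv_batch = torch.clamp(adv_batch, 0.01, 0.99999)
```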
@@ -196,8 +201,11 @@ class PatchTransformer(nn.Module):
else resize_factor_x
scale_x = resize_factor_x * target_size / current_patch_size
scale_y = resize_factor_y * target_size / current_patch_size
scale_x = scale_x.view(anglesize)
scale_y = scale_y.view(anglesize)
scale_x = scale_x.view(anglesize) * (random.random() + 0.3 if stretch else 1)
scale_y = scale_y.view(anglesize) * (random.random() + 0.3 if stretch else 1)
scale_x = torch.where(scale_x > 0, scale_x, torch.tensor(1, dtype=scale_x.dtype).cuda())
scale_y = torch.where(scale_y > 0, scale_y, torch.tensor(1, dtype=scale_y.dtype).cuda())
s = adv_batch.size()
adv_batch = adv_batch.view(s[0] * s[1], s[2], s[3], s[4])
@@ -217,27 +225,30 @@ class PatchTransformer(nn.Module):
theta[:, 1, 1] = cos / scale_y
theta[:, 1, 2] = -tx * sin / scale_y + ty * cos / scale_x
# Tapering Matrix
taper = torch.cuda.FloatTensor(anglesize, 2, 3).fill_(0)
taper[:, 0, 0] = 1
taper[:, 1, 0] = random.choice([-1, 1]) * random.random()
taper[:, 0, 1] = random.choice([-1, 1]) * random.random() * 0.1
taper[:, 1, 1] = 1
b_sh = adv_batch.shape
grid = F.affine_grid(theta, adv_batch.shape, align_corners=False)
grid_taper = F.affine_grid(taper, adv_batch.shape, align_corners=False)
# show(grid[0][:, :, 0])
# show(grid[0][:, :, 1])
adv_batch_t = F.grid_sample(adv_batch, grid, align_corners=False)
if do_taper:
adv_batch_t = F.grid_sample(adv_batch_t, grid_taper, align_corners=False)
msk_batch_t = F.grid_sample(msk_batch, grid, align_corners=False)
'''
# Theta2 = translation matrix
theta2 = torch.cuda.FloatTensor(anglesize, 2, 3).fill_(0)
theta2[:, 0, 0] = 1
theta2[:, 0, 1] = 0
theta2[:, 0, 2] = (-target_x + 0.5) * 2
theta2[:, 1, 0] = 0
theta2[:, 1, 1] = 1
theta2[:, 1, 2] = (-target_y + 0.5) * 2
grid2 = F.affine_grid(theta2, adv_batch.shape)
adv_batch_t = F.grid_sample(adv_batch_t, grid2)
msk_batch_t = F.grid_sample(msk_batch_t, grid2)
# # pad
# target = torch.zeros_like(adv_batch)
# target[:, :, :, :adv_batch.shape[-2]] = adv_batch_t
# adv_batch_t = target
'''
adv_batch_t = adv_batch_t.view(s[0], s[1], s[2], s[3], s[4])
msk_batch_t = msk_batch_t.view(s[0], s[1], s[2], s[3], s[4])
......@@ -254,7 +265,7 @@ class PatchTransformer(nn.Module):
if gauss:
adv_batch_t = self.gauss_filter(adv_batch_t, random.random() * max_sigma)
return adv_batch_t # * msk_batch_t
return adv_batch_t.flip(-1) # * msk_batch_t
def gauss_filter(self, img, sigma):
@@ -298,4 +309,4 @@ class PatchTransformer(nn.Module):
gaussian_filter.cuda()
return gaussian_filter.forward(img.T.unsqueeze(0)).squeeze(0).T.flip(-1)
return gaussian_filter.forward(img.T.unsqueeze(0)).squeeze(0).T
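Only the tail of `gauss_filter` appears in this diff (the trailing `.flip(-1)` moves from here up to the main `forward` return). For orientation, here is a hedged sketch of how such a blur is commonly built in PyTorch: a Gaussian kernel applied as a depthwise `conv2d`. The kernel size, padding policy, and the `gaussian_blur` name are assumptions; the repo's actual `gauss_filter` may be implemented differently.

```python
import torch
import torch.nn.functional as F

def gaussian_blur(img, sigma, kernel_size=7):
    """Blur a (C, H, W) tensor with a depthwise Gaussian convolution."""
    sigma = max(float(sigma), 1e-6)  # guard: random.random() * max_sigma can be ~0
    coords = torch.arange(kernel_size, dtype=torch.float32) - (kernel_size - 1) / 2
    kernel_1d = torch.exp(-coords ** 2 / (2 * sigma ** 2))
    kernel_1d = kernel_1d / kernel_1d.sum()
    kernel_2d = torch.outer(kernel_1d, kernel_1d)   # separable Gaussian -> 2D kernel
    c = img.shape[0]
    weight = kernel_2d.repeat(c, 1, 1, 1)           # (C, 1, k, k): one kernel per channel
    return F.conv2d(img.unsqueeze(0), weight, padding=kernel_size // 2, groups=c).squeeze(0)

# Mirrors the transformer's call site, where sigma is random.random() * max_sigma (default 6):
blurred = gaussian_blur(torch.rand(3, 300, 300), sigma=3.0)
```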