
Commit b4d0d57

bemova committed

    apply the pr

1 parent 05172d9 commit b4d0d57

15 files changed: +10 -38 lines changed

README.md (+5 -8)
@@ -19,7 +19,7 @@ Following image briefly describes how PatchUp works. It is the PatchUp process f
 
 ### Citation:
 
-If you find this work useful and use it in your own research, please citing our [paper](http://google.com).
+If you find this work useful and use it in your own research, please consider citing our [paper](http://google.com).
 ```
 
 ```
@@ -227,17 +227,17 @@ Note: To run the cutout experiment on SVHN, you should also set --cutout 20
 
 # Experiments on Deformed Images
 
-First, we have to creat a affine transformed test set. With simply run the following command:
+First, we need to create an affine transformed test set by running the following command:
 ```
 python ./load_data.py --affine_path ./data/test/affine/
 ```
-I creates affine transformed test set described in the paper for the CIFAR-100.
+This creates the affine transformed test set described in the paper for CIFAR-100.
 After creating the Deformed Images test set,
 we can run the generalization experiment on Deformed Images (the affine transformed test set) with the same commands used to train a model with a regularization technique, plus two more parameters. The following is an example command for Soft PatchUp.
 ```
 python ./main.py --dataset cifar100 --data_dir ./data/cifar100/ --affine_test --affine_path ./data/test/affine/ --root_dir ./experiments/patchup/soft/ --labels_per_class 500 --valid_labels_per_class 50 --arch wrn28_10 --learning_rate 0.1 --momentum 0.9 --decay 0.0001 --epochs 400 --schedule 200 300 --step_factors 0.1 0.1 --train patchup --alpha 2.0 --patchup_type soft --patchup_block 7 --patchup_prob 1.0 --gamma 0.25 --job_id <JobID>
 ```
-Note: Use above as an example command for running experiment on evaluating other approaches performance.
+Note: Use the above as a pattern to create a command to run an experiment evaluating the performance of the other approaches in this task.
 <br/>
 <hr/>
 <br/>
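For readers who want a picture of what such a deformation step might look like, here is a minimal sketch using torchvision. The transform parameters (degrees, translate, scale) and the output file layout are illustrative assumptions; this diff does not show what load_data.py actually uses.

```python
# Hypothetical sketch of building an affine-deformed CIFAR-100 test set.
# All transform parameters and the save-path layout are assumptions,
# not taken from load_data.py.
import os
import torch
from torchvision import datasets, transforms

def build_affine_test_set(affine_path='./data/test/affine/'):
    deform = transforms.Compose([
        # assumed deformation strengths; the paper's exact values may differ
        transforms.RandomAffine(degrees=15, translate=(0.1, 0.1), scale=(0.9, 1.1)),
        transforms.ToTensor(),
    ])
    test_set = datasets.CIFAR100(root='./data/cifar100/', train=False,
                                 download=True, transform=deform)
    images = torch.stack([img for img, _ in test_set])
    labels = torch.tensor(test_set.targets)
    os.makedirs(affine_path, exist_ok=True)
    torch.save((images, labels), os.path.join(affine_path, 'affine_test.pt'))
```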
@@ -247,15 +247,12 @@ Note: Use above as an example command for running experiment on evaluating other
 In order to see the regularized models' robustness against the FGSM attack, we can use the following parameter:
 * --fsgm_attack True
 
-Following is an example of command that runs this experiment for training PreActResNet18 on CIFAR-10 with Soft PatchUp and evaluate its robustness against FGSM attack.
+The following command runs this experiment on PreActResNet18 on CIFAR-10 with Soft PatchUp and evaluates its robustness against the FGSM attack.
 ```
 python ./main.py --dataset cifar10 --data_dir ./data/cifar10/ --fsgm_attack True --root_dir ./experiments/patchup/soft/ --labels_per_class 5000 --valid_labels_per_class 500 --arch <X> --learning_rate 0.1 --momentum 0.9 --decay 0.0001 --epochs 2000 --schedule 500 1000 1500 --step_factors 0.1 0.1 0.1 --train patchup --alpha 2.0 --patchup_type soft --patchup_block 7 --patchup_prob 1.0 --gamma 0.25 --job_id <JobID>
 ```
 Note: Use the above as an example command for running experiments that evaluate the performance of other approaches.
 
-```shell script
-PR - versions
-```
 
 
 
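For context, FGSM (Goodfellow et al.) perturbs each input one step in the direction of the sign of the loss gradient. The repo's own implementation lives in utility/adversarial_attack.py; the function below is a generic sketch of the attack, not that exact code.

```python
import torch
import torch.nn.functional as F

def fgsm(model, x, y, epsilon):
    """Generic FGSM step: x_adv = x + epsilon * sign(grad_x loss)."""
    x = x.clone().detach().requires_grad_(True)
    loss = F.cross_entropy(model(x), y)
    loss.backward()
    x_adv = x + epsilon * x.grad.sign()
    return x_adv.clamp(0, 1).detach()  # keep pixels in the valid range
```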
data_loader.py (-2)

@@ -377,5 +377,3 @@ def test_load_transformed_test_sets(path='./data/test/affine/'):
 test_load_transformed_test_sets(path=args.affine_path)
 
 
-#PR_2
-
main.py (-1)

@@ -575,4 +575,3 @@ def main():
 if __name__ == '__main__':
     main()
 
-#PR_2

models/__init__.py (-3)

@@ -14,6 +14,3 @@
 """
 from .preresnet import preactresnet18, preactresnet34
 from .wide_resnet import wrn28_10, wrn28_2
-
-
-#PR

models/preresnet.py (+4 -5)

@@ -80,6 +80,10 @@ def forward(self, x):
 
 
 class PreActResNet(nn.Module):
+    """
+    In this implementation, PreActResNet consists of a convolutional module followed
+    by 4 residual blocks and a fully connected layer for classification.
+    """
     def __init__(self, block, num_blocks, initial_channels, num_classes, per_img_std=False, stride=1, drop_block=7,
                  keep_prob=.9, gamma=.9, patchup_block=7):
         super(PreActResNet, self).__init__()
@@ -89,8 +93,6 @@ def __init__(self, block, num_blocks, initial_channels, num_classes, per_img_std
         self.keep_prob = keep_prob
         self.gamma = gamma
         self.patchup_block = patchup_block
-
-        #import pdb; pdb.set_trace()
         self.dropblock = DropBlock(block_size=drop_block, keep_prob=keep_prob)
         self.conv1 = nn.Conv2d(3, initial_channels, kernel_size=3, stride=stride, padding=1, bias=False)
         self.patchup_0 = PatchUp(block_size=self.patchup_block, gamma=self.gamma)
@@ -247,6 +249,3 @@ def preactresnet34(num_classes=10, dropout = False, per_img_std = False, stride=
                     patchup_block=7, patchup_prob=.7):
     return PreActResNet(PreActBlock, [3,4,6,3], 64, num_classes, per_img_std, stride=stride, drop_block=drop_block,
                         keep_prob=keep_prob, gamma=gamma, patchup_block=patchup_block)
-
-
-#PR
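The docstring added above summarizes the architecture at a high level. For readers unfamiliar with the pre-activation ordering it relies on (BN and ReLU before each convolution, He et al. 2016), here is a generic sketch of a pre-activation residual block; it illustrates the pattern only and is not this repo's exact PreActBlock.

```python
import torch.nn as nn
import torch.nn.functional as F

class PreActBlockSketch(nn.Module):
    """Generic pre-activation residual block: BN -> ReLU -> conv, twice."""
    def __init__(self, in_planes, planes, stride=1):
        super().__init__()
        self.bn1 = nn.BatchNorm2d(in_planes)
        self.conv1 = nn.Conv2d(in_planes, planes, 3, stride=stride, padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(planes)
        self.conv2 = nn.Conv2d(planes, planes, 3, stride=1, padding=1, bias=False)
        self.downsample = stride != 1 or in_planes != planes
        if self.downsample:
            self.shortcut = nn.Conv2d(in_planes, planes, 1, stride=stride, bias=False)

    def forward(self, x):
        out = F.relu(self.bn1(x))
        # the skip connection branches off the pre-activated tensor when downsampling
        shortcut = self.shortcut(out) if self.downsample else x
        out = self.conv2(F.relu(self.bn2(self.conv1(out))))
        return out + shortcut
```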

models/wide_resnet.py (-3)

@@ -215,7 +215,6 @@ def get_layer_mix_lam(self, lam, lam_selection, max_rank_glb, k):
 
 
 def wrn28_10(num_classes=10, dropout=False, per_img_std=False, stride=1, drop_block=7, keep_prob=.9, gamma=.9, patchup_block=7):
-    # print ('this')
     model = Wide_ResNet(depth=28, widen_factor=10, num_classes=num_classes, per_img_std=per_img_std, stride=stride,
                         drop_block=drop_block, keep_prob=keep_prob, gamma=gamma, patchup_block=patchup_block)
     return model
@@ -225,5 +224,3 @@ def wrn28_2(num_classes=10, dropout=False, per_img_std=False, stride=1, patchup_
     model = Wide_ResNet(depth=28, widen_factor=2, num_classes=num_classes, per_img_std=per_img_std, stride=stride,
                         drop_block=drop_block, keep_prob=keep_prob, gamma=gamma, patchup_block=patchup_block)
     return model
-
-#PR

modules/cutmix.py (-2)

@@ -43,5 +43,3 @@ def apply(self, inputs, target):
         lam = 1 - ((bbx2 - bbx1) * (bby2 - bby1) / (inputs.size()[-1] * inputs.size()[-2]))
         # compute output
         return target_a, target_b, inputs, lam
-
-#PR
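The kept lam line recomputes the mixing coefficient from the actual box area, which corrects for clipping at the image border. For reference, this is the standard CutMix box sampling that produces (bbx1, bby1, bbx2, bby2) — a generic sketch of Yun et al.'s rand_bbox, not necessarily this repo's exact helper.

```python
import numpy as np

def rand_bbox(width, height, lam):
    """Sample a box covering roughly (1 - lam) of the image area (CutMix)."""
    cut_ratio = np.sqrt(1. - lam)
    cut_w, cut_h = int(width * cut_ratio), int(height * cut_ratio)
    cx, cy = np.random.randint(width), np.random.randint(height)
    bbx1 = np.clip(cx - cut_w // 2, 0, width)
    bby1 = np.clip(cy - cut_h // 2, 0, height)
    bbx2 = np.clip(cx + cut_w // 2, 0, width)
    bby2 = np.clip(cy + cut_h // 2, 0, height)
    return bbx1, bby1, bbx2, bby2
```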

modules/cutout.py (-2)

@@ -56,5 +56,3 @@ def apply(self, img):
         mask = mask.expand_as(img).to(device)
         img = img * mask
         return img
-
-#PR
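For context, the mask applied above is the standard Cutout mask: ones everywhere except one square of zeros at a random center (DeVries & Taylor). A generic sketch of how such a mask is built, not this repo's exact class:

```python
import numpy as np
import torch

def cutout_mask(height, width, length):
    """Return a (height, width) mask zeroing one random length x length square."""
    mask = np.ones((height, width), np.float32)
    cy, cx = np.random.randint(height), np.random.randint(width)
    y1, y2 = np.clip(cy - length // 2, 0, height), np.clip(cy + length // 2, 0, height)
    x1, x2 = np.clip(cx - length // 2, 0, width), np.clip(cx + length // 2, 0, width)
    mask[y1:y2, x1:x2] = 0.  # the cut-out square; may be clipped at the border
    return torch.from_numpy(mask)
```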

modules/drop_block.py (-2)

@@ -56,5 +56,3 @@ def forward(self, x):
         mask = 1 - F.max_pool2d(m_i_j, self.kernel_size, self.stride, self.padding)
         # Normalize the features according to the DropBlock algorithm described in the DropBlock paper.
         return mask * x * (mask.numel() / mask.sum())
-
-#PR
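The kept return line rescales the surviving activations by numel/sum so their expected magnitude matches the unmasked features: if a quarter of the entries are dropped, the rest are scaled by 4/3. A small numeric illustration of that factor (generic, not repo code):

```python
import torch

mask = torch.ones(1, 1, 4, 4)
mask[0, 0, :2, :2] = 0             # drop a 2x2 block: 4 of 16 entries
scale = mask.numel() / mask.sum()  # 16 / 12, i.e. about 1.333
x = torch.randn(1, 1, 4, 4)
out = mask * x * scale             # surviving activations are scaled up
print(scale)                       # tensor(1.3333)
```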

modules/mixup.py (-2)

@@ -39,5 +39,3 @@ def get_lambda(alpha=1.0):
     else:
         lam = 1.
     return lam
-
-#PR
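get_lambda draws the mixing coefficient from Beta(alpha, alpha), or returns 1 (no mixing) when alpha is 0. A typical way such a lambda is consumed downstream — a generic mixup sketch in the style of Zhang et al., not this repo's training loop:

```python
import numpy as np
import torch

def mixup_batch(x, y, alpha=1.0):
    """Mix a batch with a shuffled copy of itself; returns both label sets."""
    lam = np.random.beta(alpha, alpha) if alpha > 0 else 1.
    index = torch.randperm(x.size(0))
    mixed_x = lam * x + (1 - lam) * x[index]
    # loss = lam * CE(out, y_a) + (1 - lam) * CE(out, y_b)
    return mixed_x, y, y[index], lam
```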

modules/patchup.py (+1 -2)

@@ -17,7 +17,7 @@ class PatchUp(nn.Module):
     """
     PatchUp Module.
 
-    This module is responsible to apply either Soft PatchUp or Hard PatchUp after a convolutional module
+    This module is responsible for applying either Soft PatchUp or Hard PatchUp after a convolutional module
     or convolutional residual block.
     """
     def __init__(self, block_size=7, gamma=0.9, patchup_type=PatchUpMode.SOFT):
@@ -136,6 +136,5 @@ def forward(self, x, targets=None, lam=None, patchup_type=PatchUpMode.SOFT):
             target_a = targets
         return target_a, target_b, target_reweighted, x, total_unchanged_portion
 
-#PR
 
 

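As the revised docstring says, PatchUp mixes contiguous feature-map blocks between pairs of examples: Hard PatchUp swaps the masked blocks, Soft PatchUp interpolates them. A heavily condensed sketch of that idea, assuming a DropBlock-style binary mask is already given; the real module also reweights the targets and returns the bookkeeping seen in the forward above.

```python
import torch

def patchup_sketch(x, lam, block_mask, soft=True):
    """x: (N, C, H, W) features; block_mask: 1 where features stay unchanged.

    Condensed illustration of the PatchUp idea, not the module's exact code.
    """
    index = torch.randperm(x.size(0))  # pair each example with a shuffled partner
    patches = x * (1 - block_mask)
    partner = x[index] * (1 - block_mask)
    if soft:
        mixed = lam * patches + (1 - lam) * partner  # soft: convex combination
    else:
        mixed = partner                              # hard: take partner's blocks
    return x * block_mask + mixed
```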
utility/activation_study.py (-1)

@@ -176,4 +176,3 @@ def main():
 if __name__ == '__main__':
     main()
 
-#PR

utility/adversarial_attack.py (-2)

@@ -86,5 +86,3 @@ def run_test_adversarial(net, loader, epsilon):
     t_accuracy = 100. * correct * 1.0 / total
     t_loss = t_loss / total
     return t_accuracy, t_loss
-
-#PR
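For context on the kept lines: they average loss and accuracy over the full loader. A generic shape for such an adversarial evaluation loop, reusing an attack function like the FGSM sketch earlier; this is illustrative only, and the repo's run_test_adversarial differs in detail.

```python
import torch
import torch.nn.functional as F

def eval_under_attack(net, loader, epsilon, attack):
    """Evaluate accuracy/loss on adversarially perturbed inputs (generic sketch)."""
    net.eval()
    correct, total, t_loss = 0, 0, 0.0
    for x, y in loader:
        x_adv = attack(net, x, y, epsilon)  # gradients are needed inside the attack
        with torch.no_grad():
            out = net(x_adv)
        t_loss += F.cross_entropy(out, y, reduction='sum').item()
        correct += (out.argmax(dim=1) == y).sum().item()
        total += y.size(0)
    return 100. * correct / total, t_loss / total
```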

utility/plots.py (-2)

@@ -52,5 +52,3 @@ def plotting(exp_dir):
 
 if __name__ == '__main__':
     plotting('temop')
-
-#PR_2

utility/utils.py (-1)

@@ -233,7 +233,6 @@ def copy_script_to_folder(caller_path, folder):
     # Copying script
     shutil.copy(caller_path, script_relative_path)
 
-#PR_2
 
 
 