Commit 7c12198
committed to main · 1 parent c675bd5

File tree

213 files changed: +46368 −0 lines


.idea/.gitignore (+8)
.idea/BFDA_old_yolov5.iml (+8)
.idea/deployment.xml (+79)
.idea/encodings.xml (+7)
.idea/inspectionProfiles/profiles_settings.xml (+6)
.idea/misc.xml (+4)
.idea/modules.xml (+8)

(Generated IDE configuration files; not rendered by default.)

Cheat.py (+100)

@@ -0,0 +1,100 @@
from domain_ad import *  # expected to provide bce_loss, ls_loss, d_first_add, dis_model_2, ddcpp_2,
                         # TRAIN_LAYER, TRAIN_LAMBADA_ADV_*, MY_6, MY_13, loss_variance_trg, scaler


def cheatbackbone_back(feature, d_1, d_2, d_3, d_4, MY_4, MY_1, TRAIN_GANLOSS, ADD):
    d_out_first = d_1(feature[0])  # d_out_first: [b, 2, 8, 8]

    if not MY_4:  # use senior labmate Zhang Bo's method
        if MY_1:
            source_label = 0.5
        if TRAIN_GANLOSS == 'BCE':
            loss_adv_trg_first = bce_loss(d_out_first, source_label)
        elif TRAIN_GANLOSS == 'LS':
            loss_adv_trg_first = ls_loss(d_out_first, source_label)
        if ADD == 1:
            d_out_first_add = d_first_add(feature[0])  # d_out_first_add: [24, 1, 2, 2]
            if TRAIN_GANLOSS == 'BCE':
                loss_adv_trg_first_add = bce_loss(d_out_first_add, source_label)
            elif TRAIN_GANLOSS == 'LS':
                loss_adv_trg_first_add = ls_loss(d_out_first_add, source_label)
        if TRAIN_LAYER > 1:
            if ddcpp_2 == 0:
                d_out_second = d_2(feature[1])  # [24, 1, 1, 1]
            if ddcpp_2 == 1:
                d_out_second = dis_model_2(feature[1])  # [24, 1, 1, 1]
            if TRAIN_GANLOSS == 'BCE':
                loss_adv_trg_second = bce_loss(d_out_second, source_label)
            elif TRAIN_GANLOSS == 'LS':
                loss_adv_trg_second = ls_loss(d_out_second, source_label)
        if TRAIN_LAYER > 2:
            d_out_third = d_3(feature[2])  # [24, 1, 1, 1]
            if TRAIN_GANLOSS == 'BCE':
                loss_adv_trg_third = bce_loss(d_out_third, source_label)
            elif TRAIN_GANLOSS == 'LS':
                loss_adv_trg_third = ls_loss(d_out_third, source_label)
        if TRAIN_LAYER > 3:
            d_out_forth = d_4(feature[3])  # [24, 1, 1, 1]
            if TRAIN_GANLOSS == 'BCE':
                loss_adv_trg_forth = bce_loss(d_out_forth, source_label)
            elif TRAIN_GANLOSS == 'LS':
                loss_adv_trg_forth = ls_loss(d_out_forth, source_label)
        # weighted sum of the per-layer adversarial losses
        loss_adv = TRAIN_LAMBADA_ADV_FIRST * loss_adv_trg_first
        if ADD == 1:
            loss_adv = loss_adv + TRAIN_LAMBADA_ADV_FIRST * loss_adv_trg_first_add
        if TRAIN_LAYER > 1:
            loss_adv = loss_adv + TRAIN_LAMBADA_ADV_SECOND * loss_adv_trg_second
        if TRAIN_LAYER > 2:
            loss_adv = loss_adv + TRAIN_LAMBADA_ADV_THIRD * loss_adv_trg_third
        if TRAIN_LAYER > 3:
            loss_adv = loss_adv + TRAIN_LAMBADA_ADV_FORTH * loss_adv_trg_forth
        if MY_6 and not MY_13:
            loss_adv += loss_variance_trg
        scaler.scale(loss_adv).backward()

    if MY_4:  # use the reversed gradient
        if MY_1:
            target_label = 0.5
        if TRAIN_GANLOSS == 'BCE':
            loss_adv_trg_first = bce_loss(d_out_first, target_label)
        elif TRAIN_GANLOSS == 'LS':
            loss_adv_trg_first = ls_loss(d_out_first, target_label)
        if ADD == 1:
            d_out_first_add = d_first_add(feature[0])  # d_out_first_add: [24, 1, 2, 2]
            if TRAIN_GANLOSS == 'BCE':
                loss_adv_trg_first_add = bce_loss(d_out_first_add, target_label)
            elif TRAIN_GANLOSS == 'LS':
                loss_adv_trg_first_add = ls_loss(d_out_first_add, target_label)
        if TRAIN_LAYER > 1:
            if ddcpp_2 == 0:
                d_out_second = d_2(feature[1])  # [24, 1, 1, 1]
            if ddcpp_2 == 1:
                d_out_second = dis_model_2(feature[1])  # [24, 1, 1, 1]
            if TRAIN_GANLOSS == 'BCE':
                loss_adv_trg_second = bce_loss(d_out_second, target_label)
            elif TRAIN_GANLOSS == 'LS':
                loss_adv_trg_second = ls_loss(d_out_second, target_label)
        if TRAIN_LAYER > 2:
            d_out_third = d_3(feature[2])  # [24, 1, 1, 1]
            if TRAIN_GANLOSS == 'BCE':
                loss_adv_trg_third = bce_loss(d_out_third, target_label)
            elif TRAIN_GANLOSS == 'LS':
                loss_adv_trg_third = ls_loss(d_out_third, target_label)
        if TRAIN_LAYER > 3:
            d_out_forth = d_4(feature[3])  # [24, 1, 1, 1]
            if TRAIN_GANLOSS == 'BCE':
                loss_adv_trg_forth = bce_loss(d_out_forth, target_label)
            elif TRAIN_GANLOSS == 'LS':
                loss_adv_trg_forth = ls_loss(d_out_forth, target_label)
        loss_adv = TRAIN_LAMBADA_ADV_FIRST * loss_adv_trg_first
        if ADD == 1:
            loss_adv = loss_adv + TRAIN_LAMBADA_ADV_FIRST * loss_adv_trg_first_add
        if TRAIN_LAYER > 1:
            loss_adv = loss_adv + TRAIN_LAMBADA_ADV_SECOND * loss_adv_trg_second
        if TRAIN_LAYER > 2:
            loss_adv = loss_adv + TRAIN_LAMBADA_ADV_THIRD * loss_adv_trg_third
        if TRAIN_LAYER > 3:
            loss_adv = loss_adv + TRAIN_LAMBADA_ADV_FORTH * loss_adv_trg_forth
        # flip the sign so the backbone is pushed against the discriminator
        loss_adv = -loss_adv
        if MY_6 and not MY_13:
            loss_adv += loss_variance_trg
        scaler.scale(loss_adv).backward()
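
Note: bce_loss and ls_loss come from the wildcard import of domain_ad and are not part of this commit. The following is a minimal sketch of the interface the code above assumes, with a scalar label broadcast to the discriminator output; it is an illustrative guess, not the actual domain_ad implementation.

import torch
import torch.nn.functional as F

def bce_loss(pred, label):
    # Binary cross-entropy of discriminator logits against a constant
    # target map filled with `label` (0, 1, or the 0.5 soft label above).
    target = torch.full_like(pred, float(label))
    return F.binary_cross_entropy_with_logits(pred, target)

def ls_loss(pred, label):
    # Least-squares (LSGAN-style) loss against the same constant target.
    target = torch.full_like(pred, float(label))
    return torch.mean((pred - target) ** 2)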

MR_2.py (+16)

@@ -0,0 +1,16 @@
import os
from pycocotools.coco import COCO
from eval_MR_multisetup import COCOeval


def validate(annFile, dt_path):
    mean_MR = []
    for id_setup in range(0, 4):
        cocoGt = COCO(annFile)            # ground-truth annotations
        cocoDt = cocoGt.loadRes(dt_path)  # detection results
        imgIds = sorted(cocoGt.getImgIds())
        cocoEval = COCOeval(cocoGt, cocoDt, 'bbox')
        cocoEval.params.imgIds = imgIds
        cocoEval.evaluate(id_setup)
        cocoEval.accumulate()
        mean_MR.append(cocoEval.summarize_nofile(id_setup))
    return mean_MR
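
validate takes a COCO-format annotation file and a detection-results JSON and returns the miss rate for each of the four evaluation setups. A usage sketch with placeholder paths (the file names below are hypothetical, not files from this repository):

if __name__ == '__main__':
    ann_file = 'annotations/val_gt_coco.json'      # COCO-style ground truth (placeholder path)
    dt_file = 'runs/detections_coco_results.json'  # detections in COCO result format (placeholder path)
    for id_setup, mr in enumerate(validate(ann_file, dt_file)):
        print(f'setup {id_setup}: MR = {mr}')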

check_deeplabv3_ss.py (+32)

@@ -0,0 +1,32 @@
import torch
# import matplotlib
# matplotlib.use('TkAgg')
import matplotlib.pyplot as plt
from PIL import Image
import numpy as np
from torchvision import models
from torchvision import transforms
import cv2
import time

size = (640, 320)
# load a pretrained DeepLabV3 model for semantic segmentation
model = models.segmentation.deeplabv3_resnet101(pretrained=True)
model = model.eval()

img = cv2.imread(r'/remote-home/caiyancheng/BFDA_datasets/cityscapes/images/val_all/frankfurt/frankfurt_000001_057181_leftImg8bit.png')
img = cv2.resize(img, size)
img = torch.tensor(img).permute(2, 0, 1)[None, ...] / 255

start_time = time.time()
output = model(img)
end_time = time.time()
time_using = end_time - start_time  # elapsed inference time in seconds
print(time_using)
print(output['out'].shape)
output = torch.argmax(output['out'].squeeze(), dim=0).detach().cpu().numpy()

# keep class 15 (person in the VOC label set used by torchvision) as foreground
output[output == 15] = 255
output[output != 255] = 0

cv2.imwrite(f'/remote-home/caiyancheng/outlabel_frankfurt_000001_057181_leftImg8bit_size_{size[0]}_size{size[1]}_time_{time_using}.png', np.array(output))
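
Since the script only measures inference, the forward pass could also be wrapped in torch.no_grad() so the reported time excludes autograd bookkeeping; a small variant of the timed section, under that assumption:

with torch.no_grad():
    start_time = time.time()
    output = model(img)
    end_time = time.time()
time_using = end_time - start_time  # seconds for one forward pass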
