This repository was archived by the owner on Apr 13, 2025. It is now read-only.

Commit 3b64bdd ("first upload")
1 parent: 6c8c538
17 files changed: +1285 -2 lines

.gitignore (+7)

@@ -127,3 +127,10 @@ dmypy.json
 
 # Pyre type checker
 .pyre/
+
+dataset/
+runs/
+checkpoints/
+valid/
+test/output_norm/
+test/output_disp/

README.md (+6, -2)

@@ -1,2 +1,6 @@
-# Normal-map-generator
-Generate a normal map from a photo texture with UNet
+# Normal map generator
+Generate a normal map from a photo texture with UNet.
+
+## About this Project
+
+
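
The new "About this Project" section is still empty in this commit. As a rough illustration of the one-line description, here is a minimal inference sketch modeled on eval_disp.py (added later in this commit); the checkpoint path and out_channels=3 (RGB normal map) are assumptions, not something this commit defines.

# Sketch only (not part of this commit): turn one photo texture into a normal map,
# following the same pattern as eval_disp.py below. The checkpoint path and
# out_channels=3 are assumed.
import torch
from PIL import Image
from torchvision import transforms
from torchvision.utils import save_image

from model import Unet  # the UNet defined in this repository

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

transform = transforms.Compose([
    transforms.Resize(512),
    transforms.CenterCrop(512),
    transforms.ToTensor(),
    transforms.Normalize(mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5)),  # scale to [-1, 1]
])

net = Unet(out_channels=3).to(device)                     # 3 channels for a normal map (assumed)
checkpoint = torch.load("checkpoints/norm/norm_net.pth",  # hypothetical checkpoint path
                        map_location=device)
net.load_state_dict(checkpoint["model"])
net.eval()

img = transform(Image.open("photo.jpg").convert("RGB")).unsqueeze(0).to(device)
with torch.no_grad():
    normal = net(img)
save_image(normal, "normal.png", value_range=(-1, 1), normalize=True)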

crawler/cc0_crawler.py (new file, +124)

# -*- coding: utf-8 -*-
"""
Created on Tue Jun  1 19:12:47 2021

@author: gama0
"""

import os
import time

import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent
from tqdm import tqdm

# a list of textures to NOT include
filters = ['AcousticFoam',
           'Candy',
           'ChristmasTreeOrnament',
           'Facade',
           'Fence',
           'Fingerprint',
           'Foam',
           'OfficeCeiling',
           'Paint00',
           'Painting',
           'PineNeedles',
           'Porcelain',
           'Road',
           'Rust001',
           'Sign',
           'Scratches',
           'Smear',
           'Sticker',
           'Tape',
           'SurfaceImperfections',
           'Footsteps',
           'Substance']

# ambientCG (CC0 textures) website
output_path = '../dataset/zip/'
home = 'https://ambientcg.com/'
url = 'https://ambientcg.com/list?type=PhotoTexturePBR&sort=Alphabet&limit=180'

if not os.path.exists(output_path):
    os.makedirs(output_path)

#%%
def main():
    # random user agent for each request
    user_agent = UserAgent()
    download_url = []

    method_url = [url + '&method=HeightFieldPhotogrammetry',
                  url + '&method=MultiAngleApproximation',
                  url + '&method=SubstanceDesignerPhotoBased']

    # for methods with multiple pages of content (1 page has 180 links)
    offsets = [0, 180, 360, 540]
    for offset in offsets:
        method_url.append(url + '&method=SubstanceDesignerProcedural' + '&offset={}'.format(offset))
        method_url.append(url + '&method=BitmapApproximation' + '&offset={}'.format(offset))

    #%%
    # =========================== collect the texture page urls into "download_url" ===========================
    for list_url in method_url:
        resp = requests.get(list_url, headers={'user-agent': user_agent.random})
        soup = BeautifulSoup(resp.text, 'html.parser')

        for elem in soup.find_all('a'):
            href = elem.get('href') or ''
            # home + id = the texture page url
            if href.startswith("./view?id="):
                download_url.append(home + href.split('/')[-1])

    # =========================== filters ===========================
    # Don't remove items from the list while iterating over it (that skips elements);
    # build a filtered copy instead.
    download_url = [u for u in download_url
                    if not any(u.split('=')[-1].startswith(word) for word in filters)]
    download_url.sort()

    #%%
    # =========================== request each download url ===========================
    counts = 0  # if the crawl was interrupted, set this to the index to resume from
    for i in tqdm(range(len(download_url))):
        if i < counts:
            continue

        resp = requests.get(download_url[i],  # e.g. https://ambientcg.com/view?id=Tiles098
                            headers={'user-agent': user_agent.random})
        soup = BeautifulSoup(resp.text, 'html.parser')

        # collect the download links (this is where you could choose PNG or JPG)
        zip_url = [elem.get('href') for elem in soup.find_all('a', class_="DownloadButton")]
        if not zip_url:  # skip pages without a download button
            continue

        r = requests.get(zip_url[0],  # e.g. https://ambientcg.com/get?file=Tiles098_1K-JPG.zip
                         headers={'user-agent': user_agent.random})

        with open(output_path + zip_url[0].split('=')[-1], "wb") as f:
            f.write(r.content)

        time.sleep(0.1)
        counts += 1

if __name__ == "__main__":
    main()
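
The two crawler scripts are meant to run in order from inside the crawler/ directory, since both use paths relative to it (../dataset/...). A minimal sketch of that order (cc0_unpack.py is the next file in this commit):

# Sketch only: run the data pipeline in order, from inside crawler/.
import cc0_crawler
import cc0_unpack

cc0_crawler.main()   # download texture zips into ../dataset/zip/
cc0_unpack.main()    # extract color/normal/displacement maps into ../dataset/train/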

crawler/cc0_unpack.py (new file, +75)

# -*- coding: utf-8 -*-
"""
Created on Thu May 20 10:01:56 2021

@author: Eric
"""

import os
from zipfile import ZipFile, BadZipFile

from tqdm import tqdm

DIR_IN = "../dataset/zip"
DIR_OUT = "../dataset/train"  # + color, normal, displacement

color_dir = os.path.join(DIR_OUT, "color")
normal_dir = os.path.join(DIR_OUT, "normal")
displace_dir = os.path.join(DIR_OUT, "displacement")

for d in (DIR_OUT, color_dir, normal_dir, displace_dir):
    os.makedirs(d, exist_ok=True)

#%%
def main():
    zip_list = [name for name in os.listdir(DIR_IN) if ".zip" in name]
    name_list = [name.split("_")[0] for name in zip_list]

    for zip_name in tqdm(zip_list):
        filename = zip_name.split("_")[0]
        try:
            with ZipFile(os.path.join(DIR_IN, zip_name), "r") as z:

                # mark which of the three maps the archive contains
                checklist = [0, 0, 0]
                for file in z.namelist():
                    if "Color" in file:
                        checklist[0] = 1
                        color_file = file

                    if "Normal" in file:
                        checklist[1] = 1
                        norm_file = file

                    if "Displacement" in file:
                        checklist[2] = 1
                        disp_file = file

                # only extract textures that have all three maps
                if 0 not in checklist:
                    z.extract(color_file, color_dir)
                    os.replace(os.path.join(color_dir, color_file), os.path.join(color_dir, filename) + ".jpg")

                    z.extract(norm_file, normal_dir)
                    os.replace(os.path.join(normal_dir, norm_file), os.path.join(normal_dir, filename) + ".jpg")

                    z.extract(disp_file, displace_dir)
                    os.replace(os.path.join(displace_dir, disp_file), os.path.join(displace_dir, filename) + ".jpg")

        except BadZipFile:
            pass

    print("\nExtraction Done!")

    # write the texture names (one per downloaded zip) for later use
    name_list = sorted(name_list)
    with open(os.path.join(DIR_OUT, "name_list.txt"), "w") as output:
        for name in name_list:
            output.write(name + "\n")

if __name__ == "__main__":
    main()
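
name_list.txt records one name per downloaded zip, while only textures with all three maps get extracted as <name>.jpg under color/, normal/ and displacement/. A quick check for names that ended up incomplete, offered only as a sketch (it mirrors the DIR_OUT layout created above):

# Sketch only: report textures that are missing one of the three maps.
import os

DIR_OUT = "../dataset/train"

with open(os.path.join(DIR_OUT, "name_list.txt")) as f:
    names = [line.strip() for line in f if line.strip()]

for name in names:
    for sub in ("color", "normal", "displacement"):
        path = os.path.join(DIR_OUT, sub, name + ".jpg")
        if not os.path.exists(path):
            print("missing:", path)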

eval_disp.py (new file, +99)

# -*- coding: utf-8 -*-
"""
Created on Wed Jun  9 15:36:10 2021

@author: Eric
"""

from model import Unet
from utils import gray2rgb

import torch
from torchvision import transforms
from torch.utils.data import DataLoader
from torch.utils.data import Dataset
from torchvision.utils import save_image

import os
import glob
from PIL import Image

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

#%%
PATH_CHK = "checkpoints/disp/disp_net_epoch_010.pth"
DIR_EVAL = "test"
CROP = 512

#%%
transform = transforms.Compose([
    transforms.Resize(CROP),
    transforms.CenterCrop(CROP),
    transforms.ToTensor(),
    transforms.Normalize(mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5))  # (input - mean) / std
    # outputs range from -1 to 1
])

# grayscale variant (not used in this script)
gray_transform = transforms.Compose([
    transforms.Resize(CROP),
    transforms.CenterCrop(CROP),
    transforms.ToTensor(),
    transforms.Normalize(mean=(0.5,), std=(0.5,))  # (input - mean) / std
    # outputs range from -1 to 1
])

class TestDataset(Dataset):
    def __init__(self, img_dir):
        self.file_list = glob.glob(img_dir + "/*.jpg")
        self.names = [os.path.splitext(os.path.basename(fp))[0] for fp in self.file_list]

    def __len__(self):
        return len(self.names)

    def __getitem__(self, i):
        img = Image.open(self.file_list[i]).convert('RGB')
        img = transform(img)

        return img, self.names[i]

#%% test
def test(net, in_folder, out_folder):
    output_dir = os.path.join(out_folder, "output")
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)

    data_test = TestDataset(in_folder)
    testloader = DataLoader(data_test, batch_size=1, shuffle=False)

    print("\nOutput test files...")

    net.eval()
    with torch.no_grad():
        for idx, data in enumerate(testloader):
            img_in = data[0].to(device)
            img_out = net(img_in)
            # expand the single-channel prediction to three channels before saving
            img_out = gray2rgb(img_out).to(device)

            img_out_filename = os.path.join(output_dir, f"{data[1][0]}.png")
            save_image(img_out, img_out_filename, value_range=(-1, 1), normalize=True)

    print("Done!")

#%%
def main():
    input_folder = os.path.join(DIR_EVAL, "input")

    disp_net = Unet(out_channels=1).to(device)
    checkpoint = torch.load(PATH_CHK)
    disp_net.load_state_dict(checkpoint["model"])

    test(disp_net, input_folder, DIR_EVAL)

if __name__ == "__main__":
    main()
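
To evaluate, place .jpg photos under test/input/ and run eval_disp.py; the predicted displacement maps are written to test/output/ as PNGs. The helpers can also be reused directly, as in this sketch (the epoch-020 checkpoint name is hypothetical):

# Sketch only: reuse the evaluation helpers above with another checkpoint and folder.
import os
import torch

from model import Unet
from eval_disp import test, device

disp_net = Unet(out_channels=1).to(device)
checkpoint = torch.load("checkpoints/disp/disp_net_epoch_020.pth",  # hypothetical checkpoint
                        map_location=device)
disp_net.load_state_dict(checkpoint["model"])

# reads test/input/*.jpg and writes test/output/<name>.png
test(disp_net, os.path.join("test", "input"), "test")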
