Commit 877ffa8

Commit message: 1.3.2.190214
1 parent: 7ecfb1c

11 files changed (+68, -48 lines)

AutoAi/data/dataset.py

Lines changed: 54 additions & 0 deletions
@@ -0,0 +1,54 @@
+from utitls.imagefolder_splitter import ImageFolderSplitter
+import os, csv, cv2
+
+RESIZE = 128
+
+IMG_DIR = '/home/tian/Desktop/spiders/design/design/spiders/image'
+# Folders for the resized training and testing images
+RESIZE_TRAIN_IMG_DIR = './data/resize/train'
+RESIZE_TEST_IMG_DIR = './data/resize/test'
+
+# Paths of the generated csv files
+TRAIN_CSV_DIR = './data/train_labels.csv'
+TEST_CSV_DIR = './data/test_labels.csv'
+
+
+# write csv
+def write_csv(img_dir, csv_dir):
+    list = []
+    list.append(['File Name', 'Label'])
+    for file_name in os.listdir(img_dir):
+        for img in os.listdir("%s/%s" % (img_dir, file_name)):
+            item = [file_name + "/" + img, file_name]
+            list.append(item)
+    f = open(csv_dir, 'w')
+    writer = csv.writer(f)
+    writer.writerows(list)
+
+
+# resize images
+def resize_img(path, data):
+    for i, img_file in enumerate(data[0]):
+        cls_name = data[1][i]
+        try:
+            img = cv2.imread(img_file)
+            img = cv2.resize(img, (RESIZE, RESIZE), interpolation=cv2.INTER_LINEAR)
+        except:
+            print(img_file)
+            continue
+        img_name = img_file[img_file.rfind('/') + 1:]
+        if os.path.exists("%s/%s" % (path, cls_name)):
+            cv2.imwrite("%s/%s/%s" % (path, cls_name, img_name), img)
+        else:
+            os.makedirs("%s/%s" % (path, cls_name))
+            cv2.imwrite("%s/%s/%s" % (path, cls_name, img_name), img)
+
+
+if __name__ == '__main__':
+    splitter = ImageFolderSplitter(IMG_DIR)
+    # print("Resize images...")
+    resize_img(RESIZE_TRAIN_IMG_DIR, splitter.getTrainingDataset())
+    resize_img(RESIZE_TEST_IMG_DIR, splitter.getValidationDataset())
+    # print("write csv...")
+    write_csv(RESIZE_TRAIN_IMG_DIR, TRAIN_CSV_DIR)
+    write_csv(RESIZE_TEST_IMG_DIR, TEST_CSV_DIR)
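Note: write_csv above never closes the csv file handle and shadows the built-in list. A minimal alternative sketch that produces the same "File Name,Label" layout; the name write_csv_safe and the temporary-directory demo are illustrative additions, not part of the commit:

    import csv
    import os
    import tempfile

    def write_csv_safe(img_dir, csv_path):
        # Same output format as write_csv above, but the file is closed
        # deterministically and newline='' avoids blank rows on Windows.
        rows = [['File Name', 'Label']]
        for cls_name in os.listdir(img_dir):
            for img in os.listdir(os.path.join(img_dir, cls_name)):
                rows.append([cls_name + '/' + img, cls_name])
        with open(csv_path, 'w', newline='') as f:
            csv.writer(f).writerows(rows)

    # tiny self-contained demo on a temporary class folder
    root = tempfile.mkdtemp()
    os.makedirs(os.path.join(root, 'cat'))
    open(os.path.join(root, 'cat', '0.jpg'), 'wb').close()
    write_csv_safe(root, os.path.join(root, 'labels.csv'))
    print(open(os.path.join(root, 'labels.csv')).read())
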
File renamed without changes.

AutoAi/train.py renamed to AutoAi/main.py

Lines changed: 3 additions & 39 deletions
@@ -1,40 +1,12 @@
-import os, csv, cv2
 from autokeras.image.image_supervised import load_image_dataset, ImageClassifier
 from keras.models import load_model
 from keras.utils import plot_model
 from keras.preprocessing.image import load_img, img_to_array
 import numpy as np
-from imagefolder_splitter import ImageFolderSplitter
+import cv2
 from autokeras.constant import Constant  # fine-tuning settings, such as batch_size, image_size, etc.
 
 
-# write csv
-def write_csv(img_dir, csv_dir):
-    list = []
-    list.append(['File Name', 'Label'])
-    for file_name in os.listdir(img_dir):
-        for img in os.listdir("%s/%s" % (img_dir, file_name)):
-            item = [file_name + "/" + img, file_name]
-            list.append(item)
-    f = open(csv_dir, 'w')
-    writer = csv.writer(f)
-    writer.writerows(list)
-
-
-# resize images
-def resize_img(path, data):
-    for i, img_file in enumerate(data[0]):
-        cls_name = data[1][i]
-        img = cv2.imread(img_file)
-        img = cv2.resize(img, (RESIZE, RESIZE), interpolation=cv2.INTER_LINEAR)
-        img_name = img_file[img_file.rfind('/') + 1:]
-        if os.path.exists("%s/%s" % (path, cls_name)):
-            cv2.imwrite("%s/%s/%s" % (path, cls_name, img_name), img)
-        else:
-            os.makedirs("%s/%s" % (path, cls_name))
-            cv2.imwrite("%s/%s/%s" % (path, cls_name, img_name), img)
-
-
 def train_autokeras(RESIZE_TRAIN_IMG_DIR, RESIZE_TEST_IMG_DIR, TRAIN_CSV_DIR, TEST_CSV_DIR, TIME):
     # Load images
     train_data, train_labels = load_image_dataset(csv_file_path=TRAIN_CSV_DIR, images_path=RESIZE_TRAIN_IMG_DIR)  # load the data
@@ -73,7 +45,6 @@ def predict(MODEL_DIR, PREDICT_IMG_PATH, RESIZE):
 
 if __name__ == "__main__":
     # Folder for storing training images
-    IMG_DIR = '/home/tian/Desktop/spiders/design/design/spiders/image'
     # Folder for storing testing images
     RESIZE_TRAIN_IMG_DIR = './data/resize/train'
     RESIZE_TEST_IMG_DIR = './data/resize/test'
@@ -93,13 +64,6 @@ def predict(MODEL_DIR, PREDICT_IMG_PATH, RESIZE):
     RESIZE = 128
     # Set the training time, this is half an hour
     TIME = 0.5 * 60 * 60  # training time: half an hour
-    splitter = ImageFolderSplitter(IMG_DIR)
-    # print("Resize images...")
-    # resize_img(RESIZE_TRAIN_IMG_DIR, splitter.getTrainingDataset())
-    # resize_img(RESIZE_TEST_IMG_DIR, splitter.getValidationDataset())
-    # print("write csv...")
-    # write_csv(RESIZE_TRAIN_IMG_DIR, TRAIN_CSV_DIR)
-    # write_csv(RESIZE_TEST_IMG_DIR, TEST_CSV_DIR)
     print("============Load...=================")
-    # train_autokeras(RESIZE_TRAIN_IMG_DIR, RESIZE_TEST_IMG_DIR, TRAIN_CSV_DIR, TEST_CSV_DIR, TIME)
-    predict(MODEL_DIR, PREDICT_IMG_PATH, RESIZE)
+    # train_autokeras(RESIZE_TRAIN_IMG_DIR, RESIZE_TEST_IMG_DIR, TRAIN_CSV_DIR, TEST_CSV_DIR, TIME)  # train
+    predict(MODEL_DIR, PREDICT_IMG_PATH, RESIZE)  # predict
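
Note: only the first line of train_autokeras appears in this diff. As a rough sketch of the autokeras 0.x workflow the imports above suggest (not the repository's actual implementation; the function name, model_path argument, and use of export_keras_model are assumptions based on the 0.x API as I recall it):

    from autokeras.image.image_supervised import load_image_dataset, ImageClassifier

    def train_autokeras_sketch(train_csv, train_dir, test_csv, test_dir, time_limit, model_path):
        # Load the resized images and labels from the CSVs written by data/dataset.py
        x_train, y_train = load_image_dataset(csv_file_path=train_csv, images_path=train_dir)
        x_test, y_test = load_image_dataset(csv_file_path=test_csv, images_path=test_dir)

        clf = ImageClassifier(verbose=True)
        clf.fit(x_train, y_train, time_limit=time_limit)               # architecture search within the time budget
        clf.final_fit(x_train, y_train, x_test, y_test, retrain=True)  # retrain the best model found
        print(clf.evaluate(x_test, y_test))
        clf.export_keras_model(model_path)                             # so keras.models.load_model can reload it later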

AutoAi/utitls/__init__.py

Whitespace-only changes.
File renamed without changes.

Generate_image/config.py

Lines changed: 2 additions & 2 deletions
@@ -8,8 +8,8 @@ class Config(object):
     image_size = 96  # image size
     batch_size = 32
     max_epoch = 4000
-    lr1 = 0.001  # 2e-4  # generator learning rate 2.7*2-4
-    lr2 = 0.001  # 2e-4  # discriminator learning rate
+    lr1 = 2e-4  # 2e-4  # generator learning rate 2.7*2-4
+    lr2 = 2e-4  # 2e-4  # discriminator learning rate
     beta1 = 0.5  # beta1 parameter of the Adam optimizers
     use_gpu = True  # whether to use the GPU
     nz = 100  # noise dimensionality
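
Note: lr1, lr2 and beta1 are presumably consumed when the two Adam optimizers of the DCGAN are built in Generate_image/main.py. A self-contained sketch of that standard wiring; the netg/netd stand-ins below are placeholders, not the project's networks:

    import torch
    from torch import nn

    # placeholder generator and discriminator; only the optimizer wiring matters here
    netg = nn.Sequential(nn.ConvTranspose2d(100, 3, kernel_size=4))
    netd = nn.Sequential(nn.Conv2d(3, 1, kernel_size=4))

    lr1, lr2, beta1 = 2e-4, 2e-4, 0.5  # values from Config above
    optimizer_g = torch.optim.Adam(netg.parameters(), lr=lr1, betas=(beta1, 0.999))
    optimizer_d = torch.optim.Adam(netd.parameters(), lr=lr2, betas=(beta1, 0.999))
    print(optimizer_g.defaults['lr'], optimizer_d.defaults['lr'])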

Generate_image/main.py

Lines changed: 1 addition & 1 deletion
@@ -111,7 +111,7 @@ def train(**kwargs):
 
 def generate(**kwargs):
     """
-    Randomly generate anime avatars and keep the better ones according to netd's score
+    Randomly generate images and keep the better ones according to netd's score
     """
     with t.no_grad():
         opt._parse(kwargs)

Image_recognition/config.py

Lines changed: 3 additions & 3 deletions
@@ -8,11 +8,11 @@ class DefaultConfig(object):
     env = 'opalus_recognltion'  # visdom environment
     vis_port = 8097  # visdom port
     image_size = 224  # image size
-    model = 'AlexNet1'  # model to use; the name must match one defined in models/__init__.py
+    model = 'ResNet152'  # model to use; the name must match one defined in models/__init__.py
 
     data_root = "/home/tian/Desktop/spiders/design/design/spiders/image"  # dataset root path
     load_model_path = None  # path of a trained model to load; None means no model is loaded
-    # load_model_path = './checkpoint/AlexNet1_0130_12-21-31.pth.tar'
+    # load_model_path = './checkpoint/AlexNet1_0214_10-06-50.pth.tar'
 
     batch_size = 16  # number of samples per training batch; reduce if GPU memory is insufficient
     use_gpu = True  # use GPU or not
@@ -25,7 +25,7 @@ class DefaultConfig(object):
     # pretrained = False  # do not load pretrained weights
     pretrained = True  # load pretrained weights
 
-    max_epoch = 10  # number of training epochs
+    max_epoch = 20  # number of training epochs
     lr = 0.001  # initial learning rate
     lr_decay = 0.5  # when val_loss increases, lr = lr * lr_decay
     weight_decay = 0e-5  # weight decay (L2 regularization)
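
Note: model = 'ResNet152' together with pretrained = True presumably resolves to a torchvision backbone wrapped in models/__init__.py. A minimal sketch of what such a wrapper typically looks like; the class layout and num_classes value are assumptions, not the repository's code:

    import torch
    from torch import nn
    from torchvision import models

    class ResNet152(nn.Module):
        """Load the torchvision backbone and replace the final fully connected
        layer with one sized for the target classes."""
        def __init__(self, num_classes=10, pretrained=True):
            super(ResNet152, self).__init__()
            self.model = models.resnet152(pretrained=pretrained)
            self.model.fc = nn.Linear(self.model.fc.in_features, num_classes)

        def forward(self, x):
            return self.model(x)

    # quick shape check with a dummy batch at the configured image_size of 224
    net = ResNet152(num_classes=5, pretrained=False)
    print(net(torch.randn(1, 3, 224, 224)).shape)  # torch.Size([1, 5])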

Image_recognition/main.py

Lines changed: 4 additions & 2 deletions
@@ -113,7 +113,7 @@ def train(**kwargs):
                                         model_name=opt.model,
                                         total=len(train_dataloader))
         for ii, (data, labels) in enumerate(train_dataloader):
-            train_progressor.current = ii
+            train_progressor.current = ii+1
             # train model
             input = data.to(opt.device)
             target = labels.to(opt.device)
@@ -139,6 +139,7 @@ def train(**kwargs):
             train_progressor()
         # train_progressor.done()  # save the training results to a txt file
         # validate and visualize
+        print('')
         valid_loss = val(model, epoch, criterion, val_dataloader)  # validate the model
         is_best = valid_loss[1] > best_precision  # compare accuracy; save the model if it improved
         best_precision = max(valid_loss[1], best_precision)
@@ -174,7 +175,7 @@ def val(model, epoch, criterion, dataloader):
     val_progressor = ProgressBar(mode="Val ", epoch=epoch, total_epoch=opt.max_epoch, model_name=opt.model,
                                  total=len(dataloader))
     for ii, (data, labels) in enumerate(dataloader):
-        val_progressor.current = ii
+        val_progressor.current = ii+1
         input = data.to(opt.device)
         labels = labels.to(opt.device)
         score = model(input)
@@ -187,6 +188,7 @@ def val(model, epoch, criterion, dataloader):
         val_progressor.current_loss = losses.avg
         val_progressor.current_top1 = top1.avg
         val_progressor()
+    print('')
     # val_progressor.done()  # save the validation results to a txt file
     return [losses.avg, top1.avg]

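Note: the two ii+1 changes and the added print('') calls make sense for a progress bar that redraws a single line with a carriage return: counting batches from 1 lets the bar reach 100% on the last batch, and the empty print moves later output onto a fresh line. A tiny standalone illustration, not the repository's ProgressBar class:

    import sys
    import time

    total = 5
    for ii in range(total):
        current = ii + 1  # using ii itself would stop at 80% when total=5
        sys.stdout.write("\rbatch %d/%d (%3.0f%%)" % (current, total, 100.0 * current / total))
        sys.stdout.flush()
        time.sleep(0.1)
    print('')  # move off the in-place progress line before printing anything else
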
Image_recognition/utils/utils.py

Lines changed: 1 addition & 1 deletion
@@ -46,4 +46,4 @@ def save_checkpoint(state):
     prefix = './checkpoint/' + opt.model + '_'
     filename = time.strftime(prefix + '%m%d_%H-%M-%S.pth.tar')
     torch.save(state, filename)
-    print("Get Better top1 : %s saving weights to %s" % (state["best_precision"], filename))  # print accuracy
+    # print("Get Better top1 : %s saving weights to %s" % (state["best_precision"], filename))  # print accuracy
