
Commit 41f2aa1

release ssseg v1.5.7
1 parent: f4d9800


44 files changed (+93, -151 lines)

ssseg/__init__.py

Lines changed: 1 addition & 1 deletion

@@ -1,5 +1,5 @@
 '''version'''
-__version__ = '1.5.6'
+__version__ = '1.5.7'
 '''author'''
 __author__ = 'Zhenchao Jin'
 '''title'''

ssseg/modules/datasets/base.py

Lines changed: 1 addition & 1 deletion

@@ -48,7 +48,7 @@ def __getitem__(self, index):
     '''len'''
     def __len__(self):
         return len(self.imageids) * self.repeat_times
-    '''read sample_meta'''
+    '''read'''
     def read(self, imagepath, annpath=None):
         # read image
         image = cv2.imread(imagepath)

ssseg/modules/datasets/cityscapes.py

Lines changed: 1 addition & 1 deletion

@@ -58,7 +58,7 @@ def __getitem__(self, index):
         sample_meta = self.synctransforms(sample_meta)
         # return
         return sample_meta
-    '''format results for test set of Cityscapes'''
+    '''formatresults'''
     @staticmethod
     def formatresults(results, filenames, to_label_id=True, savedir='results'):
         assert len(filenames) == len(results)

ssseg/modules/datasets/pipelines/evaluation.py

Lines changed: 4 additions & 4 deletions

@@ -18,7 +18,7 @@ def __init__(self, seg_preds, seg_targets, num_classes, ignore_index=-1, nan_to_
         self.all_metric_results = self.totalareatometrics(
             total_area_intersect=total_area_intersect, total_area_union=total_area_union, total_area_pred_label=total_area_pred_label, total_area_label=total_area_label, nan_to_num=nan_to_num, beta=beta,
         )
-    '''calculate total intersection and union'''
+    '''totalintersectandunion'''
     @staticmethod
     def totalintersectandunion(results, gt_seg_maps, num_classes, ignore_index=-1):
         total_area_intersect = torch.zeros((num_classes, ), dtype=torch.float64)
@@ -32,7 +32,7 @@ def totalintersectandunion(results, gt_seg_maps, num_classes, ignore_index=-1):
             total_area_pred_label += area_pred_label
             total_area_label += area_label
         return total_area_intersect, total_area_union, total_area_pred_label, total_area_label
-    '''calculate intersection and union'''
+    '''intersectandunion'''
     @staticmethod
     def intersectandunion(pred_label, label, num_classes, ignore_index=-1):
         # convert to torch.array
@@ -50,7 +50,7 @@ def intersectandunion(pred_label, label, num_classes, ignore_index=-1):
         area_union = area_pred_label + area_label - area_intersect
         # return
         return area_intersect, area_union, area_pred_label, area_label
-    '''calculate evaluation metrics'''
+    '''totalareatometrics'''
     @staticmethod
     def totalareatometrics(total_area_intersect, total_area_union, total_area_pred_label, total_area_label, nan_to_num=None, beta=1):
         # all metrics
@@ -80,7 +80,7 @@ def totalareatometrics(total_area_intersect, total_area_union, total_area_pred_l
         all_metric_results['mdice'] = np.nanmean(all_metric_results['dice'])
         all_metric_results['mfscore'] = np.nanmean(all_metric_results['fscore'])
         return all_metric_results
-    '''calcuate the f-score value'''
+    '''calcuatefscore'''
     @staticmethod
     def calcuatefscore(precision, recall, beta=1):
         score = (1 + beta**2) * (precision * recall) / ((beta**2 * precision) + recall)
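Note: the histogram trick behind intersectandunion, and the f-score formula above, can be sketched standalone as follows (a simplified illustration, not the evaluator's exact code; torch.histc bins each class's pixel count):

import torch

def intersect_and_union(pred_label, label, num_classes, ignore_index=-1):
    # drop ignored pixels, then histogram per-class areas
    mask = (label != ignore_index)
    pred_label, label = pred_label[mask], label[mask]
    # pixels where prediction equals label are the per-class intersection
    intersect = pred_label[pred_label == label]
    area_intersect = torch.histc(intersect.float(), bins=num_classes, min=0, max=num_classes - 1)
    area_pred = torch.histc(pred_label.float(), bins=num_classes, min=0, max=num_classes - 1)
    area_label = torch.histc(label.float(), bins=num_classes, min=0, max=num_classes - 1)
    area_union = area_pred + area_label - area_intersect
    return area_intersect, area_union, area_pred, area_label

def fscore(precision, recall, beta=1):
    # same formula as calcuatefscore above: weighted harmonic mean of P and R
    return (1 + beta**2) * (precision * recall) / ((beta**2 * precision) + recall)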

ssseg/modules/datasets/pipelines/transforms.py

Lines changed: 5 additions & 5 deletions

@@ -473,11 +473,11 @@ def __call__(self, sample_meta):
         sample_meta['image'] = self.hue(sample_meta['image'])
         if mode == 0: sample_meta['image'] = self.contrast(sample_meta['image'])
         return sample_meta
-    '''brightness distortion'''
+    '''brightness'''
     def brightness(self, image):
         if not np.random.randint(2): return image
         return self.convert(image, beta=np.random.uniform(-self.brightness_delta, self.brightness_delta))
-    '''contrast distortion'''
+    '''contrast'''
     def contrast(self, image):
         if not np.random.randint(2): return image
         return self.convert(image, alpha=np.random.uniform(self.contrast_lower, self.contrast_upper))
@@ -487,21 +487,21 @@ def rgb2hsv(self, image):
     '''hsv2rgb'''
     def hsv2rgb(self, image):
         return cv2.cvtColor(image, cv2.COLOR_HSV2RGB)
-    '''saturation distortion'''
+    '''saturation'''
     def saturation(self, image):
         if not np.random.randint(2): return image
         image = self.rgb2hsv(image)
         image[..., 1] = self.convert(image[..., 1], alpha=np.random.uniform(self.saturation_lower, self.saturation_upper))
         image = self.hsv2rgb(image)
         return image
-    '''hue distortion'''
+    '''hue'''
     def hue(self, image):
         if not np.random.randint(2): return image
         image = self.rgb2hsv(image)
         image[..., 0] = (image[..., 0].astype(int) + np.random.randint(-self.hue_delta, self.hue_delta)) % 180
         image = self.hsv2rgb(image)
         return image
-    '''multiple with alpha and add beat with clip'''
+    '''convert'''
     def convert(self, image, alpha=1, beta=0):
         image = image.astype(np.float32) * alpha + beta
         image = np.clip(image, 0, 255)
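Note: convert is the shared primitive behind the brightness/contrast distortions: multiply by alpha (contrast), add beta (brightness), clip back to [0, 255]. A minimal standalone sketch of the pattern (illustrative delta values, not the class's configured ones):

import numpy as np

def convert(image, alpha=1, beta=0):
    # scale (contrast) and shift (brightness), then clip to the valid uint8 range
    image = image.astype(np.float32) * alpha + beta
    return np.clip(image, 0, 255).astype(np.uint8)

img = np.random.randint(0, 256, (4, 4, 3), dtype=np.uint8)
brighter = convert(img, beta=20)    # brightness: additive shift
punchier = convert(img, alpha=1.3)  # contrast: multiplicative scale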

ssseg/modules/models/backbones/bricks/__init__.py

Lines changed: 1 addition & 1 deletion

@@ -12,7 +12,7 @@
     Scale, L2Norm, makedivisible, truncnormal
 )
 from .transformer import (
-    FFN, MultiheadAttention, nchwtonlc, nlctonchw, PatchEmbed, PatchMerging, AdaptivePadding, PositionEmbeddingSine
+    FFN, MultiheadAttention, PatchEmbed, PatchMerging, AdaptivePadding, PositionEmbeddingSine, nchwtonlc, nlctonchw, nlc2nchw2nlc, nchw2nlc2nchw
 )
 from .convolution import (
     DynamicConv2d, AdptivePaddingConv2d, SqueezeExcitationConv2d, DepthwiseSeparableConv2d, InvertedResidual, InvertedResidualV3

ssseg/modules/models/backbones/bricks/convolution/apconv.py

Lines changed: 3 additions & 8 deletions

@@ -7,21 +7,16 @@
 import math
 import torch.nn as nn
 import torch.nn.functional as F
+from ..activation import BuildActivation
 from ..normalization import BuildNormalization
 
 
 '''AdptivePaddingConv2d'''
 class AdptivePaddingConv2d(nn.Conv2d):
     def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0, dilation=1, groups=1, bias=True, norm_cfg=None, act_cfg=None):
         super(AdptivePaddingConv2d, self).__init__(
-            in_channels=in_channels,
-            out_channels=out_channels,
-            kernel_size=kernel_size,
-            stride=stride,
-            padding=0,
-            dilation=dilation,
-            groups=groups,
-            bias=bias
+            in_channels=in_channels, out_channels=out_channels, kernel_size=kernel_size, stride=stride, padding=0,
+            dilation=dilation, groups=groups, bias=bias
         )
         if norm_cfg is not None:
             self.norm = BuildNormalization(placeholder=out_channels, norm_cfg=norm_cfg)
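Note: AdptivePaddingConv2d deliberately passes padding=0 to nn.Conv2d and instead pads the input at runtime so the output size is ceil(input / stride), i.e. TensorFlow-style 'same' padding. A hedged sketch of that padding computation (my reading of the pattern, not the class's verbatim code):

import math
import torch
import torch.nn.functional as F

def same_pad(x, kernel_size, stride, dilation=1):
    # pad so that out = ceil(in / stride), as in TF 'SAME' convolutions
    h, w = x.shape[-2:]
    effective_k = (kernel_size - 1) * dilation + 1
    out_h, out_w = math.ceil(h / stride), math.ceil(w / stride)
    pad_h = max((out_h - 1) * stride + effective_k - h, 0)
    pad_w = max((out_w - 1) * stride + effective_k - w, 0)
    # split padding between both sides, extra pixel on the right/bottom
    return F.pad(x, [pad_w // 2, pad_w - pad_w // 2, pad_h // 2, pad_h - pad_h // 2])

x = torch.randn(1, 3, 7, 7)
y = F.conv2d(same_pad(x, kernel_size=3, stride=2), torch.randn(8, 3, 3, 3), stride=2)
assert y.shape[-2:] == (4, 4)  # ceil(7 / 2)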

ssseg/modules/models/backbones/bricks/convolution/dsconv.py

Lines changed: 0 additions & 1 deletion

@@ -4,7 +4,6 @@
 Author:
     Zhenchao Jin
 '''
-import torch
 import torch.nn as nn
 from ..activation import BuildActivation
 from ..normalization import BuildNormalization

ssseg/modules/models/backbones/bricks/convolution/dyconv.py

Lines changed: 2 additions & 14 deletions

@@ -74,23 +74,11 @@ def forward(self, x):
         if self.bias is not None:
             aggregate_bias = torch.mm(softmax_attention, self.bias).view(-1)
             output = F.conv2d(
-                input=x,
-                weight=aggregate_weight,
-                bias=aggregate_bias,
-                stride=self.stride,
-                padding=self.padding,
-                dilation=self.dilation,
-                groups=self.groups * batch_size,
+                input=x, weight=aggregate_weight, bias=aggregate_bias, stride=self.stride, padding=self.padding, dilation=self.dilation, groups=self.groups * batch_size,
             )
         else:
             output = F.conv2d(
-                input=x,
-                weight=aggregate_weight,
-                bias=None,
-                stride=self.stride,
-                padding=self.padding,
-                dilation=self.dilation,
-                groups=self.groups * batch_size,
+                input=x, weight=aggregate_weight, bias=None, stride=self.stride, padding=self.padding, dilation=self.dilation, groups=self.groups * batch_size,
             )
         output = output.view(batch_size, self.out_channels, output.size(-2), output.size(-1))
         if hasattr(self, 'norm'): output = self.norm(output)
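Note: the collapsed F.conv2d calls rely on a standard trick for per-sample dynamic weights: fold the batch dimension into the channel dimension and run a single grouped convolution with groups = self.groups * batch_size. A minimal sketch of the trick in isolation (the attention that produces the aggregated weights is omitted):

import torch
import torch.nn.functional as F

batch_size, in_ch, out_ch, k = 4, 8, 16, 3
x = torch.randn(batch_size, in_ch, 32, 32)
# one distinct kernel per sample, e.g. an attention-weighted mix of experts
per_sample_weight = torch.randn(batch_size, out_ch, in_ch, k, k)

# fold batch into channels: (1, B*C_in, H, W) conv'd with (B*C_out, C_in, k, k)
x_flat = x.reshape(1, batch_size * in_ch, 32, 32)
w_flat = per_sample_weight.reshape(batch_size * out_ch, in_ch, k, k)
out = F.conv2d(x_flat, w_flat, padding=1, groups=batch_size)
# unfold back to (B, C_out, H, W)
out = out.view(batch_size, out_ch, out.size(-2), out.size(-1))
assert out.shape == (4, 16, 32, 32)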

ssseg/modules/models/backbones/bricks/normalization/layernorm2d.py

Lines changed: 0 additions & 1 deletion

@@ -4,7 +4,6 @@
 Author:
     Zhenchao Jin
 '''
-import torch
 import torch.nn as nn
 import torch.nn.functional as F
ssseg/modules/models/backbones/bricks/transformer/__init__.py

Lines changed: 1 addition & 1 deletion

@@ -3,4 +3,4 @@
 from .mha import MultiheadAttention
 from .pe import PositionEmbeddingSine
 from .embed import PatchEmbed, PatchMerging, AdaptivePadding
-from .misc import nchwtonlc, nlctonchw, nlc2nchw2nlc, nchw2nlc2nchw
+from .shape import nchwtonlc, nlctonchw, nlc2nchw2nlc, nchw2nlc2nchw
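Note: the module rename misc.py -> shape.py groups the tensor-layout helpers under one name. As a rough illustration (a sketch, not the module's exact source), nchwtonlc and nlctonchw are plain flatten/transpose round-trips between the convolutional and the attention token layout:

import torch

def nchwtonlc(x):
    # (N, C, H, W) -> (N, L, C) with L = H * W, the layout attention expects
    assert x.dim() == 4
    return x.flatten(2).transpose(1, 2).contiguous()

def nlctonchw(x, hw_shape):
    # (N, L, C) -> (N, C, H, W); hw_shape carries the spatial size lost by flattening
    h, w = hw_shape
    n, l, c = x.shape
    assert l == h * w, 'the seq_len of x does not match hw_shape'
    return x.transpose(1, 2).reshape(n, c, h, w).contiguous()

# quick round-trip check
x = torch.randn(2, 64, 8, 8)
assert torch.equal(nlctonchw(nchwtonlc(x), (8, 8)), x)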

ssseg/modules/models/backbones/convnext.py

Lines changed: 0 additions & 1 deletion

@@ -7,7 +7,6 @@
 import os
 import torch
 import torch.nn as nn
-import torch.nn.functional as F
 import torch.utils.model_zoo as model_zoo
 from functools import partial
 from .bricks.dropout.droppath import DropPath

ssseg/modules/models/backbones/hrnet.py

Lines changed: 7 additions & 7 deletions

@@ -53,18 +53,18 @@ def forward(self, x):
                     y += self.fuse_layers[i][j](x[j])
             x_fuse.append(self.relu(y))
         return x_fuse
-    '''check branches'''
+    '''checkbranches'''
     def checkbranches(self, num_branches, num_blocks, in_channels, num_channels):
         assert num_branches == len(num_blocks), 'num_branches should be equal to len(num_blocks)'
         assert num_branches == len(num_channels), 'num_branches should be equal to len(num_channels)'
         assert num_branches == len(in_channels), 'num_branches should be equal to len(in_channels)'
-    '''make branches'''
+    '''makebranches'''
     def makebranches(self, num_branches, block, num_blocks, num_channels, norm_cfg=None, act_cfg=None):
         branches = []
         for i in range(num_branches):
             branches.append(self.makebranch(i, block, num_blocks, num_channels, norm_cfg=norm_cfg, act_cfg=act_cfg))
         return nn.ModuleList(branches)
-    '''make one branch'''
+    '''makebranch'''
     def makebranch(self, branch_index, block, num_blocks, num_channels, stride=1, norm_cfg=None, act_cfg=None):
         downsample = None
         if stride != 1 or self.in_channels[branch_index] != num_channels[branch_index] * block.expansion:
@@ -78,7 +78,7 @@ def makebranch(self, branch_index, block, num_blocks, num_channels, stride=1, no
         for i in range(1, num_blocks[branch_index]):
             layers.append(block(self.in_channels[branch_index], num_channels[branch_index], norm_cfg=norm_cfg, act_cfg=act_cfg))
         return nn.Sequential(*layers)
-    '''make fuse layer'''
+    '''makefuselayers'''
     def makefuselayers(self, norm_cfg=None, act_cfg=None):
         if self.num_branches == 1: return None
         num_branches = self.num_branches
@@ -243,7 +243,7 @@ def forward(self, x):
         out = torch.cat([F.interpolate(y, size=(h, w), mode='bilinear', align_corners=False) for y in y_list], dim=1)
         outs = [out]
         return tuple(outs)
-    '''make stage'''
+    '''makestage'''
     def makestage(self, layer_config, in_channels, multiscale_output=True, norm_cfg=None, act_cfg=None):
         num_modules = layer_config['num_modules']
         num_branches = layer_config['num_branches']
@@ -258,7 +258,7 @@ def makestage(self, layer_config, in_channels, multiscale_output=True, norm_cfg=
                 reset_multiscale_output = True
             hr_modules.append(HRModule(num_branches, block, num_blocks, in_channels, num_channels, reset_multiscale_output, norm_cfg, act_cfg))
         return nn.Sequential(*hr_modules), in_channels
-    '''make layer'''
+    '''makelayer'''
     def makelayer(self, block, inplanes, planes, num_blocks, stride=1, norm_cfg=None, act_cfg=None):
         downsample = None
         if stride != 1 or inplanes != planes * block.expansion:
@@ -276,7 +276,7 @@ def makelayer(self, block, inplanes, planes, num_blocks, stride=1, norm_cfg=None
                 block(inplanes, planes, norm_cfg=norm_cfg, act_cfg=act_cfg)
             )
         return nn.Sequential(*layers)
-    '''make transition layer'''
+    '''maketransitionlayer'''
     def maketransitionlayer(self, num_channels_pre_layer, num_channels_cur_layer, norm_cfg=None, act_cfg=None):
         num_branches_cur = len(num_channels_cur_layer)
         num_branches_pre = len(num_channels_pre_layer)

ssseg/modules/models/backbones/mobilenet.py

Lines changed: 1 addition & 1 deletion

@@ -171,7 +171,7 @@ def loadpretrainedweights(self, structure_type='mobilenetv3_small', pretrained_m
                 key = '.'.join(key.split('.')[1:])
             state_dict[key] = value
         self.load_state_dict(state_dict, strict=False)
-    '''make layers'''
+    '''makelayers'''
     def makelayers(self, in_channels, arch_type, reduction_factor, outstride, norm_cfg=None, act_cfg=None):
         layers, act_cfg_default = [], act_cfg.copy()
         # build the first layer

ssseg/modules/models/backbones/resnest.py

Lines changed: 1 addition & 1 deletion

@@ -166,7 +166,7 @@ def __init__(self, structure_type, groups=1, base_width=4, radix=2, reduction_fa
                 structure_type=structure_type, pretrained_model_path=pretrained_model_path, default_model_urls=DEFAULT_MODEL_URLS
             )
             self.load_state_dict(state_dict, strict=False)
-    '''make res layer'''
+    '''makelayer'''
     def makelayer(self, block, inplanes, planes, num_blocks, stride=1, dilation=1, contract_dilation=True, use_avg_for_downsample=False, norm_cfg=None, act_cfg=None):
         downsample = None
         dilations = [dilation] * num_blocks

ssseg/modules/models/backbones/resnet.py

Lines changed: 1 addition & 1 deletion

@@ -171,7 +171,7 @@ def __init__(self, structure_type, in_channels=3, base_channels=64, stem_channel
                 structure_type=structure_type, pretrained_model_path=pretrained_model_path, default_model_urls=DEFAULT_MODEL_URLS
             )
             self.load_state_dict(state_dict, strict=False)
-    '''make res layer'''
+    '''makelayer'''
     def makelayer(self, block, inplanes, planes, num_blocks, stride=1, dilation=1, contract_dilation=True, use_avg_for_downsample=False, norm_cfg=None, act_cfg=None):
         downsample = None
         dilations = [dilation] * num_blocks

ssseg/modules/models/backbones/swin.py

Lines changed: 1 addition & 1 deletion

@@ -380,7 +380,7 @@ def loadpretrainedweights(self, structure_type='swin_tiny_patch4_window7_224', p
                 state_dict[table_key] = table_pretrained_resized.view(nH2, L2).permute(1, 0).contiguous()
         # load state_dict
         self.load_state_dict(state_dict, strict=False)
-    '''swin convert'''
+    '''swinconvert'''
     @staticmethod
     def swinconvert(ckpt):
         new_ckpt = OrderedDict()

ssseg/modules/models/backbones/twins.py

Lines changed: 1 addition & 1 deletion

@@ -252,7 +252,7 @@ def loadpretrainedweights(self, structure_type='pcpvt_small', pretrained_model_p
            state_dict = self.twinsconvert(structure_type, state_dict)
         # load state_dict
         self.load_state_dict(state_dict, strict=False)
-    '''twins convert'''
+    '''twinsconvert'''
     @staticmethod
     def twinsconvert(structure_type, ckpt):
         new_ckpt = OrderedDict()

ssseg/modules/models/backbones/unet.py

Lines changed: 1 addition & 1 deletion

@@ -170,7 +170,7 @@ def forward(self, x):
             x = self.decoder[i](enc_outs[i], x)
             dec_outs.append(x)
         return dec_outs
-    '''check input divisible'''
+    '''checkinputdivisible'''
     def checkinputdivisible(self, x):
         h, w = x.shape[-2:]
         whole_downsample_rate = 1

ssseg/modules/models/backbones/vit.py

Lines changed: 3 additions & 3 deletions

@@ -148,7 +148,7 @@ def loadpretrainedweights(self, structure_type='jx_vit_large_p16_384', pretraine
             pos_size = int(math.sqrt(state_dict['pos_embed'].shape[1] - 1))
             state_dict['pos_embed'] = self.resizeposembed(state_dict['pos_embed'], (h // self.patch_size, w // self.patch_size), (pos_size, pos_size), self.interpolate_mode)
         self.load_state_dict(state_dict, strict=False)
-    '''vit convert'''
+    '''vitconvert'''
     @staticmethod
     def vitconvert(ckpt):
         from collections import OrderedDict
@@ -173,7 +173,7 @@ def vitconvert(ckpt):
                 new_k = k
             new_ckpt[new_k] = v
         return new_ckpt
-    '''positiong embeding method'''
+    '''posembeding'''
     def posembeding(self, patched_img, hw_shape, pos_embed):
         assert patched_img.ndim == 3 and pos_embed.ndim == 3, 'the shapes of patched_img and pos_embed must be [B, L, C]'
         x_len, pos_len = patched_img.shape[1], pos_embed.shape[1]
@@ -185,7 +185,7 @@ def posembeding(self, patched_img, hw_shape, pos_embed):
             raise ValueError('Unexpected shape of pos_embed, got {}.'.format(pos_embed.shape))
         pos_embed = self.resizeposembed(pos_embed, hw_shape, (pos_h, pos_w), self.interpolate_mode)
         return self.drop_after_pos(patched_img + pos_embed)
-    '''resize pos_embed weights'''
+    '''resizeposembed'''
     @staticmethod
     def resizeposembed(pos_embed, input_shpae, pos_shape, mode):
         assert pos_embed.ndim == 3, 'shape of pos_embed must be [B, L, C]'
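Note: resizeposembed reconciles the pretraining patch grid with the current one: the class token is set aside, the spatial tokens are reshaped to 2D, interpolated to the new grid, and flattened back. A rough sketch under that reading (not the exact method body; the mode argument corresponds to self.interpolate_mode):

import torch
import torch.nn.functional as F

def resize_pos_embed(pos_embed, input_shape, pos_shape, mode='bicubic'):
    # pos_embed: (1, 1 + pos_h * pos_w, C) with a leading class token
    pos_h, pos_w = pos_shape
    cls_token = pos_embed[:, :1]
    # tokens -> 2D grid -> interpolate -> tokens
    spatial = pos_embed[:, 1:].reshape(1, pos_h, pos_w, -1).permute(0, 3, 1, 2)
    spatial = F.interpolate(spatial, size=input_shape, mode=mode, align_corners=False)
    spatial = spatial.flatten(2).transpose(1, 2)
    return torch.cat([cls_token, spatial], dim=1)

pe = torch.randn(1, 1 + 24 * 24, 1024)                # e.g. ViT-L/16 pretrained at 384x384
pe_resized = resize_pos_embed(pe, (32, 32), (24, 24))  # new (h // patch, w // patch) grid
assert pe_resized.shape == (1, 1 + 32 * 32, 1024)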

ssseg/modules/models/segmentors/annnet/afnblock.py

Lines changed: 1 addition & 1 deletion

@@ -11,7 +11,7 @@
 from ...backbones import BuildNormalization
 
 
-'''Asymmetric Fusion Non-local Block (AFNB)'''
+'''AFNBlock'''
 class AFNBlock(nn.Module):
     def __init__(self, low_in_channels, high_in_channels, transform_channels, out_channels, query_scales, key_pool_scales, norm_cfg=None, act_cfg=None):
         super(AFNBlock, self).__init__()

ssseg/modules/models/segmentors/annnet/apnblock.py

Lines changed: 1 addition & 1 deletion

@@ -11,7 +11,7 @@
 from ...backbones import BuildActivation, BuildNormalization
 
 
-'''Asymmetric Pyramid Non-local Block (APNB)'''
+'''APNBlock'''
 class APNBlock(nn.Module):
     def __init__(self, in_channels, transform_channels, out_channels, query_scales, key_pool_scales, norm_cfg=None, act_cfg=None):
         super(APNBlock, self).__init__()

ssseg/modules/models/segmentors/annnet/ppm.py

Lines changed: 1 addition & 1 deletion

@@ -8,7 +8,7 @@
 import torch.nn as nn
 
 
-'''Pyramid Pooling Module (Concat only)'''
+'''PPMConcat'''
 class PPMConcat(nn.Module):
     def __init__(self, pool_scales):
         super(PPMConcat, self).__init__()

ssseg/modules/models/segmentors/base/selfattention.py

Lines changed: 1 addition & 1 deletion

@@ -72,7 +72,7 @@ def forward(self, query_feats, key_feats):
         if self.out_project is not None:
             context = self.out_project(context)
         return context
-    '''build project'''
+    '''buildproject'''
     def buildproject(self, in_channels, out_channels, num_convs, use_norm, norm_cfg, act_cfg):
         if use_norm:
             convs = [nn.Sequential(

ssseg/modules/models/segmentors/emanet/ema.py

Lines changed: 1 addition & 1 deletion

@@ -43,7 +43,7 @@ def forward(self, x):
         bases = F.normalize(bases, dim=1, p=2)
         self.bases = (1 - self.momentum) * self.bases + self.momentum * bases
         return feats_recon
-    '''reduce mean when distributed training'''
+    '''reducemean'''
     def reducemean(self, tensor):
         if not (dist.is_available() and dist.is_initialized()):
             return tensor
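Note: reducemean is the usual all-reduce-averaging idiom: a no-op outside distributed runs, otherwise sum across workers and divide by world size. A sketch of the idiom (standard torch.distributed usage; the body beyond the guard shown in the hunk is assumed):

import torch.distributed as dist

def reduce_mean(tensor):
    # no-op outside distributed runs, otherwise average over all workers
    if not (dist.is_available() and dist.is_initialized()):
        return tensor
    tensor = tensor.clone()
    dist.all_reduce(tensor.div_(dist.get_world_size()), op=dist.ReduceOp.SUM)
    return tensor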

ssseg/modules/models/segmentors/encnet/contextencoding.py

Lines changed: 0 additions & 1 deletion

@@ -5,7 +5,6 @@
     Zhenchao Jin
 '''
 import copy
-import torch
 import torch.nn as nn
 import torch.nn.functional as F
 from .encoding import Encoding

ssseg/modules/models/segmentors/encnet/encnet.py

Lines changed: 1 addition & 1 deletion

@@ -80,7 +80,7 @@ def forward(self, data_meta):
         else:
             ssseg_outputs = SSSegOutputStructure(mode=self.mode, seg_logits=seg_logits)
         return ssseg_outputs
-    '''convert to onehot labels'''
+    '''onehot'''
     def onehot(self, labels, num_classes):
         batch_size = labels.size(0)
         labels_onehot = labels.new_zeros((batch_size, num_classes))
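Note: only the first two lines of onehot appear in this hunk. For ENCNet's SE-loss target it plausibly marks, per image, which classes occur anywhere in the label map; the sketch below is a guess at that behavior (a hypothetical completion, not the method's shown code):

import torch

def onehot(labels, num_classes):
    # hypothetical completion: per-image class-presence indicators
    batch_size = labels.size(0)
    labels_onehot = labels.new_zeros((batch_size, num_classes)).float()
    for idx in range(batch_size):
        present = torch.unique(labels[idx])
        present = present[(present >= 0) & (present < num_classes)]
        labels_onehot[idx, present.long()] = 1
    return labels_onehot

seg = torch.randint(0, 21, (2, 64, 64))  # two fake 64x64 label maps, 21 classes
print(onehot(seg, 21))                   # (2, 21) presence indicators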
