Commit 65bb527

Bump segmentation-models-pytorch from 0.4.0 to 0.5.0 in /requirements (#2740)
1 parent c49deda · commit 65bb527

File tree

4 files changed: +29 -13 lines

- pyproject.toml (+2 -2)
- requirements/min-reqs.old (+1 -1)
- requirements/required.txt (+1 -1)
- torchgeo/models/fcsiam.py (+25 -9)

pyproject.toml (+2 -2)

@@ -65,8 +65,8 @@ dependencies = [
     "rasterio>=1.3.3,!=1.4.0,!=1.4.1,!=1.4.2",
     # rtree 1.0.1+ required for Python 3.11 wheels
     "rtree>=1.0.1",
-    # segmentation-models-pytorch 0.3.3+ required for timm 0.8+ support
-    "segmentation-models-pytorch>=0.3.3",
+    # segmentation-models-pytorch 0.5+ required for new UnetDecoder API
+    "segmentation-models-pytorch>=0.5",
     # shapely 1.8.5+ required for Python 3.11 wheels
     "shapely>=1.8.5",
     # timm 0.8+ required for timm.models.adapt_input_conv, 0.9.2 required by SMP
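
As a quick sanity check (not part of this commit), downstream code can confirm at runtime that the installed distribution satisfies the new lower bound. The snippet below is a minimal sketch using only the standard library.

```python
# Minimal sketch (not from this commit): verify the installed
# segmentation-models-pytorch meets the new ">=0.5" constraint.
from importlib.metadata import version

smp_version = version('segmentation-models-pytorch')
major, minor = (int(part) for part in smp_version.split('.')[:2])
assert (major, minor) >= (0, 5), (
    f'segmentation-models-pytorch>=0.5 required, found {smp_version}'
)
```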

requirements/min-reqs.old (+1 -1)

@@ -14,7 +14,7 @@ pillow==9.2.0
 pyproj==3.4.0
 rasterio==1.3.11
 rtree==1.0.1
-segmentation-models-pytorch==0.3.3
+segmentation-models-pytorch==0.5.0
 shapely==1.8.5
 timm==0.9.2
 torch==2.0.0

requirements/required.txt (+1 -1)

@@ -14,7 +14,7 @@ pillow==11.2.1
 pyproj==3.7.1
 rasterio==1.4.3
 rtree==1.4.0
-segmentation-models-pytorch==0.4.0
+segmentation-models-pytorch==0.5.0
 shapely==2.0.7
 timm==1.0.15
 torch==2.6.0

torchgeo/models/fcsiam.py (+25 -9)

@@ -26,7 +26,7 @@ def __init__(
         encoder_name: str = 'resnet34',
         encoder_depth: int = 5,
         encoder_weights: str | None = 'imagenet',
-        decoder_use_batchnorm: bool = True,
+        decoder_use_batchnorm: bool | str | dict[str, Any] = 'batchnorm',
         decoder_channels: Sequence[int] = (256, 128, 64, 32, 16),
         decoder_attention_type: str | None = None,
         in_channels: int = 3,
@@ -50,10 +50,26 @@ def __init__(
             decoder_channels: List of integers which specify **in_channels**
                 parameter for convolutions used in decoder. Length of the list
                 should be the same as **encoder_depth**
-            decoder_use_batchnorm: If **True**, BatchNorm2d layer between
-                Conv2D and Activation layers is used. If **"inplace"** InplaceABN
-                will be used, allows to decrease memory consumption. Available
-                options are **True, False, "inplace"**
+            decoder_use_batchnorm: Specifies normalization between Conv2D and
+                activation. Accepts the following types:
+
+                - **True**: Defaults to `"batchnorm"`.
+                - **False**: No normalization (`nn.Identity`).
+                - **str**: Specifies normalization type using default parameters.
+                  Available values: `"batchnorm"`, `"identity"`, `"layernorm"`,
+                  `"instancenorm"`, `"inplace"`.
+                - **dict**: Fully customizable normalization settings. Structure:
+                  ```python
+                  {"type": <norm_type>, **kwargs}
+                  ```
+                  where `norm_name` corresponds to normalization type (see above), and
+                  `kwargs` are passed directly to the normalization layer as defined in
+                  PyTorch documentation.
+
+                **Example**:
+                ```python
+                decoder_use_norm={"type": "layernorm", "eps": 1e-2}
+                ```
             decoder_attention_type: Attention module used in decoder of the model.
                 Available options are **None** and **scse**. SCSE paper
                 https://arxiv.org/abs/1808.08127
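
To make the widened parameter concrete, here is a hedged usage sketch (not part of the diff) showing how a caller might pass the new string and dict forms through `FCSiamConc`. It assumes `FCSiamConc` is importable from `torchgeo.models`, and `encoder_weights=None` is used only so the example does not download pretrained weights.

```python
# Hedged usage sketch: exercising the widened decoder_use_batchnorm parameter.
from torchgeo.models import FCSiamConc

# String form: pick a normalization type with its default parameters.
model_bn = FCSiamConc(encoder_weights=None, decoder_use_batchnorm='batchnorm')

# Dict form: extra keyword arguments are forwarded to the normalization layer.
model_ln = FCSiamConc(
    encoder_weights=None,
    decoder_use_batchnorm={'type': 'layernorm', 'eps': 1e-2},
)
```
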
@@ -79,9 +95,9 @@ def __init__(
             encoder_channels=encoder_out_channels,
             decoder_channels=decoder_channels,
             n_blocks=encoder_depth,
-            use_batchnorm=decoder_use_batchnorm,
-            center=True if encoder_name.startswith('vgg') else False,
+            use_norm=decoder_use_batchnorm,
             attention_type=decoder_attention_type,
+            add_center_block=True if encoder_name.startswith('vgg') else False,
         )

         self.segmentation_head = smp.base.SegmentationHead(
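
The keyword renames in this hunk track the SMP 0.5 `UnetDecoder` signature. The sketch below illustrates the old and new spellings side by side; the import path and the resnet34 channel tuple are assumptions for illustration, not taken from the diff.

```python
# Hedged sketch of the SMP 0.5 UnetDecoder keyword changes shown in this hunk.
from segmentation_models_pytorch.decoders.unet.decoder import UnetDecoder

decoder = UnetDecoder(
    encoder_channels=(3, 64, 64, 128, 256, 512),  # e.g. resnet34 feature widths
    decoder_channels=(256, 128, 64, 32, 16),
    n_blocks=5,
    use_norm='batchnorm',      # SMP < 0.5: use_batchnorm=True
    attention_type=None,
    add_center_block=False,    # SMP < 0.5: center=False
)
```
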
@@ -111,7 +127,7 @@ def forward(self, x: Tensor) -> Tensor:
             for i in range(1, len(features1))
         ]
         features.insert(0, features2[0])
-        decoder_output = self.decoder(*features)
+        decoder_output = self.decoder(features)
         masks: Tensor = self.segmentation_head(decoder_output)
         return masks

@@ -150,6 +166,6 @@ def forward(self, x: Tensor) -> Tensor:
         features1, features2 = self.encoder(x1), self.encoder(x2)
         features = [features2[i] - features1[i] for i in range(1, len(features1))]
         features.insert(0, features2[0])
-        decoder_output = self.decoder(*features)
+        decoder_output = self.decoder(features)
         masks: Tensor = self.segmentation_head(decoder_output)
         return masks
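
Both forward methods now hand the assembled feature list to the decoder as a single argument instead of unpacking it. The hedged end-to-end sketch below exercises the updated path; it assumes the bitemporal `(batch, time=2, channels, height, width)` input convention of the FCSiam models and uses `encoder_weights=None` to avoid a weight download.

```python
# Hedged end-to-end sketch: run the updated forward pass on random input.
import torch
from torchgeo.models import FCSiamConc

model = FCSiamConc(encoder_weights=None, in_channels=3)
x = torch.randn(1, 2, 3, 256, 256)  # (batch, time=2, channels, height, width)
masks = model(x)                    # decoder now receives the feature list
print(masks.shape)                  # (batch, classes, height, width)
```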
