auto updates (#7105)
Signed-off-by: monai-bot <[email protected]>
monai-bot authored Oct 9, 2023
1 parent 7930f85 commit 2b0a95e
Showing 7 changed files with 15 additions and 16 deletions.
1 change: 0 additions & 1 deletion monai/apps/auto3dseg/bundle_gen.py
@@ -47,7 +47,6 @@
 logger = get_logger(module_name=__name__)
 ALGO_HASH = MONAIEnvVars.algo_hash()
 
-
 __all__ = ["BundleAlgo", "BundleGen"]
 
10 changes: 5 additions & 5 deletions monai/apps/detection/networks/retinanet_network.py
@@ -88,8 +88,8 @@ def __init__(
 
         for layer in self.conv.children():
             if isinstance(layer, conv_type):  # type: ignore
-                torch.nn.init.normal_(layer.weight, std=0.01)  # type: ignore
-                torch.nn.init.constant_(layer.bias, 0)  # type: ignore
+                torch.nn.init.normal_(layer.weight, std=0.01)
+                torch.nn.init.constant_(layer.bias, 0)
 
         self.cls_logits = conv_type(in_channels, num_anchors * num_classes, kernel_size=3, stride=1, padding=1)
         torch.nn.init.normal_(self.cls_logits.weight, std=0.01)
@@ -167,8 +167,8 @@ def __init__(self, in_channels: int, num_anchors: int, spatial_dims: int):
 
         for layer in self.conv.children():
             if isinstance(layer, conv_type):  # type: ignore
-                torch.nn.init.normal_(layer.weight, std=0.01)  # type: ignore
-                torch.nn.init.zeros_(layer.bias)  # type: ignore
+                torch.nn.init.normal_(layer.weight, std=0.01)
+                torch.nn.init.zeros_(layer.bias)
 
     def forward(self, x: list[Tensor]) -> list[Tensor]:
         """
@@ -297,7 +297,7 @@ def __init__(
         )
         self.feature_extractor = feature_extractor
 
-        self.feature_map_channels: int = self.feature_extractor.out_channels  # type: ignore[assignment]
+        self.feature_map_channels: int = self.feature_extractor.out_channels
         self.num_anchors = num_anchors
         self.classification_head = RetinaNetClassificationHead(
             self.feature_map_channels, self.num_anchors, self.num_classes, spatial_dims=self.spatial_dims
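Both head hunks touch the same RetinaNet-style initialization: Gaussian weights with a small standard deviation and zeroed biases. A minimal standalone sketch of the pattern, assuming a plain torch.nn.Conv2d stack in place of MONAI's dimension-agnostic Conv factory (names hypothetical):

import torch
import torch.nn as nn

# Hypothetical stand-in for the head's conv stack.
conv = nn.Sequential(nn.Conv2d(256, 256, kernel_size=3, padding=1), nn.ReLU())

for layer in conv.children():
    if isinstance(layer, nn.Conv2d):
        # RetinaNet-style init: small-std Gaussian weights, zero bias.
        torch.nn.init.normal_(layer.weight, std=0.01)
        torch.nn.init.constant_(layer.bias, 0)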
2 changes: 1 addition & 1 deletion monai/apps/reconstruction/networks/blocks/varnetblock.py
@@ -55,7 +55,7 @@ def soft_dc(self, x: Tensor, ref_kspace: Tensor, mask: Tensor) -> Tensor:
         Returns:
             Output of DC block with the same shape as x
         """
-        return torch.where(mask, x - ref_kspace, self.zeros) * self.dc_weight  # type: ignore
+        return torch.where(mask, x - ref_kspace, self.zeros) * self.dc_weight
 
     def forward(self, current_kspace: Tensor, ref_kspace: Tensor, mask: Tensor, sens_maps: Tensor) -> Tensor:
         """
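For intuition, the soft data-consistency term keeps x - ref_kspace only at sampled k-space locations and scales it by a learned weight. A rough sketch with hypothetical shapes:

import torch

x = torch.randn(1, 1, 4, 4)           # current k-space estimate
ref_kspace = torch.randn(1, 1, 4, 4)  # acquired reference k-space
mask = torch.rand(1, 1, 4, 4) > 0.5   # boolean sampling mask
zeros = torch.zeros(1, 1, 1, 1)       # broadcastable zeros, as in the block
dc_weight = torch.nn.Parameter(torch.ones(1))

out = torch.where(mask, x - ref_kspace, zeros) * dc_weight  # same shape as x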
8 changes: 4 additions & 4 deletions monai/losses/cldice.py
@@ -29,12 +29,12 @@ def soft_erode(img: torch.Tensor) -> torch.Tensor:  # type: ignore
     if len(img.shape) == 4:
         p1 = -(F.max_pool2d(-img, (3, 1), (1, 1), (1, 0)))
         p2 = -(F.max_pool2d(-img, (1, 3), (1, 1), (0, 1)))
-        return torch.min(p1, p2)  # type: ignore
+        return torch.min(p1, p2)
     elif len(img.shape) == 5:
         p1 = -(F.max_pool3d(-img, (3, 1, 1), (1, 1, 1), (1, 0, 0)))
         p2 = -(F.max_pool3d(-img, (1, 3, 1), (1, 1, 1), (0, 1, 0)))
         p3 = -(F.max_pool3d(-img, (1, 1, 3), (1, 1, 1), (0, 0, 1)))
-        return torch.min(torch.min(p1, p2), p3)  # type: ignore
+        return torch.min(torch.min(p1, p2), p3)
 
 
 def soft_dilate(img: torch.Tensor) -> torch.Tensor:  # type: ignore
@@ -48,9 +48,9 @@ def soft_dilate(img: torch.Tensor) -> torch.Tensor:  # type: ignore
     https://github.com/jocpae/clDice/blob/master/cldice_loss/pytorch/soft_skeleton.py#L18
     """
     if len(img.shape) == 4:
-        return F.max_pool2d(img, (3, 3), (1, 1), (1, 1))  # type: ignore
+        return F.max_pool2d(img, (3, 3), (1, 1), (1, 1))
     elif len(img.shape) == 5:
-        return F.max_pool3d(img, (3, 3, 3), (1, 1, 1), (1, 1, 1))  # type: ignore
+        return F.max_pool3d(img, (3, 3, 3), (1, 1, 1), (1, 1, 1))
 
 
 def soft_open(img: torch.Tensor) -> torch.Tensor:
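These helpers implement soft grayscale morphology with pooling: max-pooling dilates, and min-pooling (max-pooling the negated image) erodes. A standalone sketch of the 2D case, with a hypothetical input:

import torch
import torch.nn.functional as F

img = torch.rand(1, 1, 32, 32)  # hypothetical soft segmentation in [0, 1]

# Dilation: 3x3 max pool with stride 1 and padding 1 keeps the spatial size.
dilated = F.max_pool2d(img, (3, 3), (1, 1), (1, 1))

# Erosion: min pool via negation, applied per axis and combined with
# torch.min, mirroring soft_erode above.
p1 = -F.max_pool2d(-img, (3, 1), (1, 1), (1, 0))
p2 = -F.max_pool2d(-img, (1, 3), (1, 1), (0, 1))
eroded = torch.min(p1, p2)

assert dilated.shape == eroded.shape == img.shape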
4 changes: 2 additions & 2 deletions monai/networks/blocks/feature_pyramid_network.py
@@ -194,8 +194,8 @@ def __init__(
         conv_type_: type[nn.Module] = Conv[Conv.CONV, spatial_dims]
         for m in self.modules():
             if isinstance(m, conv_type_):
-                nn.init.kaiming_uniform_(m.weight, a=1)  # type: ignore
-                nn.init.constant_(m.bias, 0.0)  # type: ignore
+                nn.init.kaiming_uniform_(m.weight, a=1)
+                nn.init.constant_(m.bias, 0.0)
 
         if extra_blocks is not None:
             if not isinstance(extra_blocks, ExtraFPNBlock):
4 changes: 2 additions & 2 deletions monai/networks/nets/swin_unetr.py
@@ -519,7 +519,7 @@ def forward(self, x, mask):
         q = q * self.scale
         attn = q @ k.transpose(-2, -1)
         relative_position_bias = self.relative_position_bias_table[
-            self.relative_position_index.clone()[:n, :n].reshape(-1)  # type: ignore
+            self.relative_position_index.clone()[:n, :n].reshape(-1)
         ].reshape(n, n, -1)
         relative_position_bias = relative_position_bias.permute(2, 0, 1).contiguous()
         attn = attn + relative_position_bias.unsqueeze(0)
@@ -678,7 +678,7 @@ def load_from(self, weights, n_block, layer):
            self.norm1.weight.copy_(weights["state_dict"][root + block_names[0]])
            self.norm1.bias.copy_(weights["state_dict"][root + block_names[1]])
            self.attn.relative_position_bias_table.copy_(weights["state_dict"][root + block_names[2]])
-           self.attn.relative_position_index.copy_(weights["state_dict"][root + block_names[3]])  # type: ignore
+           self.attn.relative_position_index.copy_(weights["state_dict"][root + block_names[3]])
            self.attn.qkv.weight.copy_(weights["state_dict"][root + block_names[4]])
            self.attn.qkv.bias.copy_(weights["state_dict"][root + block_names[5]])
            self.attn.proj.weight.copy_(weights["state_dict"][root + block_names[6]])
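The first hunk gathers one learned bias value per (query, key, head) triple from a table indexed by relative position. A toy sketch with hypothetical sizes:

import torch

n, num_heads = 4, 2  # tokens per window, attention heads
table_size = 16      # hypothetical number of relative offsets
bias_table = torch.randn(table_size, num_heads)
rel_index = torch.randint(0, table_size, (n, n))  # stand-in for relative_position_index

bias = bias_table[rel_index.reshape(-1)].reshape(n, n, -1)  # (n, n, num_heads)
bias = bias.permute(2, 0, 1).contiguous().unsqueeze(0)      # (1, num_heads, n, n)
# attn = attn + bias  # added to the raw attention logits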
2 changes: 1 addition & 1 deletion monai/networks/nets/transchex.py
@@ -46,7 +46,7 @@ def __init__(self, *inputs, **kwargs) -> None:
 
     def init_bert_weights(self, module):
         if isinstance(module, (nn.Linear, nn.Embedding)):
-            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)  # type: ignore
+            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
         elif isinstance(module, torch.nn.LayerNorm):
             module.bias.data.zero_()
             module.weight.data.fill_(1.0)
