SwinV23D#
- pydantic model vision_architectures.nets.swinv2_3d.SwinV23DPatchMergingConfig[source]#
Bases: CustomBaseModel
JSON schema:
{ "title": "SwinV23DPatchMergingConfig", "type": "object", "properties": { "out_dim_ratio": { "title": "Out Dim Ratio", "type": "integer" }, "merge_window_size": { "maxItems": 3, "minItems": 3, "prefixItems": [ { "type": "integer" }, { "type": "integer" }, { "type": "integer" } ], "title": "Merge Window Size", "type": "array" } }, "required": [ "out_dim_ratio", "merge_window_size" ] }
- Config:
arbitrary_types_allowed: bool = True
extra: str = ignore
validate_default: bool = True
validate_assignment: bool = True
validate_return: bool = True
- Fields: out_dim_ratio, merge_window_size
- Validators: validate_before » all fields
- field out_dim_ratio: int [Required]#
  Validated by: validate_before
- field merge_window_size: tuple[int, int, int] [Required]#
  Validated by: validate_before
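Example (a minimal sketch with illustrative values; the interpretation of each field in the comments is inferred from its name, not stated on this page):

    from vision_architectures.nets.swinv2_3d import SwinV23DPatchMergingConfig

    merging = SwinV23DPatchMergingConfig(
        out_dim_ratio=2,              # ratio of output to input channels after merging (inferred)
        merge_window_size=(2, 2, 2),  # (D, H, W) neighborhood of tokens fused into one (inferred)
    )
    print(merging.model_dump())  # {'out_dim_ratio': 2, 'merge_window_size': (2, 2, 2)}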
- pydantic model vision_architectures.nets.swinv2_3d.SwinV23DPatchSplittingConfig[source]#
Bases: CustomBaseModel
JSON schema:
{ "title": "SwinV23DPatchSplittingConfig", "type": "object", "properties": { "out_dim_ratio": { "title": "Out Dim Ratio", "type": "integer" }, "final_window_size": { "maxItems": 3, "minItems": 3, "prefixItems": [ { "type": "integer" }, { "type": "integer" }, { "type": "integer" } ], "title": "Final Window Size", "type": "array" } }, "required": [ "out_dim_ratio", "final_window_size" ] }
- Config:
arbitrary_types_allowed: bool = True
extra: str = ignore
validate_default: bool = True
validate_assignment: bool = True
validate_return: bool = True
- Fields: out_dim_ratio, final_window_size
- Validators: validate_before » all fields
- field out_dim_ratio: int [Required]#
  Validated by: validate_before
- field final_window_size: tuple[int, int, int] [Required]#
  Validated by: validate_before
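Example (the mirror of the patch-merging config above; values are illustrative and the field semantics in the comments are inferred from the names):

    from vision_architectures.nets.swinv2_3d import SwinV23DPatchSplittingConfig

    splitting = SwinV23DPatchSplittingConfig(
        out_dim_ratio=2,              # ratio by which channels shrink when tokens are split (inferred)
        final_window_size=(2, 2, 2),  # (D, H, W) layout of tokens produced from each input token (inferred)
    )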
- pydantic model vision_architectures.nets.swinv2_3d.SwinV23DStageConfig[source]#
Bases: Attention3DWithMLPConfig
JSON schema:
{ "title": "SwinV23DStageConfig", "type": "object", "properties": { "dim": { "default": 0, "description": "dim at which attention is performed", "title": "Dim", "type": "integer" }, "mlp_ratio": { "default": 4, "title": "Mlp Ratio", "type": "integer" }, "activation": { "default": "gelu", "title": "Activation", "type": "string" }, "mlp_drop_prob": { "default": 0.0, "title": "Mlp Drop Prob", "type": "number" }, "num_heads": { "description": "Number of query heads", "title": "Num Heads", "type": "integer" }, "ratio_q_to_kv_heads": { "default": 1, "title": "Ratio Q To Kv Heads", "type": "integer" }, "logit_scale_learnable": { "default": false, "title": "Logit Scale Learnable", "type": "boolean" }, "attn_drop_prob": { "default": 0.0, "title": "Attn Drop Prob", "type": "number" }, "proj_drop_prob": { "default": 0.0, "title": "Proj Drop Prob", "type": "number" }, "max_attention_batch_size": { "default": -1, "description": "Runs attention by splitting the inputs into chunks of this size. 0 means no chunking. Useful for large inputs during inference.", "title": "Max Attention Batch Size", "type": "integer" }, "norm_location": { "default": "post", "enum": [ "pre", "post" ], "title": "Norm Location", "type": "string" }, "layer_norm_eps": { "default": 1e-06, "title": "Layer Norm Eps", "type": "number" }, "depth": { "title": "Depth", "type": "integer" }, "window_size": { "maxItems": 3, "minItems": 3, "prefixItems": [ { "type": "integer" }, { "type": "integer" }, { "type": "integer" } ], "title": "Window Size", "type": "array" }, "use_relative_position_bias": { "default": true, "title": "Use Relative Position Bias", "type": "boolean" }, "patch_merging": { "anyOf": [ { "$ref": "#/$defs/SwinV23DPatchMergingConfig" }, { "type": "null" } ], "default": null }, "patch_splitting": { "anyOf": [ { "$ref": "#/$defs/SwinV23DPatchSplittingConfig" }, { "type": "null" } ], "default": null }, "in_dim": { "anyOf": [ { "type": "integer" }, { "type": "null" } ], "default": null, "title": "In Dim" }, "out_dim": { "anyOf": [ { "type": "integer" }, { "type": "null" } ], "default": null, "title": "Out Dim" } }, "$defs": { "SwinV23DPatchMergingConfig": { "properties": { "out_dim_ratio": { "title": "Out Dim Ratio", "type": "integer" }, "merge_window_size": { "maxItems": 3, "minItems": 3, "prefixItems": [ { "type": "integer" }, { "type": "integer" }, { "type": "integer" } ], "title": "Merge Window Size", "type": "array" } }, "required": [ "out_dim_ratio", "merge_window_size" ], "title": "SwinV23DPatchMergingConfig", "type": "object" }, "SwinV23DPatchSplittingConfig": { "properties": { "out_dim_ratio": { "title": "Out Dim Ratio", "type": "integer" }, "final_window_size": { "maxItems": 3, "minItems": 3, "prefixItems": [ { "type": "integer" }, { "type": "integer" }, { "type": "integer" } ], "title": "Final Window Size", "type": "array" } }, "required": [ "out_dim_ratio", "final_window_size" ], "title": "SwinV23DPatchSplittingConfig", "type": "object" } }, "required": [ "num_heads", "depth", "window_size" ] }
- Config:
arbitrary_types_allowed: bool = True
extra: str = ignore
validate_default: bool = True
validate_assignment: bool = True
validate_return: bool = True
- Fields: depth, window_size, use_relative_position_bias, patch_merging, patch_splitting, in_dim, dim, out_dim, logit_scale_learnable
- Validators: (none)
- field depth: int [Required]#
- field window_size: tuple[int, int, int] [Required]#
- field use_relative_position_bias: bool = True#
- field patch_merging: SwinV23DPatchMergingConfig | None = None#
- field patch_splitting: SwinV23DPatchSplittingConfig | None = None#
- field in_dim: int | None = None#
- field dim: int = 0#
  dim at which attention is performed
- field out_dim: int | None = None#
- field logit_scale_learnable: bool = False#
- property spatial_compression_ratio#
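Example (a minimal sketch; num_heads, depth, and window_size are the only required fields per the schema above; whether in_dim/out_dim must be set by hand or are filled in by the parent config is not documented on this page, and the meaning of spatial_compression_ratio in the final comment is inferred from its name):

    from vision_architectures.nets.swinv2_3d import (
        SwinV23DPatchMergingConfig,
        SwinV23DStageConfig,
    )

    stage = SwinV23DStageConfig(
        num_heads=3,            # number of query heads
        depth=2,                # number of transformer blocks in this stage
        window_size=(4, 4, 4),  # (D, H, W) attention window in tokens
        patch_merging=SwinV23DPatchMergingConfig(
            out_dim_ratio=2, merge_window_size=(2, 2, 2)
        ),
    )
    print(stage.spatial_compression_ratio)  # downsampling contributed by this stage (inferred)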
- pydantic model vision_architectures.nets.swinv2_3d.SwinV23DDecoderConfig[source]#
Bases: CustomBaseModel
JSON schema:
{ "title": "SwinV23DDecoderConfig", "type": "object", "properties": { "dim": { "title": "Dim", "type": "integer" }, "stages": { "items": { "$ref": "#/$defs/SwinV23DStageConfig" }, "title": "Stages", "type": "array" }, "drop_prob": { "default": 0.0, "title": "Drop Prob", "type": "number" }, "embed_spacing_info": { "default": false, "title": "Embed Spacing Info", "type": "boolean" } }, "$defs": { "SwinV23DPatchMergingConfig": { "properties": { "out_dim_ratio": { "title": "Out Dim Ratio", "type": "integer" }, "merge_window_size": { "maxItems": 3, "minItems": 3, "prefixItems": [ { "type": "integer" }, { "type": "integer" }, { "type": "integer" } ], "title": "Merge Window Size", "type": "array" } }, "required": [ "out_dim_ratio", "merge_window_size" ], "title": "SwinV23DPatchMergingConfig", "type": "object" }, "SwinV23DPatchSplittingConfig": { "properties": { "out_dim_ratio": { "title": "Out Dim Ratio", "type": "integer" }, "final_window_size": { "maxItems": 3, "minItems": 3, "prefixItems": [ { "type": "integer" }, { "type": "integer" }, { "type": "integer" } ], "title": "Final Window Size", "type": "array" } }, "required": [ "out_dim_ratio", "final_window_size" ], "title": "SwinV23DPatchSplittingConfig", "type": "object" }, "SwinV23DStageConfig": { "properties": { "dim": { "default": 0, "description": "dim at which attention is performed", "title": "Dim", "type": "integer" }, "mlp_ratio": { "default": 4, "title": "Mlp Ratio", "type": "integer" }, "activation": { "default": "gelu", "title": "Activation", "type": "string" }, "mlp_drop_prob": { "default": 0.0, "title": "Mlp Drop Prob", "type": "number" }, "num_heads": { "description": "Number of query heads", "title": "Num Heads", "type": "integer" }, "ratio_q_to_kv_heads": { "default": 1, "title": "Ratio Q To Kv Heads", "type": "integer" }, "logit_scale_learnable": { "default": false, "title": "Logit Scale Learnable", "type": "boolean" }, "attn_drop_prob": { "default": 0.0, "title": "Attn Drop Prob", "type": "number" }, "proj_drop_prob": { "default": 0.0, "title": "Proj Drop Prob", "type": "number" }, "max_attention_batch_size": { "default": -1, "description": "Runs attention by splitting the inputs into chunks of this size. 0 means no chunking. Useful for large inputs during inference.", "title": "Max Attention Batch Size", "type": "integer" }, "norm_location": { "default": "post", "enum": [ "pre", "post" ], "title": "Norm Location", "type": "string" }, "layer_norm_eps": { "default": 1e-06, "title": "Layer Norm Eps", "type": "number" }, "depth": { "title": "Depth", "type": "integer" }, "window_size": { "maxItems": 3, "minItems": 3, "prefixItems": [ { "type": "integer" }, { "type": "integer" }, { "type": "integer" } ], "title": "Window Size", "type": "array" }, "use_relative_position_bias": { "default": true, "title": "Use Relative Position Bias", "type": "boolean" }, "patch_merging": { "anyOf": [ { "$ref": "#/$defs/SwinV23DPatchMergingConfig" }, { "type": "null" } ], "default": null }, "patch_splitting": { "anyOf": [ { "$ref": "#/$defs/SwinV23DPatchSplittingConfig" }, { "type": "null" } ], "default": null }, "in_dim": { "anyOf": [ { "type": "integer" }, { "type": "null" } ], "default": null, "title": "In Dim" }, "out_dim": { "anyOf": [ { "type": "integer" }, { "type": "null" } ], "default": null, "title": "Out Dim" } }, "required": [ "num_heads", "depth", "window_size" ], "title": "SwinV23DStageConfig", "type": "object" } }, "required": [ "dim", "stages" ] }
- Config:
arbitrary_types_allowed: bool = True
extra: str = ignore
validate_default: bool = True
validate_assignment: bool = True
validate_return: bool = True
- Fields: dim, stages, drop_prob, embed_spacing_info
- Validators: validate » all fields
- field dim: int [Required]#
  Validated by: validate
- field stages: list[SwinV23DStageConfig] [Required]#
  Validated by: validate
- field drop_prob: float = 0.0#
  Validated by: validate
- field embed_spacing_info: bool = False#
  Validated by: validate
- pydantic model vision_architectures.nets.swinv2_3d.SwinV23DConfig[source]#
Bases: SwinV23DDecoderConfig
JSON schema:
{ "title": "SwinV23DConfig", "type": "object", "properties": { "dim": { "title": "Dim", "type": "integer" }, "stages": { "items": { "$ref": "#/$defs/SwinV23DStageConfig" }, "title": "Stages", "type": "array" }, "drop_prob": { "default": 0.0, "title": "Drop Prob", "type": "number" }, "embed_spacing_info": { "default": false, "title": "Embed Spacing Info", "type": "boolean" }, "in_channels": { "title": "In Channels", "type": "integer" }, "patch_size": { "maxItems": 3, "minItems": 3, "prefixItems": [ { "type": "integer" }, { "type": "integer" }, { "type": "integer" } ], "title": "Patch Size", "type": "array" }, "image_size": { "anyOf": [ { "maxItems": 3, "minItems": 3, "prefixItems": [ { "type": "integer" }, { "type": "integer" }, { "type": "integer" } ], "type": "array" }, { "type": "null" } ], "default": null, "description": "required for learnable absolute position embeddings", "title": "Image Size" }, "use_absolute_position_embeddings": { "default": true, "title": "Use Absolute Position Embeddings", "type": "boolean" }, "learnable_absolute_position_embeddings": { "default": false, "title": "Learnable Absolute Position Embeddings", "type": "boolean" } }, "$defs": { "SwinV23DPatchMergingConfig": { "properties": { "out_dim_ratio": { "title": "Out Dim Ratio", "type": "integer" }, "merge_window_size": { "maxItems": 3, "minItems": 3, "prefixItems": [ { "type": "integer" }, { "type": "integer" }, { "type": "integer" } ], "title": "Merge Window Size", "type": "array" } }, "required": [ "out_dim_ratio", "merge_window_size" ], "title": "SwinV23DPatchMergingConfig", "type": "object" }, "SwinV23DPatchSplittingConfig": { "properties": { "out_dim_ratio": { "title": "Out Dim Ratio", "type": "integer" }, "final_window_size": { "maxItems": 3, "minItems": 3, "prefixItems": [ { "type": "integer" }, { "type": "integer" }, { "type": "integer" } ], "title": "Final Window Size", "type": "array" } }, "required": [ "out_dim_ratio", "final_window_size" ], "title": "SwinV23DPatchSplittingConfig", "type": "object" }, "SwinV23DStageConfig": { "properties": { "dim": { "default": 0, "description": "dim at which attention is performed", "title": "Dim", "type": "integer" }, "mlp_ratio": { "default": 4, "title": "Mlp Ratio", "type": "integer" }, "activation": { "default": "gelu", "title": "Activation", "type": "string" }, "mlp_drop_prob": { "default": 0.0, "title": "Mlp Drop Prob", "type": "number" }, "num_heads": { "description": "Number of query heads", "title": "Num Heads", "type": "integer" }, "ratio_q_to_kv_heads": { "default": 1, "title": "Ratio Q To Kv Heads", "type": "integer" }, "logit_scale_learnable": { "default": false, "title": "Logit Scale Learnable", "type": "boolean" }, "attn_drop_prob": { "default": 0.0, "title": "Attn Drop Prob", "type": "number" }, "proj_drop_prob": { "default": 0.0, "title": "Proj Drop Prob", "type": "number" }, "max_attention_batch_size": { "default": -1, "description": "Runs attention by splitting the inputs into chunks of this size. 0 means no chunking. 
Useful for large inputs during inference.", "title": "Max Attention Batch Size", "type": "integer" }, "norm_location": { "default": "post", "enum": [ "pre", "post" ], "title": "Norm Location", "type": "string" }, "layer_norm_eps": { "default": 1e-06, "title": "Layer Norm Eps", "type": "number" }, "depth": { "title": "Depth", "type": "integer" }, "window_size": { "maxItems": 3, "minItems": 3, "prefixItems": [ { "type": "integer" }, { "type": "integer" }, { "type": "integer" } ], "title": "Window Size", "type": "array" }, "use_relative_position_bias": { "default": true, "title": "Use Relative Position Bias", "type": "boolean" }, "patch_merging": { "anyOf": [ { "$ref": "#/$defs/SwinV23DPatchMergingConfig" }, { "type": "null" } ], "default": null }, "patch_splitting": { "anyOf": [ { "$ref": "#/$defs/SwinV23DPatchSplittingConfig" }, { "type": "null" } ], "default": null }, "in_dim": { "anyOf": [ { "type": "integer" }, { "type": "null" } ], "default": null, "title": "In Dim" }, "out_dim": { "anyOf": [ { "type": "integer" }, { "type": "null" } ], "default": null, "title": "Out Dim" } }, "required": [ "num_heads", "depth", "window_size" ], "title": "SwinV23DStageConfig", "type": "object" } }, "required": [ "dim", "stages", "in_channels", "patch_size" ] }
- Config:
arbitrary_types_allowed: bool = True
extra: str = ignore
validate_default: bool = True
validate_assignment: bool = True
validate_return: bool = True
- Fields: in_channels, patch_size, image_size, use_absolute_position_embeddings, learnable_absolute_position_embeddings
- Validators: validate » all fields
- field in_channels: int [Required]#
  Validated by: validate
- field patch_size: tuple[int, int, int] [Required]#
  Validated by: validate
- field image_size: tuple[int, int, int] | None = None#
  Required for learnable absolute position embeddings.
  Validated by: validate
- field use_absolute_position_embeddings: bool = True#
  Validated by: validate
- field learnable_absolute_position_embeddings: bool = False#
  Validated by: validate
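Example (a minimal two-stage sketch; the specific sizes are illustrative, and any cross-field constraints enforced by the validate validator are not documented on this page):

    from vision_architectures.nets.swinv2_3d import (
        SwinV23DConfig,
        SwinV23DPatchMergingConfig,
        SwinV23DStageConfig,
    )

    config = SwinV23DConfig(
        in_channels=1,         # e.g. single-channel volumes such as CT
        dim=48,                # embedding dim after patch embedding
        patch_size=(2, 4, 4),  # (D, H, W) voxels per patch
        stages=[
            SwinV23DStageConfig(num_heads=3, depth=2, window_size=(4, 4, 4)),
            SwinV23DStageConfig(
                num_heads=6,
                depth=2,
                window_size=(4, 4, 4),
                patch_merging=SwinV23DPatchMergingConfig(
                    out_dim_ratio=2, merge_window_size=(2, 2, 2)
                ),
            ),
        ],
        image_size=(32, 128, 128),  # only required for learnable absolute position embeddings
    )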
- pydantic model vision_architectures.nets.swinv2_3d.Swin3DMIMConfig[source]#
Bases: SwinV23DConfig
JSON schema:
{ "title": "Swin3DMIMConfig", "type": "object", "properties": { "dim": { "title": "Dim", "type": "integer" }, "stages": { "items": { "$ref": "#/$defs/SwinV23DStageConfig" }, "title": "Stages", "type": "array" }, "drop_prob": { "default": 0.0, "title": "Drop Prob", "type": "number" }, "embed_spacing_info": { "default": false, "title": "Embed Spacing Info", "type": "boolean" }, "in_channels": { "title": "In Channels", "type": "integer" }, "patch_size": { "maxItems": 3, "minItems": 3, "prefixItems": [ { "type": "integer" }, { "type": "integer" }, { "type": "integer" } ], "title": "Patch Size", "type": "array" }, "image_size": { "anyOf": [ { "maxItems": 3, "minItems": 3, "prefixItems": [ { "type": "integer" }, { "type": "integer" }, { "type": "integer" } ], "type": "array" }, { "type": "null" } ], "default": null, "description": "required for learnable absolute position embeddings", "title": "Image Size" }, "use_absolute_position_embeddings": { "default": true, "title": "Use Absolute Position Embeddings", "type": "boolean" }, "learnable_absolute_position_embeddings": { "default": false, "title": "Learnable Absolute Position Embeddings", "type": "boolean" }, "mim": { "additionalProperties": true, "title": "Mim", "type": "object" } }, "$defs": { "SwinV23DPatchMergingConfig": { "properties": { "out_dim_ratio": { "title": "Out Dim Ratio", "type": "integer" }, "merge_window_size": { "maxItems": 3, "minItems": 3, "prefixItems": [ { "type": "integer" }, { "type": "integer" }, { "type": "integer" } ], "title": "Merge Window Size", "type": "array" } }, "required": [ "out_dim_ratio", "merge_window_size" ], "title": "SwinV23DPatchMergingConfig", "type": "object" }, "SwinV23DPatchSplittingConfig": { "properties": { "out_dim_ratio": { "title": "Out Dim Ratio", "type": "integer" }, "final_window_size": { "maxItems": 3, "minItems": 3, "prefixItems": [ { "type": "integer" }, { "type": "integer" }, { "type": "integer" } ], "title": "Final Window Size", "type": "array" } }, "required": [ "out_dim_ratio", "final_window_size" ], "title": "SwinV23DPatchSplittingConfig", "type": "object" }, "SwinV23DStageConfig": { "properties": { "dim": { "default": 0, "description": "dim at which attention is performed", "title": "Dim", "type": "integer" }, "mlp_ratio": { "default": 4, "title": "Mlp Ratio", "type": "integer" }, "activation": { "default": "gelu", "title": "Activation", "type": "string" }, "mlp_drop_prob": { "default": 0.0, "title": "Mlp Drop Prob", "type": "number" }, "num_heads": { "description": "Number of query heads", "title": "Num Heads", "type": "integer" }, "ratio_q_to_kv_heads": { "default": 1, "title": "Ratio Q To Kv Heads", "type": "integer" }, "logit_scale_learnable": { "default": false, "title": "Logit Scale Learnable", "type": "boolean" }, "attn_drop_prob": { "default": 0.0, "title": "Attn Drop Prob", "type": "number" }, "proj_drop_prob": { "default": 0.0, "title": "Proj Drop Prob", "type": "number" }, "max_attention_batch_size": { "default": -1, "description": "Runs attention by splitting the inputs into chunks of this size. 0 means no chunking. 
Useful for large inputs during inference.", "title": "Max Attention Batch Size", "type": "integer" }, "norm_location": { "default": "post", "enum": [ "pre", "post" ], "title": "Norm Location", "type": "string" }, "layer_norm_eps": { "default": 1e-06, "title": "Layer Norm Eps", "type": "number" }, "depth": { "title": "Depth", "type": "integer" }, "window_size": { "maxItems": 3, "minItems": 3, "prefixItems": [ { "type": "integer" }, { "type": "integer" }, { "type": "integer" } ], "title": "Window Size", "type": "array" }, "use_relative_position_bias": { "default": true, "title": "Use Relative Position Bias", "type": "boolean" }, "patch_merging": { "anyOf": [ { "$ref": "#/$defs/SwinV23DPatchMergingConfig" }, { "type": "null" } ], "default": null }, "patch_splitting": { "anyOf": [ { "$ref": "#/$defs/SwinV23DPatchSplittingConfig" }, { "type": "null" } ], "default": null }, "in_dim": { "anyOf": [ { "type": "integer" }, { "type": "null" } ], "default": null, "title": "In Dim" }, "out_dim": { "anyOf": [ { "type": "integer" }, { "type": "null" } ], "default": null, "title": "Out Dim" } }, "required": [ "num_heads", "depth", "window_size" ], "title": "SwinV23DStageConfig", "type": "object" } }, "required": [ "dim", "stages", "in_channels", "patch_size", "mim" ] }
- Config:
arbitrary_types_allowed: bool = True
extra: str = ignore
validate_default: bool = True
validate_assignment: bool = True
validate_return: bool = True
- Fields: mim
- Validators: (none)
- field mim: dict [Required]#
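Example (sketch only: per the schema, mim accepts an arbitrary mapping, and the keys it is expected to contain are not documented on this page, so an empty dict is shown as a placeholder):

    from vision_architectures.nets.swinv2_3d import Swin3DMIMConfig, SwinV23DStageConfig

    mim_config = Swin3DMIMConfig(
        in_channels=1,
        dim=48,
        patch_size=(2, 4, 4),
        stages=[SwinV23DStageConfig(num_heads=3, depth=2, window_size=(4, 4, 4))],
        mim={},  # placeholder; the expected keys are model-specific and not listed here
    )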
- class vision_architectures.nets.swinv2_3d.SwinV23DLayerLogitScale(num_heads)[source]#
Bases: Module
- __init__(num_heads)[source]#
Initialize internal Module state, shared by both nn.Module and ScriptModule.
- forward()[source]#
Define the computation performed at every call. Should be overridden by all subclasses.
Note: although the recipe for the forward pass needs to be defined within this function, one should call the Module instance afterwards instead of this, since the former takes care of running the registered hooks while the latter silently ignores them.
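Usage sketch (per the signatures above, forward() takes no arguments; that the returned value is a per-head logit scale for scaled cosine attention, as in SwinV2, is an inference from the class name, not stated on this page):

    from vision_architectures.nets.swinv2_3d import SwinV23DLayerLogitScale

    logit_scale_module = SwinV23DLayerLogitScale(num_heads=8)
    scale = logit_scale_module()  # no arguments, per the forward() signature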
- class vision_architectures.nets.swinv2_3d.SwinV23DLayer(config={}, checkpointing_level=0, **kwargs)[source]#
Bases: Module
- __init__(config={}, checkpointing_level=0, **kwargs)[source]#
Initialize internal Module state, shared by both nn.Module and ScriptModule.
- forward(*args, **kwargs)[source]#
Define the computation performed at every call. Should be overridden by all subclasses.
Note: although the recipe for the forward pass needs to be defined within this function, one should call the Module instance afterwards instead of this, since the former takes care of running the registered hooks while the latter silently ignores them.
- class vision_architectures.nets.swinv2_3d.SwinV23DBlock(config={}, checkpointing_level=0, **kwargs)[source]#
Bases: Module
- class vision_architectures.nets.swinv2_3d.SwinV23DPatchMerging(merge_window_size, in_dim, out_dim, checkpointing_level=0)[source]#
Bases: Module
- __init__(merge_window_size, in_dim, out_dim, checkpointing_level=0)[source]#
Initialize internal Module state, shared by both nn.Module and ScriptModule.
- forward(*args, **kwargs)[source]#
Define the computation performed at every call. Should be overridden by all subclasses.
Note: although the recipe for the forward pass needs to be defined within this function, one should call the Module instance afterwards instead of this, since the former takes care of running the registered hooks while the latter silently ignores them.
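Instantiation sketch (out_dim is commonly in_dim times the merging config's out_dim_ratio in Swin-style merging, but that relationship is an assumption here; the tensor layout expected by forward() is not documented on this page, so no call is shown):

    from vision_architectures.nets.swinv2_3d import SwinV23DPatchMerging

    patch_merging = SwinV23DPatchMerging(
        merge_window_size=(2, 2, 2),
        in_dim=96,
        out_dim=192,
        checkpointing_level=0,
    )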
- class vision_architectures.nets.swinv2_3d.SwinV23DPatchSplitting(final_window_size, in_dim, out_dim, checkpointing_level=0)[source]#
Bases: Module
- __init__(final_window_size, in_dim, out_dim, checkpointing_level=0)[source]#
Initialize internal Module state, shared by both nn.Module and ScriptModule.
- forward(*args, **kwargs)[source]#
Define the computation performed at every call. Should be overridden by all subclasses.
Note: although the recipe for the forward pass needs to be defined within this function, one should call the Module instance afterwards instead of this, since the former takes care of running the registered hooks while the latter silently ignores them.
- class vision_architectures.nets.swinv2_3d.SwinV23DStage(stage_config, checkpointing_level=0)[source]#
Bases: Module
- __init__(stage_config, checkpointing_level=0)[source]#
Initialize internal Module state, shared by both nn.Module and ScriptModule.
- forward(*args, **kwargs)[source]#
Define the computation performed at every call. Should be overridden by all subclasses.
Note: although the recipe for the forward pass needs to be defined within this function, one should call the Module instance afterwards instead of this, since the former takes care of running the registered hooks while the latter silently ignores them.
- class vision_architectures.nets.swinv2_3d.SwinV23DEncoder(config, checkpointing_level=0)[source]#
Bases: Module, PyTorchModelHubMixin
- __init__(config, checkpointing_level=0)[source]#
Initialize internal Module state, shared by both nn.Module and ScriptModule.
- forward(*args, **kwargs)[source]#
Define the computation performed at every call. Should be overridden by all subclasses.
Note: although the recipe for the forward pass needs to be defined within this function, one should call the Module instance afterwards instead of this, since the former takes care of running the registered hooks while the latter silently ignores them.
- class vision_architectures.nets.swinv2_3d.SwinV23DDecoder(config, checkpointing_level=0)[source]#
Bases: Module, PyTorchModelHubMixin
- __init__(config, checkpointing_level=0)[source]#
Initialize internal Module state, shared by both nn.Module and ScriptModule.
- forward(*args, **kwargs)[source]#
Define the computation performed at every call. Should be overridden by all subclasses.
Note: although the recipe for the forward pass needs to be defined within this function, one should call the Module instance afterwards instead of this, since the former takes care of running the registered hooks while the latter silently ignores them.
- class vision_architectures.nets.swinv2_3d.SwinV23DModel(config, checkpointing_level=0)[source]#
Bases: Module, PyTorchModelHubMixin
- __init__(config, checkpointing_level=0)[source]#
Initialize internal Module state, shared by both nn.Module and ScriptModule.
- forward(pixel_values, spacings=None, crop_offsets=None, channels_first=True, return_intermediates=False)[source]#
Define the computation performed at every call. Should be overridden by all subclasses.
Note: although the recipe for the forward pass needs to be defined within this function, one should call the Module instance afterwards instead of this, since the former takes care of running the registered hooks while the latter silently ignores them.
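End-to-end sketch (assumptions: pixel_values is laid out as (batch, channels, D, H, W) when channels_first=True, and the chosen sizes satisfy the config's validators; the structure of the returned value is not documented on this page):

    import torch

    from vision_architectures.nets.swinv2_3d import (
        SwinV23DConfig,
        SwinV23DModel,
        SwinV23DStageConfig,
    )

    config = SwinV23DConfig(
        in_channels=1,
        dim=48,
        patch_size=(2, 4, 4),
        stages=[SwinV23DStageConfig(num_heads=3, depth=1, window_size=(2, 4, 4))],
    )
    model = SwinV23DModel(config, checkpointing_level=0)

    pixel_values = torch.randn(1, 1, 16, 64, 64)  # assumed (B, C, D, H, W)
    outputs = model(pixel_values)  # spacings / crop_offsets left at their None defaults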
- class vision_architectures.nets.swinv2_3d.SwinV23DReconstructionDecoder(config)[source]#
Bases: Module
- __init__(config)[source]#
Initialize internal Module state, shared by both nn.Module and ScriptModule.
- forward(encodings)[source]#
Define the computation performed at every call. Should be overridden by all subclasses.
Note: although the recipe for the forward pass needs to be defined within this function, one should call the Module instance afterwards instead of this, since the former takes care of running the registered hooks while the latter silently ignores them.
- class vision_architectures.nets.swinv2_3d.SwinV23DMIM(swin_config, decoder_config, mim_config)[source]#
Bases: Module
- class vision_architectures.nets.swinv2_3d.SwinV23DSimMIM(*args, **kwargs)[source]#
Bases: SwinV23DMIM, PyTorchModelHubMixin
- __init__(swin_config, decoder_config, mim_config)[source]#
Initialize internal Module state, shared by both nn.Module and ScriptModule.
- forward(pixel_values, spacings=None)[source]#
Define the computation performed at every call. Should be overridden by all subclasses.
Note: although the recipe for the forward pass needs to be defined within this function, one should call the Module instance afterwards instead of this, since the former takes care of running the registered hooks while the latter silently ignores them.
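Usage sketch (the expected types of the three constructor arguments are not stated on this page; passing a SwinV23DConfig for swin_config is an assumption based on the surrounding classes, and the two placeholder mappings stand in for schemas that are not documented here):

    import torch

    from vision_architectures.nets.swinv2_3d import (
        SwinV23DConfig,
        SwinV23DSimMIM,
        SwinV23DStageConfig,
    )

    swin_config = SwinV23DConfig(
        in_channels=1,
        dim=48,
        patch_size=(2, 4, 4),
        stages=[SwinV23DStageConfig(num_heads=3, depth=1, window_size=(2, 4, 4))],
    )
    mim_model = SwinV23DSimMIM(
        swin_config=swin_config,
        decoder_config={},  # placeholder; expected schema not documented on this page
        mim_config={},      # placeholder; expected keys not documented on this page
    )
    outputs = mim_model(torch.randn(1, 1, 16, 64, 64))  # assumed (B, C, D, H, W)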
- class vision_architectures.nets.swinv2_3d.SwinV23DVAEMIM(*args, **kwargs)[source]#
Bases: SwinV23DMIM, PyTorchModelHubMixin
- __init__(swin_config, decoder_config, mim_config)[source]#
Initialize internal Module state, shared by both nn.Module and ScriptModule.
- forward(pixel_values, spacings=None, reconstruction_loss_type='l2')[source]#
Define the computation performed at every call. Should be overridden by all subclasses.
Note: although the recipe for the forward pass needs to be defined within this function, one should call the Module instance afterwards instead of this, since the former takes care of running the registered hooks while the latter silently ignores them.