protomotions.agents.common.config module

class protomotions.agents.common.config.NormObsBaseConfig(normalize_obs=False, norm_clamp_value=5.0)[source]

Bases: ConfigBuilder

Base configuration for modules that support optional observation normalization.

With LazyLinear, only num_out is needed; input sizes are inferred automatically. This class covers only normalization settings and output dimensions; individual TensorDictModules add their own obs_key/out_key fields as needed.

normalize_obs: bool = False
norm_clamp_value: float = 5.0
__init__(normalize_obs=False, norm_clamp_value=5.0)
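
A minimal construction sketch, assuming the documented fields are accepted as plain keyword arguments:

    from protomotions.agents.common.config import NormObsBaseConfig

    # Enable observation normalization and clamp normalized observations to [-10, 10].
    norm_cfg = NormObsBaseConfig(normalize_obs=True, norm_clamp_value=10.0)
    assert norm_cfg.normalize_obs and norm_cfg.norm_clamp_value == 10.0
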
class protomotions.agents.common.config.ModuleOperationConfig[source]

Bases: ConfigBuilder

Configuration for module operations.

__init__()
class protomotions.agents.common.config.ModuleOperationForwardConfig[source]

Bases: ModuleOperationConfig

Configuration for module operation forward.

__init__()
class protomotions.agents.common.config.ModuleOperationPermuteConfig(new_order)[source]

Bases: ModuleOperationConfig

Configuration for module operation permute.

new_order: List[int]
__init__(new_order)
class protomotions.agents.common.config.ModuleOperationReshapeConfig(new_shape)[source]

Bases: ModuleOperationConfig

Configuration for module operation reshape.

new_shape: List[str | int]
__init__(new_shape)
class protomotions.agents.common.config.ModuleOperationSqueezeConfig(squeeze_dim)[source]

Bases: ModuleOperationConfig

Configuration for module operation squeeze.

squeeze_dim: int
__init__(squeeze_dim)
class protomotions.agents.common.config.ModuleOperationUnsqueezeConfig(unsqueeze_dim)[source]

Bases: ModuleOperationConfig

Configuration for module operation unsqueeze.

unsqueeze_dim: int
__init__(unsqueeze_dim)
class protomotions.agents.common.config.ModuleOperationExpandConfig(expand_shape)[source]

Bases: ModuleOperationConfig

Configuration for module operation expand.

expand_shape: List[int]
__init__(expand_shape)
class protomotions.agents.common.config.ModuleOperationSphereProjectionConfig[source]

Bases: ModuleOperationConfig

Configuration for sphere projection operation (L2 normalization to unit sphere).

__init__()
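
Each operation config above describes a single tensor manipulation; modules that accept module_operations apply them in order. A minimal sketch composing a few of them, assuming plain dataclass-style constructors (the string entry in new_shape, here "batch_size", is an assumption about how symbolic dimensions are named):

    from protomotions.agents.common.config import (
        ModuleOperationPermuteConfig,
        ModuleOperationReshapeConfig,
        ModuleOperationUnsqueezeConfig,
    )

    # Add a trailing dim, swap the last two dims, then flatten everything after the batch dim.
    operations = [
        ModuleOperationUnsqueezeConfig(unsqueeze_dim=-1),
        ModuleOperationPermuteConfig(new_order=[0, 2, 1]),
        ModuleOperationReshapeConfig(new_shape=["batch_size", -1]),
    ]
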
class protomotions.agents.common.config.FlattenConfig(normalize_obs=False, norm_clamp_value=5.0, _target_='protomotions.agents.common.common.Flatten', in_keys=<factory>, out_keys=<factory>, module_operations=<factory>)[source]

Bases: NormObsBaseConfig

Configuration for Flatten module.

in_keys: List[str]
out_keys: List[str]
module_operations: List[ModuleOperationConfig]
__init__(normalize_obs=False, norm_clamp_value=5.0, _target_='protomotions.agents.common.common.Flatten', in_keys=<factory>, out_keys=<factory>, module_operations=<factory>)
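
A minimal sketch of a Flatten configuration, assuming in_keys/out_keys name tensordict entries (the key names here are placeholders):

    from protomotions.agents.common.config import (
        FlattenConfig,
        ModuleOperationReshapeConfig,
    )

    # Normalize the observation, then reshape it to a flat vector per batch element.
    flatten_cfg = FlattenConfig(
        normalize_obs=True,
        in_keys=["self_obs"],
        out_keys=["self_obs_flat"],
        module_operations=[ModuleOperationReshapeConfig(new_shape=["batch_size", -1])],
    )
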
class protomotions.agents.common.config.MLPLayerConfig(units=512, activation='relu', use_layer_norm=False)[source]

Bases: ConfigBuilder

Configuration for a single MLP layer.

units: int = 512
activation: str = 'relu'
use_layer_norm: bool = False
__init__(units=512, activation='relu', use_layer_norm=False)
class protomotions.agents.common.config.MLPWithConcatConfig(normalize_obs=False, norm_clamp_value=5.0, num_out=None, layers=None, _target_='protomotions.agents.common.mlp.MLPWithConcat', in_keys=<factory>, out_keys=<factory>, output_activation=None, module_operations=<factory>)[source]

Bases: NormObsBaseConfig

Configuration for Multi-Layer Perceptron with optional normalization.

Unified MLP configuration that supports optional input normalization; normalization is disabled by default (normalize_obs=False). obs_key and out_key are optional in the config but are validated by the MLP module.

num_out: int = None
layers: List[MLPLayerConfig] = None
in_keys: List[str]
out_keys: List[str]
output_activation: str | None = None
module_operations: List[ModuleOperationConfig]
__init__(normalize_obs=False, norm_clamp_value=5.0, num_out=None, layers=None, _target_='protomotions.agents.common.mlp.MLPWithConcat', in_keys=<factory>, out_keys=<factory>, output_activation=None, module_operations=<factory>)
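
A minimal sketch of an MLP configuration with two hidden layers, assuming the documented fields are accepted as keyword arguments (the tensordict key names are placeholders):

    from protomotions.agents.common.config import MLPLayerConfig, MLPWithConcatConfig

    # Two 512-unit hidden layers; the inputs listed in in_keys are concatenated before the MLP.
    mlp_cfg = MLPWithConcatConfig(
        normalize_obs=True,
        num_out=256,
        layers=[
            MLPLayerConfig(units=512, activation="relu"),
            MLPLayerConfig(units=512, activation="relu", use_layer_norm=True),
        ],
        in_keys=["self_obs", "task_obs"],
        out_keys=["latent"],
    )
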
class protomotions.agents.common.config.MultiInputModuleConfig(input_models, _target_='protomotions.agents.common.common.MultiInputModule', in_keys=<factory>, out_keys=<factory>)[source]

Bases: ConfigBuilder

Configuration for a multi-input module that combines several input models.

input_models: List[Any]
in_keys: List[str]
out_keys: List[str]
__init__(input_models, _target_='protomotions.agents.common.common.MultiInputModule', in_keys=<factory>, out_keys=<factory>)
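
A minimal sketch of a multi-input module wrapping two per-observation encoders; the nested configs, key names, and how in_keys/out_keys relate to the nested models' keys are assumptions here:

    from protomotions.agents.common.config import (
        MLPWithConcatConfig,
        MultiInputModuleConfig,
    )

    multi_input_cfg = MultiInputModuleConfig(
        input_models=[
            MLPWithConcatConfig(num_out=128, in_keys=["self_obs"], out_keys=["self_latent"]),
            MLPWithConcatConfig(num_out=128, in_keys=["task_obs"], out_keys=["task_latent"]),
        ],
        in_keys=["self_obs", "task_obs"],
        out_keys=["self_latent", "task_latent"],
    )
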
class protomotions.agents.common.config.SequentialModuleConfig(input_models, _target_='protomotions.agents.common.common.SequentialModule', in_keys=<factory>, out_keys=<factory>)[source]

Bases: ConfigBuilder

Configuration for a sequential model.

input_models: List[Any]
in_keys: List[str]
out_keys: List[str]
__init__(input_models, _target_='protomotions.agents.common.common.SequentialModule', in_keys=<factory>, out_keys=<factory>)
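
A minimal sketch of a sequential model in which the first model's output key feeds the second model; the key wiring is illustrative:

    from protomotions.agents.common.config import (
        MLPWithConcatConfig,
        SequentialModuleConfig,
    )

    sequential_cfg = SequentialModuleConfig(
        input_models=[
            MLPWithConcatConfig(num_out=256, in_keys=["obs"], out_keys=["latent"]),
            MLPWithConcatConfig(num_out=1, in_keys=["latent"], out_keys=["value"]),
        ],
        in_keys=["obs"],
        out_keys=["value"],
    )
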
class protomotions.agents.common.config.MultiOutputModuleConfig(output_models, _target_='protomotions.agents.common.common.MultiOutputModule', in_keys=<factory>, out_keys=<factory>)[source]

Bases: ConfigBuilder

Configuration for a multi-output model (one input, many outputs).

output_models: List[Any]
in_keys: List[str]
out_keys: List[str]
__init__(output_models, _target_='protomotions.agents.common.common.MultiOutputModule', in_keys=<factory>, out_keys=<factory>)
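
A minimal sketch of a multi-output model producing two heads from one shared input; the key names are placeholders:

    from protomotions.agents.common.config import (
        MLPWithConcatConfig,
        MultiOutputModuleConfig,
    )

    multi_output_cfg = MultiOutputModuleConfig(
        output_models=[
            MLPWithConcatConfig(num_out=32, in_keys=["latent"], out_keys=["action_mean"]),
            MLPWithConcatConfig(num_out=1, in_keys=["latent"], out_keys=["value"]),
        ],
        in_keys=["latent"],
        out_keys=["action_mean", "value"],
    )
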
class protomotions.agents.common.config.TransformerConfig(_target_='protomotions.agents.common.transformer.Transformer', in_keys=<factory>, out_keys=<factory>, input_and_mask_mapping=None, transformer_token_size=512, latent_dim=512, num_heads=4, ff_size=1024, num_layers=4, dropout=0, activation='relu', output_activation=None)[source]

Bases: ConfigBuilder

Configuration for Transformer encoder.

in_keys: List[str]
out_keys: List[str]
input_and_mask_mapping: Dict[str, str] | None = None
transformer_token_size: int = 512
latent_dim: int = 512
num_heads: int = 4
ff_size: int = 1024
num_layers: int = 4
dropout: float = 0
activation: str = 'relu'
output_activation: str | None = None
__init__(_target_='protomotions.agents.common.transformer.Transformer', in_keys=<factory>, out_keys=<factory>, input_and_mask_mapping=None, transformer_token_size=512, latent_dim=512, num_heads=4, ff_size=1024, num_layers=4, dropout=0, activation='relu', output_activation=None)
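
A minimal sketch of a Transformer encoder configuration, assuming input_and_mask_mapping pairs each input key with its padding-mask key (the key names are placeholders):

    from protomotions.agents.common.config import TransformerConfig

    transformer_cfg = TransformerConfig(
        in_keys=["motion_tokens"],
        out_keys=["encoded"],
        input_and_mask_mapping={"motion_tokens": "motion_tokens_mask"},
        transformer_token_size=256,
        latent_dim=256,
        num_heads=4,
        ff_size=1024,
        num_layers=2,
        dropout=0.1,
    )
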