from transformers.configuration_utils import PretrainedConfig


class STLConfig(PretrainedConfig):
    """Configuration for the STL decoder: a small Marian-style encoder-decoder
    transformer (d_model=64 by default, model_type "stldec64")."""

    model_type = "stldec64"
    keys_to_ignore_at_inference = ["past_key_values"]
    # Alias the generic config names onto the Marian-style field names.
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=35,
        decoder_vocab_size=None,
        max_position_embeddings=512,
        encoder_layers=12,
        encoder_ffn_dim=4096,
        encoder_attention_heads=16,
        decoder_layers=12,
        decoder_ffn_dim=4096,
        decoder_attention_heads=16,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        use_cache=True,
        is_encoder_decoder=True,
        activation_function="gelu",
        d_model=64,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        decoder_start_token_id=3,
        scale_embedding=False,
        pad_token_id=1,
        eos_token_id=3,
        bos_token_id=2,
        forced_eos_token_id=3,
        share_encoder_decoder_embeddings=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        # The decoder vocabulary defaults to the encoder's when not given.
        self.decoder_vocab_size = decoder_vocab_size or vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        # If True, embeddings are scaled by sqrt(d_model) in the model.
        self.scale_embedding = scale_embedding
        self.share_encoder_decoder_embeddings = share_encoder_decoder_embeddings
        # Token ids and generation defaults are handled by PretrainedConfig.
        super().__init__(
            bos_token_id=bos_token_id,
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id,
            **kwargs,
        )
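

# ---------------------------------------------------------------------------
# Minimal usage sketch (illustrative, not part of the original module): it
# assumes only that `transformers` is installed, and exercises this config's
# `attribute_map` aliases plus the JSON save/load round trip inherited from
# PretrainedConfig.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    import tempfile

    config = STLConfig()

    # `attribute_map` resolves the generic names onto the Marian-style fields.
    assert config.hidden_size == config.d_model == 64
    assert config.num_attention_heads == config.encoder_attention_heads == 16

    # `decoder_vocab_size` falls back to `vocab_size` when left as None.
    assert config.decoder_vocab_size == config.vocab_size == 35

    # Round-trip through the standard PretrainedConfig serialization.
    with tempfile.TemporaryDirectory() as tmp:
        config.save_pretrained(tmp)  # writes config.json into tmp
        reloaded = STLConfig.from_pretrained(tmp)

    assert reloaded.d_model == config.d_model
    assert reloaded.decoder_start_token_id == config.decoder_start_token_id == 3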