configuration_metala.py
# coding=utf-8
"""MetaLA configuration."""

from transformers.configuration_utils import PretrainedConfig
from transformers.utils import logging

logger = logging.get_logger(__name__)

LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP = {}


class MetaLAConfig(PretrainedConfig):
    """Configuration class for the MetaLA model.

    Holds the hyperparameters used to instantiate a MetaLA decoder:
    vocabulary size and special token ids, embedding dimension, number of
    decoder layers and attention heads, GLU activation and dimension, and
    the normalization type.
    """

    model_type = "metala"
    keys_to_ignore_at_inference = ["past_key_values"]

    def __init__(
        self,
        pad_token_id=2,
        bos_token_id=1,
        eos_token_id=2,
        vocab_size=32000,
        use_cache=True,
        init_std=0.02,
        # model architecture
        decoder_embed_dim=1024,
        decoder_layers=24,
        decoder_attention_heads=8,
        add_bos_token=False,
        causal=True,
        glu_act="silu",
        glu_dim=5632,
        bias=False,
        norm_type="simplermsnorm",
        no_scale_embedding=True,
        **kwargs,
    ):
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            **kwargs,
        )
        # standard Hugging Face config fields
        self.vocab_size = vocab_size
        self.use_cache = use_cache
        self.init_std = init_std
        # MetaLA-specific fields
        self.decoder_embed_dim = decoder_embed_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.add_bos_token = add_bos_token
        self.causal = causal
        self.glu_act = glu_act
        self.glu_dim = glu_dim
        self.bias = bias
        self.norm_type = norm_type
        self.no_scale_embedding = no_scale_embedding
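

# -----------------------------------------------------------------------------
# Usage sketch: one way MetaLAConfig can be instantiated and round-tripped
# through the standard PretrainedConfig serialization API. The override values
# and the "metala-small" directory name are illustrative assumptions, not
# settings defined by this file.
# -----------------------------------------------------------------------------
if __name__ == "__main__":
    # Default configuration: 24 decoder layers, 1024-dim embeddings, 32k vocab.
    config = MetaLAConfig()

    # Override selected hyperparameters; any extra keyword arguments are
    # forwarded to PretrainedConfig via **kwargs.
    small_config = MetaLAConfig(
        decoder_layers=12,
        decoder_embed_dim=512,
        glu_dim=2816,
    )

    # PretrainedConfig provides JSON (de)serialization out of the box.
    small_config.save_pretrained("metala-small")
    reloaded = MetaLAConfig.from_pretrained("metala-small")
    assert reloaded.decoder_layers == 12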