from omegaconf import DictConfig, OmegaConf

from configs.common.train import train  # noqa
from libai.config import LazyCall
from projects.Llama.adapter.adapter_model import LlamaForCausalLM
from projects.Llama.tokenizer import LlamaTokenizer

cfg = dict(
    # Model architecture (matches LLaMA-2-7B: 32 layers, 32 heads, 4096 hidden)
    hidden_act="silu",
    hidden_size=4096,
    initializer_range=0.02,
    intermediate_size=11008,
    max_position_embeddings=4096,
    num_attention_heads=32,
    hidden_layers=32,
    pretraining_tp=1,
    rms_norm_eps=1e-05,
    rope_scaling=None,
    tie_word_embeddings=False,
    vocab_size=32000,
    use_scaled_init_for_output_weights=False,
    scale_mask_softmax_fusion=False,
    amp_enabled=True,  # mixed-precision (AMP) execution
    # Inference / generation defaults. With do_sample=False and num_beams=1
    # this is greedy decoding; temperature/top_k/top_p only take effect once
    # do_sample=True.
    is_encoder_decoder=False,
    max_length=256,
    min_length=0,
    do_sample=False,
    early_stopping=False,
    num_beams=1,
    num_beam_groups=1,
    diversity_penalty=0.0,
    temperature=0.9,
    top_k=50,
    top_p=0.6,
    typical_p=1.0,
    repetition_penalty=1.0,
    length_penalty=1.0,
    no_repeat_ngram_size=0,
    encoder_no_repeat_ngram_size=0,
    num_return_sequences=1,
    chunk_size_feed_forward=0,
    output_scores=False,
    use_cache=True,
    bos_token_id=1,
    eos_token_id=2,
    pad_token_id=0,
    # Adapter (LLaMA-Adapter): prepend adapter_len learnable prompt tokens to
    # each of the top adapter_layer transformer layers (here 30 of 32 layers)
    adapter_len=10,
    adapter_layer=30,
    # Train
    pretrained_model_path="meta-llama/Llama-2-7b-hf/",
)

cfg = DictConfig(cfg)

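# Since `cfg` is now an OmegaConf DictConfig, downstream configs can override
# individual fields before instantiation. A hedged sketch; the overrides below
# are illustrative only:
#
#   from projects.Llama.adapter.adapter_config import cfg, model
#   cfg.max_length = 512   # larger generation budget
#   cfg.do_sample = True   # let temperature/top_k/top_p take effect
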
model = LazyCall(LlamaForCausalLM)(cfg=cfg)

tokenization = OmegaConf.create()
tokenization.make_vocab_size_divisible_by = 1
tokenization.tokenizer = LazyCall(LlamaTokenizer)(
    # Local path to the SentencePiece model shipped with the LLaMA-2 checkpoint
    pretrained_model_path="Llama-2-7b-hf/tokenizer.model"
)
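
# Usage sketch (illustrative; not executed when this file is loaded as a
# LazyConfig): assuming LiBai exposes a detectron2-style `instantiate` in
# libai.config, the lazily declared objects above are materialized like so.
if __name__ == "__main__":
    from libai.config import instantiate

    tokenizer = instantiate(tokenization.tokenizer)  # builds LlamaTokenizer
    llama = instantiate(model)  # builds LlamaForCausalLM(cfg=cfg)
    print(type(tokenizer), type(llama))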