28 changes: 28 additions & 0 deletions scripts/generate_tiny_models.py
@@ -59,6 +59,8 @@
    LlavaNextForConditionalGeneration,
    MistralConfig,
    MistralForCausalLM,
    NemotronHConfig,
    NemotronHForCausalLM,
    OPTConfig,
    OPTForCausalLM,
    PaliGemmaForConditionalGeneration,
@@ -227,6 +229,32 @@ def init_weights_tiny_model(model):
init_weights_tiny_model(model)
push_to_hub(model, tokenizer, generation_config, "tiny", suffix)

# Hybrid Mamba-Attention models
tokenizer = AutoTokenizer.from_pretrained("nvidia/NVIDIA-Nemotron-3-Nano-30B-A3B-BF16")
generation_config = GenerationConfig.from_pretrained("nvidia/NVIDIA-Nemotron-3-Nano-30B-A3B-BF16")
config = NemotronHConfig(
    vocab_size=len(tokenizer.vocab),
    hidden_size=16,
    num_attention_heads=4,
    num_key_value_heads=2,
    intermediate_size=32,
    layers_block_type=["mamba", "attention"],  # 2 layers: one Mamba + one Attention
    mamba_num_heads=4,
    mamba_head_dim=8,
    mamba_n_groups=1,
    ssm_state_size=16,
    mamba_d_conv=4,
    mamba_expand=2,
    n_routed_experts=4,
    num_experts_per_tok=2,
    moe_intermediate_size=32,
    moe_shared_expert_intermediate_size=32,
    use_mamba_kernels=False,  # CPU-friendly for testing
)
model = NemotronHForCausalLM(config).to(dtype=torch.bfloat16)
init_weights_tiny_model(model)
push_to_hub(model, tokenizer, generation_config, "tiny")

# Two slightly bigger models, required for vLLM testing
tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen2.5-32B-Instruct")
generation_config = GenerationConfig.from_pretrained("Qwen/Qwen2.5-32B-Instruct")
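Not part of the diff: a minimal sketch of how the resulting tiny checkpoint could be smoke-tested once pushed. It assumes the repo id trl-internal-testing/tiny-NemotronHForCausalLM (the name used by the tests below) and a transformers version with NemotronH support (>= 5.3.0 per the skip markers).

# Illustrative smoke test for the pushed tiny NemotronH model (assumptions noted above).
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

model_id = "trl-internal-testing/tiny-NemotronHForCausalLM"  # repo id used in the tests below
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id)

inputs = tokenizer("Hello, tiny hybrid model!", return_tensors="pt")
with torch.no_grad():
    output_ids = model.generate(**inputs, max_new_tokens=8)
print(tokenizer.decode(output_ids[0], skip_special_tokens=True))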
11 changes: 11 additions & 0 deletions tests/test_dpo_trainer.py
@@ -139,17 +139,28 @@ class TestDPOTrainer(TrlTestCase):
"trl-internal-testing/tiny-Qwen2ForCausalLM-2.5",
"trl-internal-testing/tiny-Qwen3MoeForCausalLM",
"trl-internal-testing/tiny-GptOssForCausalLM",
pytest.param(
"trl-internal-testing/tiny-NemotronHForCausalLM",
marks=pytest.mark.skipif(
Version(transformers.__version__) < Version("5.3.0"),
reason="NemotronH models were introduced in transformers-5.3.0",
),
),
],
)
    def test_train(self, model_id):
        # Get the dataset
        dataset = load_dataset("trl-internal-testing/zen", "standard_preference", split="train")

        # NemotronH does not support gradient checkpointing
        gradient_checkpointing = "NemotronH" not in model_id

        # Initialize the trainer
        training_args = DPOConfig(
            output_dir=self.tmp_dir,
            learning_rate=0.1,  # use higher lr because gradients are tiny and default lr can stall updates
            report_to="none",
            gradient_checkpointing=gradient_checkpointing,
        )
        trainer = DPOTrainer(model=model_id, args=training_args, train_dataset=dataset)

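The same version-gated pytest.param is repeated in tests/test_sft_trainer.py below. As a sketch only (not something this PR does), the gate could be factored into a small helper shared by both test files, for example in the tests' conftest; the helper name here is hypothetical:

# Hypothetical shared helper (not in this PR): one place for the NemotronH skip condition.
import pytest
import transformers
from packaging.version import Version


def tiny_nemotron_h_param():
    """pytest.param for the tiny NemotronH model, skipped on transformers < 5.3.0."""
    return pytest.param(
        "trl-internal-testing/tiny-NemotronHForCausalLM",
        marks=pytest.mark.skipif(
            Version(transformers.__version__) < Version("5.3.0"),
            reason="NemotronH models were introduced in transformers-5.3.0",
        ),
    )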
14 changes: 13 additions & 1 deletion tests/test_sft_trainer.py
@@ -284,14 +284,26 @@ def test_init_with_training_arguments(self):
"trl-internal-testing/tiny-GptOssForCausalLM",
"trl-internal-testing/tiny-Qwen2ForCausalLM-2.5",
"trl-internal-testing/tiny-Qwen3MoeForCausalLM",
pytest.param(
"trl-internal-testing/tiny-NemotronHForCausalLM",
marks=pytest.mark.skipif(
Version(transformers.__version__) < Version("5.3.0"),
reason="NemotronH models were introduced in transformers-5.3.0",
),
),
],
)
    def test_train(self, model_id):
        # Get the dataset
        dataset = load_dataset("trl-internal-testing/zen", "standard_language_modeling", split="train")

        # NemotronH does not support gradient checkpointing
        gradient_checkpointing = "NemotronH" not in model_id

        # Initialize the trainer
        training_args = SFTConfig(output_dir=self.tmp_dir, report_to="none")
        training_args = SFTConfig(
            output_dir=self.tmp_dir, report_to="none", gradient_checkpointing=gradient_checkpointing
        )
        trainer = SFTTrainer(model=model_id, args=training_args, train_dataset=dataset)

        # Save the initial parameters to compare them later
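For completeness, a standalone sketch that mirrors the new SFT test case outside the test harness; the output directory name is arbitrary, and the snippet assumes transformers >= 5.3.0 plus access to the tiny model and the zen dataset:

# Standalone reproduction sketch of the new NemotronH SFT case (assumptions noted above).
from datasets import load_dataset
from trl import SFTConfig, SFTTrainer

dataset = load_dataset("trl-internal-testing/zen", "standard_language_modeling", split="train")

training_args = SFTConfig(
    output_dir="tiny-nemotron-h-sft",  # arbitrary local path, not from the PR
    report_to="none",
    gradient_checkpointing=False,  # NemotronH does not support gradient checkpointing
)
trainer = SFTTrainer(
    model="trl-internal-testing/tiny-NemotronHForCausalLM",
    args=training_args,
    train_dataset=dataset,
)
trainer.train()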