Commit a68b4af

Fix code style with make precommit (#4119)

1 parent 9f0ed8b · commit a68b4af

File tree: 3 files changed, +7 -8 lines
  scripts/generate_zen_dataset.py
  tests/test_sft_trainer.py
  trl/trainer/ppo_trainer.py


scripts/generate_zen_dataset.py

Lines changed: 2 additions & 2 deletions

@@ -338,7 +338,7 @@ def main(test_size, push_to_hub, repo_id):
             "Namespaces are one",
             "Although practicality sometimes beats purity,",
         ],
-        "completions":[
+        "completions": [
             [", let me think...", " ugly."],
             [", of course,", " implicit.", " because clarity matters."],
             ["... let's keep it basic,", " complex."],
@@ -350,7 +350,7 @@ def main(test_size, push_to_hub, repo_id):
             [" some theoretical elegance,", " purity."],
             [" silently,", " unless explicitly silenced."],
             [" the temptation to guess."],
-            [" way to do it,"," but sometimes it's not obvious.", " especially when there's more than one possibility."],
+            [" way to do it,", " but sometimes it's not obvious.", " especially when there's more than one possibility."],
             [" clear at first,", " it will eventually emerge."],
             [" later."],
             [" problematic fixes."],

tests/test_sft_trainer.py

Lines changed: 3 additions & 4 deletions

@@ -1411,10 +1411,9 @@ def test_prompt_tuning(self):
     def test_peft_model_with_quantization(self):
         """SFTTrainer should not freeze layers of existing PeftModel.
 
-        This test simulates a realistic QLoRA scenario where a quantized base model
-        is first converted to a PeftModel, then passed to SFTTrainer. The issue was
-        that prepare_model_for_kbit_training would freeze all parameters including
-        the LoRA adapters, making training impossible.
+        This test simulates a realistic QLoRA scenario where a quantized base model is first converted to a PeftModel,
+        then passed to SFTTrainer. The issue was that prepare_model_for_kbit_training would freeze all parameters
+        including the LoRA adapters, making training impossible.
         """
         # Get the base model
         model_id = "trl-internal-testing/tiny-Qwen2ForCausalLM-2.5"

trl/trainer/ppo_trainer.py

Lines changed: 2 additions & 2 deletions

@@ -271,7 +271,7 @@ def __init__(
         ) # note that we are calling `self.lr_scheduler.step()` manually only at the batch level
 
         #########
-        ### trainer specifics
+        # trainer specifics
         #########
         default_callbacks = DEFAULT_CALLBACKS + get_reporting_integration_callbacks(self.args.report_to)
         self.callbacks = default_callbacks if callbacks is None else default_callbacks + callbacks
@@ -303,7 +303,7 @@ def __init__(
         self.model.add_model_tags(self._tag_names)
 
         #########
-        ### setup dataloader
+        # setup dataloader
         #########
         self.dataloader = DataLoader(
             self.train_dataset,
