We read every piece of feedback, and take your input very seriously.
To see all available qualifiers, see our documentation.
There was an error while loading. Please reload this page.
1 parent 2904183 commit 10618bc
QEfficient/cloud/finetune.py
@@ -30,6 +30,7 @@
30
get_preprocessed_dataset,
31
)
32
from QEfficient.finetune.utils.train_utils import get_longest_seq_length, print_model_size, train
33
+from QEfficient.utils._utils import login_and_download_hf_lm
34
35
try:
36
import torch_qaic # noqa: F401
@@ -76,8 +77,9 @@ def main(**kwargs):
76
77
78
# Load the pre-trained model and setup its configuration
79
# config = AutoConfig.from_pretrained(train_config.model_name)
80
+ pretrained_model_path = login_and_download_hf_lm(train_config.model_name)
81
model = AutoModelForCausalLM.from_pretrained(
- train_config.model_name,
82
+ pretrained_model_path,
83
use_cache=False,
84
attn_implementation="sdpa",
85
torch_dtype=torch.float16,
0 commit comments