
Commit 68bc4ed

hotfixes with metrics
1 parent 06a1595 commit 68bc4ed

File tree: 4 files changed (+243, -74 lines)


src/llamafactory/train/sft/metric.py

Lines changed: 1 addition & 1 deletion

@@ -112,7 +112,7 @@ def pscp(
     cost_memory = _pscp_cost(memory, c_m, beta_m)

     value = performance * cost_params * cost_flops * cost_memory
-    return float(np.round(value, 2))
+    return float(np.round(value, 6))


 def f1(preds, targets, valid_labels: Optional[list[str]] = None):
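
The one-line change above widens the rounding of the composite pscp score from two to six decimal places. Because the score is a product of a performance term and several cost factors that are typically well below 1, two decimals can collapse nearby configurations onto the same value. A minimal sketch of the effect, using a hypothetical stand-in for _pscp_cost (the real cost function is not shown in this diff):

import numpy as np


def _pscp_cost_stub(resource: float, c: float, beta: float) -> float:
    # Hypothetical stand-in: any cost factor in (0, 1] that shrinks as resource usage grows.
    return float(np.exp(-beta * resource / c))


performance = 0.87
cost_params = _pscp_cost_stub(7e9, 1e10, 1.0)
cost_flops = _pscp_cost_stub(3e12, 1e13, 1.0)
cost_memory = _pscp_cost_stub(16.0, 80.0, 1.0)

value = performance * cost_params * cost_flops * cost_memory
print(np.round(value, 2))  # two decimals: nearby configurations collapse to the same score
print(np.round(value, 6))  # six decimals: small cost differences stay distinguishable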

src/llamafactory/webui/components/eval.py

Lines changed: 114 additions & 0 deletions

@@ -12,11 +12,18 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.

+from dataclasses import fields
 from typing import TYPE_CHECKING

+from ...extras.constants import (
+    ADAPTERS_CONFIG_MAPPING,
+    CUSTOM_PEFT_CONFIG_MAPPING,
+    PEFT_CONFIG_MAPPING,
+)
 from ...extras.packages import is_gradio_available
 from ..common import DEFAULT_DATA_DIR
 from ..control import list_datasets
+from ..locales import LOCALES
 from .data import create_preview_box


@@ -74,6 +81,113 @@ def create_eval_tab(engine: "Engine") -> dict[str, "Component"]:
         )
     )

+    with gr.Accordion(open=False) as peft_tab:
+        with gr.Row():
+            task_type = gr.Dropdown(
+                choices=["SEQ_CLS", "SEQ_2_SEQ_LM", "CAUSAL_LM", "TOKEN_CLS", "QUESTION_ANS", "FEATURE_EXTRACTION"],
+                value="CAUSAL_LM",
+            )
+            inference_mode = gr.Checkbox()
+
+        elem_dict.update(
+            dict(
+                peft_tab=peft_tab,
+                task_type=task_type,
+                inference_mode=inference_mode,
+            )
+        )
+
+        input_elems.update(
+            {
+                task_type,
+                inference_mode,
+            }
+        )
+
+        peft_common_config_values = [
+            "base_model_name_or_path",
+            "revision",
+            "peft_type",
+            "task_type",
+            "inference_mode",
+            "auto_mapping",
+            "num_transformer_submodules",
+            "num_attention_heads",
+            "num_layers",
+            "modules_to_save",
+            "token_dim",
+        ]
+        for peft_config_name in PEFT_CONFIG_MAPPING:
+            with gr.Accordion(open=False) as peft_method_tab:
+                peft_name = peft_config_name.lower().replace(" ", "_")
+
+                elem_dict.update({peft_name: peft_method_tab})
+
+                LOCALES.update({peft_name: {"en": {"label": f"{peft_config_name} configurations"}}})
+
+                for field in fields(PEFT_CONFIG_MAPPING[peft_config_name]):
+                    if field.name in peft_common_config_values:
+                        continue
+
+                    with gr.Row():
+                        if field.type is bool:
+                            elem = gr.Checkbox()
+                        if field.type in [float, int]:
+                            elem = gr.Number()
+                        else:
+                            elem = gr.Textbox()
+
+                    elem_dict.update({f"{peft_name}_{field.name}": elem})
+                    input_elems.update({elem})
+
+                    LOCALES.update({f"{peft_name}_{field.name}": {"en": {"label": field.name}}})
+
+        for peft_config_name in ADAPTERS_CONFIG_MAPPING:
+            with gr.Accordion(open=False) as peft_method_tab:
+                peft_name = peft_config_name.lower().replace(" ", "_")
+
+                elem_dict.update({peft_name: peft_method_tab})
+
+                LOCALES.update({peft_name: {"en": {"label": f"{peft_config_name} configurations"}}})
+
+                for field in fields(ADAPTERS_CONFIG_MAPPING[peft_config_name]):
+                    if field.name in peft_common_config_values:
+                        continue
+
+                    with gr.Row():
+                        if field.type is bool:
+                            elem = gr.Checkbox()
+                        else:
+                            elem = gr.Textbox()
+
+                    elem_dict.update({f"{peft_name}_{field.name}": elem})
+                    input_elems.update({elem})
+
+                    LOCALES.update({f"{peft_name}_{field.name}": {"en": {"label": field.name}}})
+
+        for peft_config_name in CUSTOM_PEFT_CONFIG_MAPPING:
+            with gr.Accordion(open=False) as peft_method_tab:
+                peft_name = peft_config_name.lower().replace(" ", "_")
+
+                elem_dict.update({peft_name: peft_method_tab})
+
+                LOCALES.update({peft_name: {"en": {"label": f"{peft_config_name} configurations"}}})
+
+                for field in fields(CUSTOM_PEFT_CONFIG_MAPPING[peft_config_name]):
+                    if field.name in peft_common_config_values:
+                        continue
+
+                    with gr.Row():
+                        if field.type is bool:
+                            elem = gr.Checkbox()
+                        else:
+                            elem = gr.Textbox()
+
+                    elem_dict.update({f"{peft_name}_{field.name}": elem})
+                    input_elems.update({elem})
+
+                    LOCALES.update({f"{peft_name}_{field.name}": {"en": {"label": field.name}}})
+
     with gr.Row():
         cutoff_len = gr.Slider(minimum=4, maximum=131072, value=1024, step=1)
         max_samples = gr.Textbox(value="100000")
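
Every block added above follows the same pattern: iterate over a mapping of PEFT config dataclasses, skip the fields shared by all configs (peft_common_config_values), pick a Gradio widget from each field's annotated type, and register it in elem_dict, input_elems, and LOCALES. A self-contained sketch of that pattern, using a made-up DemoLoraConfig dataclass in place of the real PEFT_CONFIG_MAPPING entries; the sketch chains the type checks with elif so a bool field keeps its Checkbox rather than falling through to the Textbox branch:

from dataclasses import dataclass, fields

import gradio as gr


@dataclass
class DemoLoraConfig:  # hypothetical stand-in for one PEFT_CONFIG_MAPPING entry
    r: int = 8
    lora_alpha: int = 16
    lora_dropout: float = 0.05
    use_rslora: bool = False
    target_modules: str = "['q_proj', 'v_proj']"


def build_config_inputs(config_cls, skip=("task_type", "inference_mode")):
    """Create one Gradio input per dataclass field, keyed by field name."""
    elems = {}
    for field in fields(config_cls):
        if field.name in skip:
            continue
        if field.type is bool:
            elem = gr.Checkbox(value=field.default, label=field.name)
        elif field.type in (int, float):  # elif keeps the Checkbox for bool fields
            elem = gr.Number(value=field.default, label=field.name)
        else:
            elem = gr.Textbox(value=str(field.default), label=field.name)
        elems[field.name] = elem
    return elems


with gr.Blocks() as demo:
    with gr.Accordion("DemoLoraConfig configurations", open=False):
        config_inputs = build_config_inputs(DemoLoraConfig)

# demo.launch()  # uncomment to serve the generated form locally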

src/llamafactory/webui/components/train.py

Lines changed: 75 additions & 73 deletions

@@ -226,102 +226,104 @@ def create_train_tab(engine: "Engine") -> dict[str, "Component"]:
             )
             inference_mode = gr.Checkbox()

-    elem_dict.update(
-        dict(
-            peft_tab=peft_tab,
-            task_type=task_type,
-            inference_mode=inference_mode,
+        elem_dict.update(
+            dict(
+                peft_tab=peft_tab,
+                task_type=task_type,
+                inference_mode=inference_mode,
+            )
         )
-    )

-    input_elems.update(
-        {
-            task_type,
-            inference_mode,
-        }
-    )
+        input_elems.update(
+            {
+                task_type,
+                inference_mode,
+            }
+        )

-    peft_common_config_values = [
-        "base_model_name_or_path",
-        "revision",
-        "peft_type",
-        "task_type",
-        "inference_mode",
-        "auto_mapping",
-        "num_transformer_submodules",
-        "num_attention_heads",
-        "num_layers",
-        "modules_to_save",
-        "token_dim",
-    ]
-    for peft_config_name in PEFT_CONFIG_MAPPING:
-        with gr.Accordion(open=False) as peft_method_tab:
-            peft_name = peft_config_name.lower().replace(" ", "_")
+        peft_common_config_values = [
+            "base_model_name_or_path",
+            "revision",
+            "peft_type",
+            "task_type",
+            "inference_mode",
+            "auto_mapping",
+            "num_transformer_submodules",
+            "num_attention_heads",
+            "num_layers",
+            "modules_to_save",
+            "token_dim",
+        ]
+        for peft_config_name in PEFT_CONFIG_MAPPING:
+            with gr.Accordion(open=False) as peft_method_tab:
+                peft_name = peft_config_name.lower().replace(" ", "_")

-            elem_dict.update({peft_name: peft_method_tab})
+                elem_dict.update({peft_name: peft_method_tab})

-            LOCALES.update({peft_name: {"en": {"label": f"{peft_config_name} configurations"}}})
+                LOCALES.update({peft_name: {"en": {"label": f"{peft_config_name} configurations"}}})

-            for field in fields(PEFT_CONFIG_MAPPING[peft_config_name]):
-                if field.name in peft_common_config_values:
-                    continue
+                for field in fields(PEFT_CONFIG_MAPPING[peft_config_name]):
+                    if field.name in peft_common_config_values:
+                        continue

-                with gr.Row():
-                    if field.type is bool:
-                        elem = gr.Checkbox()
-                    else:
-                        elem = gr.Textbox()
+                    with gr.Row():
+                        if field.type is bool:
+                            elem = gr.Checkbox()
+                        if field.type in [float, int]:
+                            elem = gr.Number()
+                        else:
+                            elem = gr.Textbox()

-                elem_dict.update({f"{peft_name}_{field.name}": elem})
-                input_elems.update({elem})
+                    elem_dict.update({f"{peft_name}_{field.name}": elem})
+                    input_elems.update({elem})

-                LOCALES.update({f"{peft_name}_{field.name}": {"en": {"label": field.name}}})
+                    LOCALES.update({f"{peft_name}_{field.name}": {"en": {"label": field.name}}})

-    for peft_config_name in ADAPTERS_CONFIG_MAPPING:
-        with gr.Accordion(open=False) as peft_method_tab:
-            peft_name = peft_config_name.lower().replace(" ", "_")
+        for peft_config_name in ADAPTERS_CONFIG_MAPPING:
+            with gr.Accordion(open=False) as peft_method_tab:
+                peft_name = peft_config_name.lower().replace(" ", "_")

-            elem_dict.update({peft_name: peft_method_tab})
+                elem_dict.update({peft_name: peft_method_tab})

-            LOCALES.update({peft_name: {"en": {"label": f"{peft_config_name} configurations"}}})
+                LOCALES.update({peft_name: {"en": {"label": f"{peft_config_name} configurations"}}})

-            for field in fields(ADAPTERS_CONFIG_MAPPING[peft_config_name]):
-                if field.name in peft_common_config_values:
-                    continue
+                for field in fields(ADAPTERS_CONFIG_MAPPING[peft_config_name]):
+                    if field.name in peft_common_config_values:
+                        continue

-                with gr.Row():
-                    if field.type is bool:
-                        elem = gr.Checkbox()
-                    else:
-                        elem = gr.Textbox()
+                    with gr.Row():
+                        if field.type is bool:
+                            elem = gr.Checkbox()
+                        else:
+                            elem = gr.Textbox()

-                elem_dict.update({f"{peft_name}_{field.name}": elem})
-                input_elems.update({elem})
+                    elem_dict.update({f"{peft_name}_{field.name}": elem})
+                    input_elems.update({elem})

-                LOCALES.update({f"{peft_name}_{field.name}": {"en": {"label": field.name}}})
+                    LOCALES.update({f"{peft_name}_{field.name}": {"en": {"label": field.name}}})

-    for peft_config_name in CUSTOM_PEFT_CONFIG_MAPPING:
-        with gr.Accordion(open=False) as peft_method_tab:
-            peft_name = peft_config_name.lower().replace(" ", "_")
+        for peft_config_name in CUSTOM_PEFT_CONFIG_MAPPING:
+            with gr.Accordion(open=False) as peft_method_tab:
+                peft_name = peft_config_name.lower().replace(" ", "_")

-            elem_dict.update({peft_name: peft_method_tab})
+                elem_dict.update({peft_name: peft_method_tab})

-            LOCALES.update({peft_name: {"en": {"label": f"{peft_config_name} configurations"}}})
+                LOCALES.update({peft_name: {"en": {"label": f"{peft_config_name} configurations"}}})

-            for field in fields(CUSTOM_PEFT_CONFIG_MAPPING[peft_config_name]):
-                if field.name in peft_common_config_values:
-                    continue
+                for field in fields(CUSTOM_PEFT_CONFIG_MAPPING[peft_config_name]):
+                    if field.name in peft_common_config_values:
+                        continue

-                with gr.Row():
-                    if field.type is bool:
-                        elem = gr.Checkbox()
-                    else:
-                        elem = gr.Textbox()
+                    with gr.Row():
+                        if field.type is bool:
+                            elem = gr.Checkbox()
+                        else:
+                            elem = gr.Textbox()

-                elem_dict.update({f"{peft_name}_{field.name}": elem})
-                input_elems.update({elem})
+                    elem_dict.update({f"{peft_name}_{field.name}": elem})
+                    input_elems.update({elem})

-                LOCALES.update({f"{peft_name}_{field.name}": {"en": {"label": field.name}}})
+                    LOCALES.update({f"{peft_name}_{field.name}": {"en": {"label": field.name}}})

     with gr.Accordion(open=False) as rlhf_tab:
         with gr.Row():
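
The substantive change in this hunk mirrors eval.py: besides re-indenting the existing PEFT block, it adds a gr.Number branch so numeric config fields no longer fall through to a Textbox. A small illustrative sketch (not part of the commit) of why that matters downstream: gr.Number hands the callback an int or float, while gr.Textbox always hands back a string.

import gradio as gr


def echo_types(rank, dropout):
    # gr.Number delivers numeric Python types directly; a Textbox would deliver "8" / "0.05" as str.
    return f"rank={rank!r} ({type(rank).__name__}), dropout={dropout!r} ({type(dropout).__name__})"


with gr.Blocks() as demo:
    rank = gr.Number(value=8, precision=0, label="lora_rank")  # precision=0 -> int
    dropout = gr.Number(value=0.05, label="lora_dropout")  # float
    out = gr.Textbox(label="received values")
    gr.Button("show").click(echo_types, inputs=[rank, dropout], outputs=out)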

src/llamafactory/webui/runner.py

Lines changed: 53 additions & 0 deletions

@@ -313,6 +313,9 @@ def _parse_train_args(self, data: dict["Component", Any]) -> dict[str, Any]:
         ]

         if args["finetuning_type"] in PEFT_CONFIG_MAPPING:
+            args["task_type"] = get("train.task_type")
+            args["inference_mode"] = get("train.inference_mode")
+
             for field in fields(PEFT_CONFIG_MAPPING[args["finetuning_type"]]):
                 if field.name in peft_common_config_values:
                     continue

@@ -398,6 +401,8 @@ def _parse_eval_args(self, data: dict["Component", Any]) -> dict[str, Any]:
                 args["adapter_name_or_path"] = ",".join(
                     [get_save_dir(model_name, finetuning_type, adapter) for adapter in get("top.checkpoint_path")]
                 )
+
+                args["task_type"] = get("eval.task_type")
             else:  # str
                 args["model_name_or_path"] = get_save_dir(model_name, finetuning_type, get("top.checkpoint_path"))

@@ -407,6 +412,54 @@
         args["quantization_method"] = get("top.quantization_method")
         args["double_quantization"] = not is_torch_npu_available()

+        # peft config
+        peft_common_config_values = [
+            "base_model_name_or_path",
+            "revision",
+            "peft_type",
+            "task_type",
+            "inference_mode",
+            "auto_mapping",
+            "num_transformer_submodules",
+            "num_attention_heads",
+            "num_layers",
+            "modules_to_save",
+            "token_dim",
+        ]
+
+        if args["finetuning_type"] in PEFT_CONFIG_MAPPING:
+            args["task_type"] = get("eval.task_type")
+            args["inference_mode"] = get("eval.inference_mode")
+
+            for field in fields(PEFT_CONFIG_MAPPING[args["finetuning_type"]]):
+                if field.name in peft_common_config_values:
+                    continue
+
+                if field.name == "target_modules":
+                    args[field.name] = ast.literal_eval(get(f"eval.{args['finetuning_type']}_{field.name}"))
+                else:
+                    args[field.name] = get(f"eval.{args['finetuning_type']}_{field.name}")
+
+        elif args["finetuning_type"] in ADAPTERS_CONFIG_MAPPING:
+            for field in fields(ADAPTERS_CONFIG_MAPPING[args["finetuning_type"]]):
+                if field.name in peft_common_config_values:
+                    continue
+
+                if field.name == "target_modules":
+                    args[field.name] = ast.literal_eval(get(f"eval.{args['finetuning_type']}_{field.name}"))
+                else:
+                    args[field.name] = get(f"eval.{args['finetuning_type']}_{field.name}")
+
+        elif args["finetuning_type"] in CUSTOM_PEFT_CONFIG_MAPPING:
+            for field in fields(CUSTOM_PEFT_CONFIG_MAPPING[args["finetuning_type"]]):
+                if field.name in peft_common_config_values:
+                    continue
+
+                if field.name == "target_modules":
+                    args[field.name] = ast.literal_eval(get(f"eval.{args['finetuning_type']}_{field.name}"))
+                else:
+                    args[field.name] = get(f"eval.{args['finetuning_type']}_{field.name}")
+
         return args

     def _preview(self, data: dict["Component", Any], do_train: bool) -> Generator[dict["Component", str], None, None]:
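
The eval-side parsing now mirrors the train path: shared PEFT fields are skipped, every remaining field is read from its eval.* component, and target_modules goes through ast.literal_eval so the string typed into the web UI becomes a real Python list. A standalone sketch of that round trip, with a made-up input string:

import ast

# What a user might type into the target_modules textbox in the web UI.
raw = '["q_proj", "k_proj", "v_proj", "o_proj"]'

# literal_eval safely parses Python literals (lists, strings, numbers, ...)
# without executing arbitrary code, unlike eval().
target_modules = ast.literal_eval(raw)
print(target_modules)        # ['q_proj', 'k_proj', 'v_proj', 'o_proj']
print(type(target_modules))  # <class 'list'>

# A malformed entry raises ValueError/SyntaxError instead of silently passing through.
try:
    ast.literal_eval("q_proj, v_proj")  # not a valid Python literal
except (ValueError, SyntaxError) as exc:
    print(f"rejected: {exc}")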
