Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
Show all changes
135 commits
Select commit Hold shift + click to select a range
17745d5
Add trace based formatter
chiang-daniel Oct 21, 2025
08028ee
Add support for OPENAI_CHAT_JSON_SCHEMA_JSONL
chiang-daniel Oct 22, 2025
4929475
use supplied sys prompt
chiang-daniel Oct 23, 2025
08964f8
Add json validation helper
chiang-daniel Oct 23, 2025
7ba3b67
implement OPENAI_CHAT_TOOLCALL_JSONL
chiang-daniel Oct 23, 2025
24e0244
implement hugging face
chiang-daniel Oct 23, 2025
6ee12b9
Add support for Vertex
chiang-daniel Oct 27, 2025
1d020fa
Add test
chiang-daniel Oct 27, 2025
51fede6
better error message
chiang-daniel Oct 28, 2025
3055147
Add tests
chiang-daniel Oct 28, 2025
ebf48ee
Fix hugging face
chiang-daniel Oct 28, 2025
06c164e
Add test for HUGGINGFACE_CHAT_TEMPLATE_TOOLCALL_JSONL
chiang-daniel Oct 28, 2025
917f6c9
typo
chiang-daniel Oct 28, 2025
c34d086
CoderRabbit
chiang-daniel Oct 28, 2025
e9be634
revert back to old HF format
chiang-daniel Oct 28, 2025
9f4e483
better function name tracking
chiang-daniel Oct 29, 2025
c674e6b
fine-tune vertex with parallel tool calls
chiang-daniel Oct 29, 2025
6b60f8e
flush user messages as well
chiang-daniel Oct 29, 2025
cf3a91f
coder rabbit
chiang-daniel Oct 29, 2025
fae29d2
clean up
chiang-daniel Oct 29, 2025
f17be0a
feedback
chiang-daniel Oct 30, 2025
460c251
Add trace based formatter
chiang-daniel Oct 21, 2025
c529471
Add support for OPENAI_CHAT_JSON_SCHEMA_JSONL
chiang-daniel Oct 22, 2025
2a6b3e3
use supplied sys prompt
chiang-daniel Oct 23, 2025
e4bc4f5
Add json validation helper
chiang-daniel Oct 23, 2025
04b30e9
implement OPENAI_CHAT_TOOLCALL_JSONL
chiang-daniel Oct 23, 2025
c78789b
implement hugging face
chiang-daniel Oct 23, 2025
1b71eca
Add support for Vertex
chiang-daniel Oct 27, 2025
17f946f
Add test
chiang-daniel Oct 27, 2025
d606e08
better error message
chiang-daniel Oct 28, 2025
65b9e14
Add tests
chiang-daniel Oct 28, 2025
3d167cc
Fix hugging face
chiang-daniel Oct 28, 2025
84d2481
Add test for HUGGINGFACE_CHAT_TEMPLATE_TOOLCALL_JSONL
chiang-daniel Oct 28, 2025
8db7b95
typo
chiang-daniel Oct 28, 2025
4582f7a
CoderRabbit
chiang-daniel Oct 28, 2025
c08bf83
revert back to old HF format
chiang-daniel Oct 28, 2025
6b75eff
Add run-config creation
chiang-daniel Oct 31, 2025
3d2d559
Add run-config to fine-tune
chiang-daniel Nov 1, 2025
af6c9b0
Filter models based on tool support
chiang-daniel Nov 3, 2025
3a24615
fix
chiang-daniel Nov 3, 2025
d09df97
hide tools when using a model that doesn't support tools
chiang-daniel Nov 3, 2025
7c76e2d
Add tool info for dataset
chiang-daniel Nov 4, 2025
1eb06c7
filter existing dataset based on tools selected
chiang-daniel Nov 5, 2025
f5bae58
show disabled dataset
chiang-daniel Nov 5, 2025
6331a6b
Add trace based formatter
chiang-daniel Oct 21, 2025
1bbce5f
Add support for OPENAI_CHAT_JSON_SCHEMA_JSONL
chiang-daniel Oct 22, 2025
71c4e48
use supplied sys prompt
chiang-daniel Oct 23, 2025
96944f5
Add json validation helper
chiang-daniel Oct 23, 2025
dc709c6
implement OPENAI_CHAT_TOOLCALL_JSONL
chiang-daniel Oct 23, 2025
c9f09e0
implement hugging face
chiang-daniel Oct 23, 2025
c25044d
Add support for Vertex
chiang-daniel Oct 27, 2025
7ad3f1d
Add test
chiang-daniel Oct 27, 2025
deeb72e
better error message
chiang-daniel Oct 28, 2025
1e5eb5b
Add tests
chiang-daniel Oct 28, 2025
bcfeed5
Fix hugging face
chiang-daniel Oct 28, 2025
fdf6041
Add test for HUGGINGFACE_CHAT_TEMPLATE_TOOLCALL_JSONL
chiang-daniel Oct 28, 2025
f2eaaa1
typo
chiang-daniel Oct 28, 2025
ed07487
CoderRabbit
chiang-daniel Oct 28, 2025
d50823b
revert back to old HF format
chiang-daniel Oct 28, 2025
11d15c5
better function name tracking
chiang-daniel Oct 29, 2025
c0377dc
fine-tune vertex with parallel tool calls
chiang-daniel Oct 29, 2025
b87ca11
flush user messages as well
chiang-daniel Oct 29, 2025
de340e3
coder rabbit
chiang-daniel Oct 29, 2025
93860d1
clean up
chiang-daniel Oct 29, 2025
3c6da68
feedback
chiang-daniel Oct 30, 2025
a7bed2b
Merge branch 'dchiang/KIL-175/trace-in-fine-tune' of https://github.c…
chiang-daniel Nov 5, 2025
d9eae60
Add run-config creation
chiang-daniel Oct 31, 2025
5fc25dd
Add run-config to fine-tune
chiang-daniel Nov 1, 2025
400856d
Filter models based on tool support
chiang-daniel Nov 3, 2025
8274399
fix
chiang-daniel Nov 3, 2025
f533d87
hide tools when using a model that doesn't support tools
chiang-daniel Nov 3, 2025
2a228c0
Add tool info for dataset
chiang-daniel Nov 4, 2025
ba3887e
filter existing dataset based on tools selected
chiang-daniel Nov 5, 2025
2f20613
show disabled dataset
chiang-daniel Nov 5, 2025
f7eab95
Add API to filter fine-tune dataset with tools
chiang-daniel Nov 6, 2025
b81dacd
use fancy select
chiang-daniel Nov 6, 2025
44ba44c
Merge branch 'dchiang/KIL-177/save-run-in-finetune' of https://github…
chiang-daniel Nov 7, 2025
c27c779
Default to create new data sets
chiang-daniel Nov 7, 2025
5e6722e
make dataset formatter async
chiang-daniel Nov 11, 2025
6d8ae04
add support for tools
chiang-daniel Nov 11, 2025
8eb92ac
add test
chiang-daniel Nov 11, 2025
c0c058b
add openai jsonl
chiang-daniel Nov 11, 2025
8c4f32e
fix vertex with tools
chiang-daniel Nov 11, 2025
3f11ddb
update UI
chiang-daniel Nov 11, 2025
6c2cb6f
fix prompt selector not being plumbed properly
chiang-daniel Nov 14, 2025
19b4760
Add basic types to chat formatter
chiang-daniel Nov 15, 2025
5895fdc
add build_tool_call_messages
chiang-daniel Nov 15, 2025
b678d93
plumb open ai format
chiang-daniel Nov 16, 2025
711e58f
Add test for open ai and open ai jsonl
chiang-daniel Nov 17, 2025
686c2e4
Add test
chiang-daniel Nov 17, 2025
3813b65
add vertex
chiang-daniel Nov 17, 2025
3ae6ee6
disable tool selector
chiang-daniel Nov 18, 2025
0198264
UI polish
chiang-daniel Nov 18, 2025
89537ba
fix spacing
chiang-daniel Nov 18, 2025
1c29ca0
Merge branch 'main' into dchiang/KIL-177/save-run-in-finetune
chiang-daniel Nov 18, 2025
62fd7d2
revert select fine-tune dataset
chiang-daniel Nov 18, 2025
5dac78e
plumb selected tools through SDG
chiang-daniel Nov 18, 2025
786e707
disable together ai
chiang-daniel Nov 18, 2025
dec38e6
remove trace based dataset formatter
chiang-daniel Nov 18, 2025
c65ae91
Add analytics
chiang-daniel Nov 19, 2025
89001c7
filter dataset based on tool selection
chiang-daniel Nov 19, 2025
3365bc5
update UI message
chiang-daniel Nov 19, 2025
edce5d1
use undefined to required_tool_ids
chiang-daniel Nov 19, 2025
c5c5c53
store page data
chiang-daniel Nov 20, 2025
d629171
clean up
chiang-daniel Nov 20, 2025
32926bd
use empty string for tool call content
chiang-daniel Nov 20, 2025
86e6fd5
ui tweaks
sfierro Nov 20, 2025
4db9cb9
sam's changes to refactor tool selector to take in settings prop and …
sfierro Nov 20, 2025
0c33a74
default tools selector settings
sfierro Nov 20, 2025
0121f9d
fix empty tool label
chiang-daniel Nov 21, 2025
ecc83aa
fix select existing dataset is visible when there are dataset split w…
chiang-daniel Nov 21, 2025
f0dd0bf
coder rabbit
chiang-daniel Nov 21, 2025
b280e4d
coder rabbit
chiang-daniel Nov 21, 2025
bfe2448
add caching for tools so we don't make extra server calls
chiang-daniel Nov 21, 2025
cf1d93f
Add reset & doc button to match SDG
chiang-daniel Nov 21, 2025
4aa1daf
remove logging
chiang-daniel Nov 21, 2025
3fe4284
coder rabbit
chiang-daniel Nov 21, 2025
c0d90a1
UI CR from Leonard
chiang-daniel Nov 21, 2025
ce72f0b
optional on the form
chiang-daniel Nov 21, 2025
f4aa708
move reasoning dataset to after the dataset has been selected
chiang-daniel Nov 21, 2025
f6b7382
increase timeout on fireworks finetune, 5s http timeout is not enough…
chiang-daniel Nov 21, 2025
2f1fd4a
add finetune run config to run page selection
chiang-daniel Nov 26, 2025
c76dc65
typo
chiang-daniel Nov 26, 2025
178c710
reload store when new fine-tune is created
chiang-daniel Nov 26, 2025
fe1569f
Merge branch 'main' into dchiang/KIL-177/save-run-in-finetune
chiang-daniel Nov 26, 2025
cf7faae
save model name and provider
chiang-daniel Nov 27, 2025
dcb8ff2
persist prompt id & structured_output_mode
chiang-daniel Nov 27, 2025
27dc088
Clean fine-tuning UI for fine-tune-run-configs. Also lots of concurre…
scosman Nov 28, 2025
a13e08e
Merge branch 'dchiang/KIL-177/save-run-in-finetune' into scosman/run_…
chiang-daniel Nov 28, 2025
4357c16
remove todos
chiang-daniel Nov 28, 2025
86dcd0c
fix typo
chiang-daniel Nov 28, 2025
f4b4b99
fix issues where model/run-config not loaded on first select
chiang-daniel Nov 28, 2025
76f39c3
typecheck fix
scosman Nov 28, 2025
bece629
don't reload run_configs constantly in UI. Only force reload if force…
scosman Nov 28, 2025
4adf7f4
Merge pull request #865 from Kiln-AI/scosman/run_config_ui_updates_v2
scosman Nov 28, 2025
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
21 changes: 21 additions & 0 deletions app/desktop/studio_server/eval_api.py
Original file line number Diff line number Diff line change
Expand Up @@ -322,6 +322,27 @@ async def get_task_run_configs(
task = task_from_id(project_id, task_id)
return task.run_configs()

@app.get("/api/projects/{project_id}/tasks/{task_id}/run_configs/")
async def get_run_configs(project_id: str, task_id: str) -> list[TaskRunConfig]:
    """Return every run config for a task.

    Combines the task's saved run configs with synthetic entries for any
    fine-tune that carries its own baked-in run config.
    """
    task = task_from_id(project_id, task_id)
    run_configs = task.run_configs()

    # Surface fine-tune run configs as synthetic TaskRunConfig entries. The id
    # format "finetune_run_config::project::task::finetune" distinguishes them
    # from regular saved configs.
    run_configs.extend(
        TaskRunConfig(
            id=f"finetune_run_config::{project_id}::{task_id}::{ft.id}",
            name=ft.name,
            description=ft.description,
            run_config_properties=ft.run_config,
        )
        for ft in task.finetunes()
        if ft.run_config is not None
    )

    return run_configs

@app.get("/api/projects/{project_id}/tasks/{task_id}/eval/{eval_id}")
async def get_eval(project_id: str, task_id: str, eval_id: str) -> Eval:
    # Look up a single eval by id; eval_from_id is expected to raise if missing.
    return eval_from_id(project_id, task_id, eval_id)
Expand Down
139 changes: 105 additions & 34 deletions app/desktop/studio_server/finetune_api.py
Original file line number Diff line number Diff line change
@@ -1,9 +1,9 @@
import logging
from enum import Enum
from typing import Dict
from typing import Annotated, Dict

import httpx
from fastapi import FastAPI, HTTPException
from fastapi import FastAPI, HTTPException, Query
from fastapi.responses import StreamingResponse
from kiln_ai.adapters.fine_tune.base_finetune import FineTuneParameter, FineTuneStatus
from kiln_ai.adapters.fine_tune.dataset_formatter import (
Expand Down Expand Up @@ -42,6 +42,7 @@
Train80Test20SplitDefinition,
Train80Val20SplitDefinition,
)
from kiln_ai.datamodel.run_config import RunConfigProperties
from kiln_ai.utils.config import Config
from kiln_ai.utils.name_generator import generate_memorable_name
from kiln_server.task_api import task_from_id
Expand All @@ -61,6 +62,7 @@ class FinetuneProviderModel(BaseModel):
ChatStrategy.two_message_cot,
]
)
supports_function_calling: bool = True


class FinetuneProvider(BaseModel):
Expand Down Expand Up @@ -89,6 +91,10 @@ class FinetuneDatasetInfo(BaseModel):
existing_finetunes: list[Finetune]
finetune_tags: list[FinetuneDatasetTagInfo]

# eligible finetune data based on tool selection
eligible_datasets: list[DatasetSplit]
eligible_finetune_tags: list[FinetuneDatasetTagInfo]


class DatasetSplitType(Enum):
"""Dataset split types used in the API. Any split type can be created in code."""
Expand Down Expand Up @@ -133,6 +139,7 @@ class CreateFinetuneRequest(BaseModel):
custom_system_message: str | None = None
custom_thinking_instructions: str | None = None
data_strategy: ChatStrategy
run_config_properties: RunConfigProperties | None = None

@model_validator(mode="after")
def validate_data_strategy(self):
Expand Down Expand Up @@ -170,6 +177,72 @@ def finetune_from_id(project_id: str, task_id: str, finetune_id: str) -> Finetun
return finetune


def compute_finetune_tag_info(
    task: Task, tool_filter: list[str] | None = None
) -> list[FinetuneDatasetTagInfo]:
    """
    Compute finetune tag counts with quality filters, optionally filtering by tools.

    Args:
        task: The task to analyze
        tool_filter: Optional list of tool IDs to filter by. Only runs whose tool
            set matches exactly will be included. An empty list is a real filter
            (runs that used no tools); None disables tool filtering entirely.

    Returns:
        List of FinetuneDatasetTagInfo with counts for each fine_tune tag
    """
    finetune_tag_counts: Dict[str, int] = {}
    reasoning_count: Dict[str, int] = {}
    high_quality_count: Dict[str, int] = {}
    reasoning_and_high_quality_count: Dict[str, int] = {}

    # Use `is not None`, not truthiness: an empty list must mean "runs with
    # exactly zero tools", which is distinct from "no filter requested".
    required_tools_set = set(tool_filter) if tool_filter is not None else None

    for sample in task.runs(readonly=True):
        # filter by tools if provided
        if required_tools_set is not None:
            if sample.output is None:
                # A few places in code guard against this though the field isn't optional. Just be defensive.
                continue

            run_tools_set: set[str] = set()
            if (
                sample.output.source
                and sample.output.source.run_config
                and sample.output.source.run_config.tools_config
            ):
                run_tools_set = set(sample.output.source.run_config.tools_config.tools)

            # Exact-match semantics: the run's tool set must equal the filter.
            if run_tools_set != required_tools_set:
                continue

        for tag in sample.tags:
            if tag.startswith("fine_tune"):
                finetune_tag_counts[tag] = finetune_tag_counts.get(tag, 0) + 1
                is_reasoning = ThinkingModelDatasetFilter(sample)
                is_high_quality = HighRatingDatasetFilter(sample)
                if is_reasoning:
                    reasoning_count[tag] = reasoning_count.get(tag, 0) + 1
                if is_high_quality:
                    high_quality_count[tag] = high_quality_count.get(tag, 0) + 1
                if is_reasoning and is_high_quality:
                    reasoning_and_high_quality_count[tag] = (
                        reasoning_and_high_quality_count.get(tag, 0) + 1
                    )

    return [
        FinetuneDatasetTagInfo(
            tag=tag,
            count=count,
            reasoning_count=reasoning_count.get(tag, 0),
            high_quality_count=high_quality_count.get(tag, 0),
            reasoning_and_high_quality_count=reasoning_and_high_quality_count.get(
                tag, 0
            ),
        )
        for tag, count in finetune_tag_counts.items()
    ]


def connect_fine_tune_api(app: FastAPI):
@app.get("/api/projects/{project_id}/tasks/{task_id}/dataset_splits")
async def dataset_splits(project_id: str, task_id: str) -> list[DatasetSplit]:
Expand Down Expand Up @@ -239,10 +312,17 @@ async def finetune_providers() -> list[FinetuneProvider]:
if provider.provider_finetune_id:
if provider.name not in provider_models:
provider_models[provider.name] = []

# special case, Together AI doesn't support fine-tuning with tools
supports_tools = (
provider.supports_function_calling
and provider.name != ModelProviderName.together_ai
)
provider_models[provider.name].append(
FinetuneProviderModel(
name=model.friendly_name,
id=provider.provider_finetune_id,
supports_function_calling=supports_tools,
)
)

Expand Down Expand Up @@ -285,46 +365,32 @@ async def finetune_hyperparameters(

@app.get("/api/projects/{project_id}/tasks/{task_id}/finetune_dataset_info")
async def finetune_dataset_info(
project_id: str, task_id: str
project_id: str,
task_id: str,
tool_ids: Annotated[list[str] | None, Query()] = None,
) -> FinetuneDatasetInfo:
task = task_from_id(project_id, task_id)
existing_datasets = task.dataset_splits()
existing_finetunes = task.finetunes()

finetune_tag_counts: Dict[str, int] = {}
reasoning_count: Dict[str, int] = {}
high_quality_count: Dict[str, int] = {}
reasoning_and_high_quality_count: Dict[str, int] = {}
for sample in task.runs(readonly=True):
for tag in sample.tags:
if tag.startswith("fine_tune"):
finetune_tag_counts[tag] = finetune_tag_counts.get(tag, 0) + 1
is_reasoning = ThinkingModelDatasetFilter(sample)
is_high_quality = HighRatingDatasetFilter(sample)
if is_reasoning:
reasoning_count[tag] = reasoning_count.get(tag, 0) + 1
if is_high_quality:
high_quality_count[tag] = high_quality_count.get(tag, 0) + 1
if is_reasoning and is_high_quality:
reasoning_and_high_quality_count[tag] = (
reasoning_and_high_quality_count.get(tag, 0) + 1
)
finetune_tags = compute_finetune_tag_info(task, tool_filter=None)
eligible_finetune_tags = compute_finetune_tag_info(task, tool_filter=tool_ids)

eligible_datasets = existing_datasets
if tool_ids:
required_tools_set = set(tool_ids)
eligible_datasets = [
dataset
for dataset in existing_datasets
if set(dataset.tool_info().tools) == required_tools_set
]

return FinetuneDatasetInfo(
existing_datasets=existing_datasets,
existing_finetunes=existing_finetunes,
finetune_tags=[
FinetuneDatasetTagInfo(
tag=tag,
count=count,
reasoning_count=reasoning_count.get(tag, 0),
high_quality_count=high_quality_count.get(tag, 0),
reasoning_and_high_quality_count=reasoning_and_high_quality_count.get(
tag, 0
),
)
for tag, count in finetune_tag_counts.items()
],
finetune_tags=finetune_tags,
eligible_datasets=eligible_datasets,
eligible_finetune_tags=eligible_finetune_tags,
)

@app.post("/api/projects/{project_id}/tasks/{task_id}/dataset_splits")
Expand Down Expand Up @@ -392,6 +458,7 @@ async def create_finetune(
description=request.description,
validation_split_name=request.validation_split_name,
data_strategy=request.data_strategy,
run_config=request.run_config_properties,
)

return finetune_model
Expand Down Expand Up @@ -447,7 +514,7 @@ async def download_dataset_jsonl(
system_message=system_message,
thinking_instructions=thinking_instructions,
)
path = dataset_formatter.dump_to_file(
path = await dataset_formatter.dump_to_file(
split_name,
format_type_typed,
data_strategy_typed,
Expand Down Expand Up @@ -564,10 +631,14 @@ async def fetch_fireworks_finetune_models() -> list[FinetuneProviderModel]:
else:
name = display_name + " (" + id_tail + ")"

# Check if the model supports tools via 'supportsTools'
supports_tools = model.get("supportsTools", False)

tuneable_models.append(
FinetuneProviderModel(
name=name,
id=id,
supports_function_calling=supports_tools,
)
)

Expand Down
38 changes: 28 additions & 10 deletions app/desktop/studio_server/provider_api.py
Original file line number Diff line number Diff line change
Expand Up @@ -33,6 +33,7 @@
)
from kiln_ai.adapters.provider_tools import provider_name_from_id, provider_warnings
from kiln_ai.adapters.reranker_list import built_in_rerankers
from kiln_ai.datamodel.finetune import Finetune
from kiln_ai.datamodel.registry import all_projects
from kiln_ai.utils.config import Config
from kiln_ai.utils.exhaustive_error import raise_exhaustive_enum_error
Expand Down Expand Up @@ -155,6 +156,8 @@ class ModelDetails(BaseModel):
# True if this is a untested model (typically user added). We don't know if these support structured output, data gen, etc. They should appear in their own section in the UI.
untested_model: bool = Field(default=False)
task_filter: List[str] | None = Field(default=None)
# if the model has a model-specific run config which should be used when running the model (like a fine-tune model's baked in run config)
model_specific_run_config: str | None = Field(default=None)


class AvailableModels(BaseModel):
Expand Down Expand Up @@ -1438,6 +1441,22 @@ def custom_models() -> AvailableModels | None:
)


def fine_tune_model_structured_output_mode(
    fine_tune: Finetune,
) -> StructuredOutputMode:
    """Resolve the structured output mode for a fine-tune.

    Preference order: the run-config field, then the legacy field stored
    directly on the fine-tune, then a safe default.
    """
    # Current field: stored on the fine-tune's run config.
    run_config = fine_tune.run_config
    if run_config and run_config.structured_output_mode is not None:
        return run_config.structured_output_mode
    # Legacy field: isinstance also rejects None, so no separate None check needed.
    legacy_mode = fine_tune.structured_output_mode
    if isinstance(legacy_mode, StructuredOutputMode):
        return legacy_mode
    # Fallback for records predating both fields.
    return StructuredOutputMode.json_instructions


def all_fine_tuned_models() -> AvailableModels | None:
# Add any fine tuned models
models: List[ModelDetails] = []
Expand All @@ -1447,9 +1466,14 @@ def all_fine_tuned_models() -> AvailableModels | None:
for fine_tune in task.finetunes():
# check if the fine tune is completed
if fine_tune.fine_tune_model_id:
model_specific_run_config = (
f"finetune_run_config::{project.id}::{task.id}::{fine_tune.id}"
if fine_tune.run_config is not None
else None
)
models.append(
ModelDetails(
id=f"{project.id}::{task.id}::{fine_tune.id}",
id=fine_tune.model_id(),
name=fine_tune.name
+ f" ({provider_name_from_id(fine_tune.provider)})",
# YMMV, but we'll assume all fine tuned models support structured output, data gen, and tools as they may have been trained with them
Expand All @@ -1462,16 +1486,10 @@ def all_fine_tuned_models() -> AvailableModels | None:
suggested_for_evals=False,
uncensored=False,
suggested_for_uncensored_data_gen=False,
structured_output_mode=(
fine_tune_mode
if (
fine_tune_mode := getattr(
fine_tune, "structured_output_mode", None
)
)
and isinstance(fine_tune_mode, StructuredOutputMode)
else StructuredOutputMode.json_instructions
structured_output_mode=fine_tune_model_structured_output_mode(
fine_tune
),
model_specific_run_config=model_specific_run_config,
supports_vision=False,
supports_doc_extraction=False,
suggested_for_doc_extraction=False,
Expand Down
Loading
Loading