Skip to content

gpt5-mini fails when reasoning_effort is passed via provider_kwargs #237

@huailide

Description

When using OpenAI's gpt5-mini model, the following error occurs if I include reasoning_effort in provider_kwargs:

Exception: OpenAI API error: Completions.create() got an unexpected keyword argument 'reasoning'

If I remove the reasoning_effort parameter from provider_kwargs, the request works as expected.


Expected behavior

The call should complete successfully when using gpt5-mini with `reasoning_effort`.


Full Code with reasoning_effort

import os
import sys
import argparse
import json
import langextract as lx
from langextract.core.data import ExampleData as example_data
from langextract import factory
from dotenv import load_dotenv
from utils import sanitize as san, consts as consts
from utils.models import Example
from typing import Any, Optional, List

load_dotenv()

def create_default_extract_example():
    """Return the fallback one-element example list built from the shared constants."""
    default_extraction = lx.data.Extraction(
        extraction_class="product_infos",
        extraction_text=consts.DEFAULT_EXTRACT_EXAMPLE_EXTRACT_TEXT,
        attributes=consts.DEFAULT_EXTRACT_EXAMPLE_ATTRIBUTES,
    )
    return [
        lx.data.ExampleData(
            text=consts.DEFAULT_EXTRACT_EXAMPLE_TEXT,
            extractions=[default_extraction],
        )
    ]

def create_extract_example(input_text: str, extraction_text: str, attributes_json:str):
    """Build a one-element example list from raw text and a JSON attribute payload.

    Note: json.loads raises ValueError on malformed *attributes_json*.
    """
    extraction = lx.data.Extraction(
        extraction_class="product_infos",
        extraction_text=extraction_text,
        attributes=json.loads(attributes_json),
    )
    return [lx.data.ExampleData(text=input_text, extractions=[extraction])]

def create_extract_example_list(examples: "Optional[List[Example]]"):
    """Flatten a list of Example models into LangExtract example data.

    Args:
        examples: Source examples. The annotation says Optional, so ``None``
            (or an empty list) now returns ``[]`` instead of raising
            ``TypeError`` when iterated, matching the declared contract.

    Returns:
        A flat list of ``lx.data.ExampleData`` objects, one per input Example.
    """
    # Guard the Optional case explicitly — the old body iterated None.
    if not examples:
        return []
    extract_example = []
    for example in examples:
        extract_example.extend(
            create_extract_example(
                input_text=example.input_text,
                extraction_text=example.extraction_text,
                attributes_json=example.attributes_json,
            )
        )
    return extract_example


def create_extract_model_config(model_id=os.getenv("LANG_EXTRACT_MODEL_ID"), temperature=0.3):
    """Assemble a LangExtract ModelConfig from environment settings.

    NOTE(review): ``reasoning_effort`` is forwarded to the provider and is the
    kwarg that triggers the gpt5-mini failure this issue reports.
    """
    provider_kwargs = {
        "api_key": os.getenv("OPENAI_API_KEY"),
        "base_url": os.getenv("OPENAI_BASE_URL"),
        "temperature": temperature,
        "verbosity": "low",
        "reasoning_effort": "minimal",
    }
    return factory.ModelConfig(model_id=model_id, provider_kwargs=provider_kwargs)

def run_extract_task(extract_example: example_data,
                     input_text: str,
                     prompt: str,
                     model_id=os.getenv("LANG_EXTRACT_MODEL_ID"),
                     temperature=0.3) -> Any:
    """Run one LangExtract extraction over *input_text* guided by *prompt*.

    Falls back to the default example when *extract_example* is empty or None.
    """
    examples = extract_example or create_default_extract_example()
    model_config = create_extract_model_config(model_id, temperature)
    return lx.extract(
        text_or_documents=input_text,
        prompt_description=prompt,
        examples=examples,
        config=model_config,
        fence_output=True,
        use_schema_constraints=False
    )

Code without reasoning_effort

Remove the reasoning_effort parameter from provider_kwargs:

# ... other code
def create_extract_model_config(model_id=os.getenv("LANG_EXTRACT_MODEL_ID"), temperature=0.3):
    """Workaround variant: build the ModelConfig WITHOUT ``reasoning_effort``."""
    provider_kwargs = {
        "api_key": os.getenv("OPENAI_API_KEY"),
        "base_url": os.getenv("OPENAI_BASE_URL"),
        "temperature": temperature,
        "verbosity": "low",
    }
    return factory.ModelConfig(model_id=model_id, provider_kwargs=provider_kwargs)
# ... other code

Environment

  • Python: 3.12.4
  • langextract==1.0.9
  • openai==1.107.0

Note: installed via `pip install langextract` and `pip install langextract[openai]`.

Metadata

Metadata

Assignees

No one assigned

    Labels

    No labels
    No labels

    Type

    No type

    Projects

    No projects

    Milestone

    No milestone

    Relationships

    None yet

    Development

    No branches or pull requests

    Issue actions