Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
54 changes: 43 additions & 11 deletions examples/enrichment/enrichment.py
Original file line number Diff line number Diff line change
@@ -1,20 +1,39 @@
import argparse
from typing import Optional

from pydantic import BaseModel, Field

import fenic as fc


def main(config: Optional[fc.SessionConfig] = None):
def main(config: Optional[fc.SessionConfig] = None, language_model_provider: str = "openai", language_model_name: str = "gpt-4o-mini"):

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

We should keep OpenAI as the default provider for all examples, so let's revert the recent changes to the example scripts.

For testing different providers, we already use `examples_session_config` (see `tests/conftest.py#L171`). This allows the example scripts to be run with Ollama—or any other provider—during unit tests, without needing to modify the examples themselves.

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Also, do all unit tests pass using your choice of local models?

# Configure session with semantic capabilities
config = config or fc.SessionConfig(
app_name="log_enrichment",
semantic=fc.SemanticConfig(
language_models= {
"mini": fc.OpenAILanguageModel(model_name="gpt-4o-mini", rpm=500, tpm=200_000)
}
if config is None:
# Configure language model based on provider
if language_model_provider == "openai":
language_model = fc.OpenAILanguageModel(
model_name=language_model_name,
rpm=500,
tpm=200_000
)
elif language_model_provider == "ollama":
language_model = fc.OllamaLanguageModel(
model_name=language_model_name,
host="http://localhost:11434",
rpm=100,
auto_pull=True
)
else:
raise ValueError(f"Unsupported language model provider: {language_model_provider}")

config = fc.SessionConfig(
app_name="log_enrichment",
semantic=fc.SemanticConfig(
language_models={
"mini": language_model
}
)
)
)

# Create session
session = fc.Session.get_or_create(config)
Expand Down Expand Up @@ -206,6 +225,19 @@ class ErrorAnalysis(BaseModel):


if __name__ == "__main__":
# Note: Ensure you have set your OpenAI API key:
# export OPENAI_API_KEY="your-api-key-here"
main()
parser = argparse.ArgumentParser(description="Log Enrichment Example")
parser.add_argument("--language-model-provider", default="openai",
choices=["openai", "ollama"],
help="Language model provider (default: openai)")
parser.add_argument("--language-model-name", default="gpt-4o-mini",
help="Language model name (default: gpt-4o-mini)")

args = parser.parse_args()

# Note: Ensure you have set your API key:
# For OpenAI: export OPENAI_API_KEY="your-api-key-here"
# For Ollama: ensure Ollama is running on localhost:11434
main(
language_model_provider=args.language_model_provider,
language_model_name=args.language_model_name
)
96 changes: 75 additions & 21 deletions examples/feedback_clustering/feedback_clustering.py
Original file line number Diff line number Diff line change
Expand Up @@ -3,35 +3,67 @@
This example demonstrates how to use semantic.with_cluster_labels() and semantic.reduce()
to automatically cluster customer feedback into themes and generate summaries
for each discovered category.

Usage:
python feedback_clustering.py
python feedback_clustering.py --language-model-provider ollama --language-model-name qwen3:4b --embedding-model-provider ollama --embedding-model-name embeddinggemma:latest
"""

import argparse
from typing import Optional

import fenic as fc


def main(config: Optional[fc.SessionConfig] = None):
def main(config: Optional[fc.SessionConfig] = None, language_model_provider: str = "openai", language_model_name: str = "gpt-4o-mini", embedding_model_provider: str = "openai", embedding_model_name: str = "text-embedding-3-small"):
"""Analyze customer feedback using semantic clustering and summarization."""
# Configure session with both language models and embedding models
config = config or fc.SessionConfig(
app_name="feedback_clustering",
semantic=fc.SemanticConfig(
language_models={
"mini": fc.OpenAILanguageModel(
model_name="gpt-4o-mini",
rpm=500,
tpm=200_000,
)
},
embedding_models={
"small": fc.OpenAIEmbeddingModel(
model_name="text-embedding-3-small",
rpm=3000,
tpm=1_000_000
)
}
),
)
if config is None:
# Configure language model based on provider
if language_model_provider == "openai":
language_model = fc.OpenAILanguageModel(
model_name=language_model_name,
rpm=500,
tpm=200_000
)
elif language_model_provider == "ollama":
language_model = fc.OllamaLanguageModel(
model_name=language_model_name,
host="http://localhost:11434",
rpm=100,
auto_pull=True
)
else:
raise ValueError(f"Unsupported language model provider: {language_model_provider}")

# Configure embedding model based on provider
if embedding_model_provider == "openai":
embedding_model = fc.OpenAIEmbeddingModel(
model_name=embedding_model_name,
rpm=3000,
tpm=1_000_000
)
elif embedding_model_provider == "ollama":
embedding_model = fc.OllamaEmbeddingModel(
model_name=embedding_model_name,
host="http://localhost:11434",
rpm=100,
auto_pull=True
)
else:
raise ValueError(f"Unsupported embedding model provider: {embedding_model_provider}")

config = fc.SessionConfig(
app_name="feedback_clustering",
semantic=fc.SemanticConfig(
language_models={
"mini": language_model
},
embedding_models={
"small": embedding_model
}
),
)

# Create session
session = fc.Session.get_or_create(config)
Expand Down Expand Up @@ -193,4 +225,26 @@ def main(config: Optional[fc.SessionConfig] = None):


if __name__ == "__main__":
main()
parser = argparse.ArgumentParser(description="Customer Feedback Clustering & Analysis")
parser.add_argument("--language-model-provider", default="openai",
choices=["openai", "ollama"],
help="Language model provider (default: openai)")
parser.add_argument("--language-model-name", default="gpt-4o-mini",
help="Language model name (default: gpt-4o-mini)")
parser.add_argument("--embedding-model-provider", default="openai",
choices=["openai", "ollama"],
help="Embedding model provider (default: openai)")
parser.add_argument("--embedding-model-name", default="text-embedding-3-small",
help="Embedding model name (default: text-embedding-3-small)")

args = parser.parse_args()

# Note: Ensure you have set your API key:
# For OpenAI: export OPENAI_API_KEY="your-api-key-here"
# For Ollama: ensure Ollama is running on localhost:11434
main(
language_model_provider=args.language_model_provider,
language_model_name=args.language_model_name,
embedding_model_provider=args.embedding_model_provider,
embedding_model_name=args.embedding_model_name
)
65 changes: 50 additions & 15 deletions examples/hello_world/hello_world.py
Original file line number Diff line number Diff line change
@@ -1,3 +1,4 @@
import argparse
from typing import Optional

from pydantic import BaseModel, Field
Expand All @@ -14,20 +15,34 @@ class ErrorPattern(BaseModel):
error_type: str = Field(description="Type of error (e.g., NullPointer, Timeout, ConnectionRefused)")
component: str = Field(description="Affected component or system")

def main(config: Optional[fc.SessionConfig] = None):
def main(config: Optional[fc.SessionConfig] = None, language_model_provider: str = "openai", language_model_name: str = "gpt-4o-mini", embedding_model_provider: Optional[str] = None, embedding_model_name: Optional[str] = None):
# 1. Configure session with semantic capabilities
config = config or fc.SessionConfig(
app_name="hello_debug",
semantic=fc.SemanticConfig(
language_models= {
"mini": fc.OpenAILanguageModel(
model_name="gpt-4o-mini", # Fast and effective for log analysis
rpm=500,
tpm=200_000
)
}
if config is None:
# Configure language model based on provider
if language_model_provider == "openai":
language_model = fc.OpenAILanguageModel(
model_name=language_model_name,
rpm=500,
tpm=200_000
)
elif language_model_provider == "ollama":
language_model = fc.OllamaLanguageModel(
model_name=language_model_name,
host="http://localhost:11434",
rpm=100,
auto_pull=True
)
else:
raise ValueError(f"Unsupported language model provider: {language_model_provider}")

config = fc.SessionConfig(
app_name="hello_debug",
semantic=fc.SemanticConfig(
language_models={
"mini": language_model
}
)
)
)

# Create session
session = fc.Session.get_or_create(config)
Expand Down Expand Up @@ -222,6 +237,26 @@ def main(config: Optional[fc.SessionConfig] = None):


if __name__ == "__main__":
# Note: Ensure you have set your OpenAI API key:
# export OPENAI_API_KEY="your-api-key-here"
main()
parser = argparse.ArgumentParser(description="Error Log Analyzer")
parser.add_argument("--language-model-provider", default="openai",
choices=["openai", "ollama"],
help="Language model provider (default: openai)")
parser.add_argument("--language-model-name", default="gpt-4o-mini",
help="Language model name (default: gpt-4o-mini)")
parser.add_argument("--embedding-model-provider",
choices=["openai", "ollama"],
help="Embedding model provider")
parser.add_argument("--embedding-model-name",
help="Embedding model name")

args = parser.parse_args()

# Note: Ensure you have set your API key:
# For OpenAI: export OPENAI_API_KEY="your-api-key-here"
# For Ollama: ensure Ollama is running on localhost:11434
main(
language_model_provider=args.language_model_provider,
language_model_name=args.language_model_name,
embedding_model_provider=args.embedding_model_provider,
embedding_model_name=args.embedding_model_name
)
71 changes: 44 additions & 27 deletions examples/news_analysis/news_analysis.py
Original file line number Diff line number Diff line change
Expand Up @@ -12,45 +12,47 @@

Usage:
python news_analysis.py
python news_analysis.py --language-model-provider ollama --language-model-name qwen3:4b
"""

import argparse
from typing import Optional

from pydantic import BaseModel, Field

import fenic as fc


def main(config: Optional[fc.SessionConfig] = None):
def main(config: Optional[fc.SessionConfig] = None, language_model_provider: str = "openai", language_model_name: str = "gpt-4o-mini"):
"""Main analysis pipeline for news article bias detection."""
# Configure session with semantic capabilities
# Set your `OPENAI_API_KEY` environment variable.
    # Alternatively, you can run the example with a Gemini (`GOOGLE_API_KEY`) model by uncommenting the provided additional model configurations.
# Using an Anthropic model requires installing fenic with the `anthropic` extra package, and setting the `ANTHROPIC_API_KEY` environment variable
print("🔧 Configuring fenic session...")
config = config or fc.SessionConfig(
app_name="news_analysis",
semantic=fc.SemanticConfig(
language_models={
"openai": fc.OpenAILanguageModel(
model_name="gpt-4o-mini",
rpm=500,
tpm=200_000
),
# "gemini": fc.GoogleDeveloperLanguageModel(
# model_name="gemini-2.0-flash",
# rpm=500,
# tpm=1_000_000
# ),
# "anthropic": fc.AnthropicLanguageModel(
# model_name="claude-3-5-haiku-latest",
# rpm=500,
# input_tpm=80_000,
# output_tpm=32_000,
# )
}
if config is None:
# Configure language model based on provider
if language_model_provider == "openai":
language_model = fc.OpenAILanguageModel(
model_name=language_model_name,
rpm=500,
tpm=200_000
)
elif language_model_provider == "ollama":
language_model = fc.OllamaLanguageModel(
model_name=language_model_name,
host="http://localhost:11434",
rpm=100,
auto_pull=True
)
else:
raise ValueError(f"Unsupported language model provider: {language_model_provider}")

config = fc.SessionConfig(
app_name="news_analysis",
semantic=fc.SemanticConfig(
language_models={
"default": language_model
}
)
)
)

# Create session
session = fc.Session.get_or_create(config)
Expand Down Expand Up @@ -402,4 +404,19 @@ class ArticleAnalysis(BaseModel):


if __name__ == "__main__":
main()
parser = argparse.ArgumentParser(description="News Article Bias Detection")
parser.add_argument("--language-model-provider", default="openai",
choices=["openai", "ollama"],
help="Language model provider (default: openai)")
parser.add_argument("--language-model-name", default="gpt-4o-mini",
help="Language model name (default: gpt-4o-mini)")

args = parser.parse_args()

# Note: Ensure you have set your API key:
# For OpenAI: export OPENAI_API_KEY="your-api-key-here"
# For Ollama: ensure Ollama is running on localhost:11434
main(
language_model_provider=args.language_model_provider,
language_model_name=args.language_model_name
)
Loading