Commit adccd26

Fix additional ruff linting errors
- Remove unused imports (extract_span_context, store_span_context)
- Fix import sorting in traceloop files
- Remove unnecessary f-string without placeholders
- Fix undefined logger references (changed to _LOGGER)
- Apply ruff formatting to affected files

All files now pass 'ruff check' and 'ruff format --check'.
1 parent b65de0f commit adccd26
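For readers unfamiliar with these lint categories, below is a minimal, hypothetical sketch of the three kinds of findings the commit message names (unused import, f-string without placeholders, undefined logger name) next to their fixed form. The rule codes in the comments are the usual pyflakes codes that ruff reuses for these findings; they are inferred here, not quoted from the commit.

# Hypothetical module, for illustration only -- not part of this repository.
import logging

_LOGGER = logging.getLogger(__name__)


def before() -> None:
    # F401 (unused import): e.g. importing extract_span_context and never
    # using it; the fix in manager.py below is simply to drop that import line.
    # F541 (f-string without placeholders): ruff flags the literal below.
    message = f"get some history jokes"  # noqa: F541
    # F821 (undefined name): code that referred to a bare `logger` that was
    # never defined; the fix is to use the module-level _LOGGER instead.
    _LOGGER.info(message)


def after() -> None:
    message = "get some history jokes"  # plain string, no placeholders needed
    _LOGGER.info(message)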

9 files changed: +105 -89 lines changed


util/opentelemetry-util-genai-evals/src/opentelemetry/util/genai/evals/env.py

Lines changed: 1 addition & 0 deletions

@@ -49,6 +49,7 @@ def read_aggregation_flag(
         return None
     return raw.strip().lower() in _TRUTHY

+
 __all__ = [
     "read_raw_evaluators",
     "read_interval",

util/opentelemetry-util-genai-evals/src/opentelemetry/util/genai/evals/manager.py

Lines changed: 0 additions & 1 deletion

@@ -15,7 +15,6 @@
 from ..environment_variables import (
     OTEL_INSTRUMENTATION_GENAI_EVALS_EVALUATORS,
 )
-from ..span_context import extract_span_context, store_span_context
 from ..types import (
     AgentCreation,
     AgentInvocation,

util/opentelemetry-util-genai-traceloop-translator/examples/traceloop_processor_example.py

Lines changed: 4 additions & 3 deletions

@@ -3,15 +3,16 @@
 from __future__ import annotations

 import os
+
 from dotenv import load_dotenv

 # Load .env first
 load_dotenv()

 try:
-    from traceloop.sdk import Traceloop
-    from traceloop.sdk.decorators import task, workflow, agent, tool
     from openai import OpenAI
+    from traceloop.sdk import Traceloop
+    from traceloop.sdk.decorators import agent, task, tool, workflow

     # Initialize Traceloop - this will also trigger TraceloopSpanProcessor registration
     Traceloop.init(disable_batch=True, api_endpoint="http://localhost:4318")

@@ -48,7 +49,7 @@ def translate_joke_to_pirate(joke: str):
 def history_jokes_tool():
     completion = client.chat.completions.create(
         model="gpt-3.5-turbo",
-        messages=[{"role": "user", "content": f"get some history jokes"}],
+        messages=[{"role": "user", "content": "get some history jokes"}],
     )

     return completion.choices[0].message.content

util/opentelemetry-util-genai-traceloop-translator/src/opentelemetry/util/genai/processor/traceloop_span_processor.py

Lines changed: 2 additions & 3 deletions

@@ -26,12 +26,11 @@
 from opentelemetry.context import Context
 from opentelemetry.sdk.trace import ReadableSpan, SpanProcessor
 from opentelemetry.trace import Span
-
-from opentelemetry.util.genai.types import LLMInvocation
 from opentelemetry.util.genai.handler import (
-    get_telemetry_handler,
     TelemetryHandler,
+    get_telemetry_handler,
 )
+from opentelemetry.util.genai.types import LLMInvocation

 from .content_normalizer import normalize_traceloop_content

util/opentelemetry-util-genai-traceloop-translator/tests/test_traceloop_integration.py

Lines changed: 62 additions & 65 deletions

@@ -6,17 +6,14 @@

 import json
 import os
+
 import pytest
-from unittest.mock import Mock, patch

-from opentelemetry import trace
-from opentelemetry.sdk.trace import TracerProvider, ReadableSpan
+from opentelemetry.sdk.trace import TracerProvider
 from opentelemetry.sdk.trace.export import SimpleSpanProcessor
 from opentelemetry.sdk.trace.export.in_memory_span_exporter import (
     InMemorySpanExporter,
 )
-from opentelemetry.trace import SpanKind
-
 from opentelemetry.util.genai.processor.traceloop_span_processor import (
     TraceloopSpanProcessor,
 )

@@ -122,9 +119,9 @@ def test_simple_workflow_with_tasks(self, setup_tracer):
             and s.attributes.get("gen_ai.workflow.name")
             == "pirate_joke_generator"
         ]
-        assert len(workflow_spans) >= 1, (
-            "Should have at least one workflow span"
-        )
+        assert (
+            len(workflow_spans) >= 1
+        ), "Should have at least one workflow span"

         # Find task spans
         task_spans = [

@@ -150,9 +147,9 @@ def test_simple_workflow_with_tasks(self, setup_tracer):
             traceloop_keys = [
                 k for k in traceloop_keys if k != "_traceloop_processed"
             ]
-            assert len(traceloop_keys) == 0, (
-                f"Span {span.name} should not have traceloop.* attributes, found: {traceloop_keys}"
-            )
+            assert (
+                len(traceloop_keys) == 0
+            ), f"Span {span.name} should not have traceloop.* attributes, found: {traceloop_keys}"

     def test_nested_agent_with_tool(self, setup_tracer):
         """Test @agent pattern with nested @tool calls."""

@@ -247,9 +244,9 @@ def test_nested_agent_with_tool(self, setup_tracer):
         input_data = json.loads(
             agent_with_input[0].attributes["gen_ai.input.messages"]
         )
-        assert isinstance(input_data, list), (
-            "Input should be normalized to message array"
-        )
+        assert isinstance(
+            input_data, list
+        ), "Input should be normalized to message array"


 class TestParentChildRelationships:

@@ -288,9 +285,9 @@ def test_parent_child_hierarchy_preserved(self, setup_tracer):
             if child.parent and child.parent.span_id in span_map:
                 valid_parent_refs += 1

-        assert valid_parent_refs >= 1, (
-            "At least one child should have a valid parent reference"
-        )
+        assert (
+            valid_parent_refs >= 1
+        ), "At least one child should have a valid parent reference"


 class TestContentNormalization:

@@ -345,22 +342,22 @@ def test_normalize_entity_input_output(self, setup_tracer):
         ]

         # Should have at least the mutated original span with gen_ai.input.messages
-        assert len(spans_with_input) >= 1, (
-            f"Should have spans with normalized input, got {len(spans)} spans total"
-        )
+        assert (
+            len(spans_with_input) >= 1
+        ), f"Should have spans with normalized input, got {len(spans)} spans total"

         # Verify normalization
         for span in spans_with_input:
             input_str = span.attributes.get("gen_ai.input.messages")
             if input_str:
                 input_data = json.loads(input_str)
-                assert isinstance(input_data, list), (
-                    "Input should be list of messages"
-                )
+                assert isinstance(
+                    input_data, list
+                ), "Input should be list of messages"
                 if input_data:
-                    assert "role" in input_data[0], (
-                        "Messages should have role field"
-                    )
+                    assert (
+                        "role" in input_data[0]
+                    ), "Messages should have role field"

         # Check output normalization
         spans_with_output = [

@@ -374,9 +371,9 @@ def test_normalize_entity_input_output(self, setup_tracer):
                     "gen_ai.output.messages"
                 )
                 output_data = json.loads(output_str)
-                assert isinstance(output_data, list), (
-                    "Output should be list of messages"
-                )
+                assert isinstance(
+                    output_data, list
+                ), "Output should be list of messages"

     def test_normalize_string_input(self, setup_tracer):
         """Test normalization of simple string inputs."""

@@ -402,9 +399,9 @@ def test_normalize_string_input(self, setup_tracer):
             and any(k.startswith("gen_ai.") for k in s.attributes.keys())
         ]

-        assert len(spans_with_genai) >= 1, (
-            "Should have spans with gen_ai.* attributes after processing"
-        )
+        assert (
+            len(spans_with_genai) >= 1
+        ), "Should have spans with gen_ai.* attributes after processing"

     def test_normalize_list_of_strings(self, setup_tracer):
         """Test normalization of list inputs."""

@@ -431,9 +428,9 @@ def test_normalize_list_of_strings(self, setup_tracer):
             for s in spans
             if s.attributes and "gen_ai.span.kind" in s.attributes
         ]
-        assert len(spans_with_genai) >= 1, (
-            "Should have processed spans with gen_ai attributes"
-        )
+        assert (
+            len(spans_with_genai) >= 1
+        ), "Should have processed spans with gen_ai attributes"


 class TestModelInference:

@@ -480,9 +477,9 @@ def test_preserve_explicit_model(self, setup_tracer):
             and s.attributes.get("gen_ai.request.model") == "gpt-4"
         ]

-        assert len(spans_with_model) >= 1, (
-            "Should preserve explicit model attribute"
-        )
+        assert (
+            len(spans_with_model) >= 1
+        ), "Should preserve explicit model attribute"


 class TestSpanFiltering:

@@ -500,18 +497,18 @@ def test_filters_non_llm_spans(self, setup_tracer):
         spans = exporter.get_finished_spans()

         # Should only have the original span, no synthetic spans
-        assert len(spans) == 1, (
-            f"Expected 1 span (non-LLM filtered), got {len(spans)}"
-        )
+        assert (
+            len(spans) == 1
+        ), f"Expected 1 span (non-LLM filtered), got {len(spans)}"

         # Original span should not have gen_ai.* attributes
         span = spans[0]
         gen_ai_attrs = [
             k for k in span.attributes.keys() if k.startswith("gen_ai.")
         ]
-        assert len(gen_ai_attrs) == 0, (
-            "Non-LLM span should not have gen_ai.* attributes"
-        )
+        assert (
+            len(gen_ai_attrs) == 0
+        ), "Non-LLM span should not have gen_ai.* attributes"

     def test_includes_traceloop_spans(self, setup_tracer):
         """Test that Traceloop task/workflow spans are included."""

@@ -534,9 +531,9 @@ def test_includes_traceloop_spans(self, setup_tracer):
             for s in spans
             if s.attributes and s.attributes.get("gen_ai.span.kind") == "task"
         ]
-        assert len(spans_with_kind) >= 1, (
-            f"Traceloop task should be transformed, got {len(spans)} spans"
-        )
+        assert (
+            len(spans_with_kind) >= 1
+        ), f"Traceloop task should be transformed, got {len(spans)} spans"


 class TestOperationInference:

@@ -560,9 +557,9 @@ def test_infer_chat_operation(self, setup_tracer):
             if s.attributes and "gen_ai.system" in s.attributes
         ]

-        assert len(spans_with_genai) >= 1, (
-            f"Should have processed spans with gen_ai attributes, got {len(spans)} total spans"
-        )
+        assert (
+            len(spans_with_genai) >= 1
+        ), f"Should have processed spans with gen_ai attributes, got {len(spans)} total spans"

     def test_infer_embedding_operation(self, setup_tracer):
         """Test that 'embedding' operation is inferred from span name."""

@@ -587,9 +584,9 @@ def test_infer_embedding_operation(self, setup_tracer):
             in s.attributes.get("gen_ai.request.model", "")
         ]

-        assert len(spans_with_embedding) >= 1, (
-            f"Should process embedding spans, got {len(spans)} total spans"
-        )
+        assert (
+            len(spans_with_embedding) >= 1
+        ), f"Should process embedding spans, got {len(spans)} total spans"


 class TestComplexWorkflow:

@@ -673,9 +670,9 @@ def test_full_pirate_joke_workflow(self, setup_tracer):
         spans = exporter.get_finished_spans()

         # Should have many spans (original mutated + synthetic)
-        assert len(spans) >= 8, (
-            f"Expected at least 8 spans in full workflow, got {len(spans)}"
-        )
+        assert (
+            len(spans) >= 8
+        ), f"Expected at least 8 spans in full workflow, got {len(spans)}"

         # Verify workflow span exists - look for spans with the workflow name
         workflow_spans = [

@@ -685,9 +682,9 @@ def test_full_pirate_joke_workflow(self, setup_tracer):
             and s.attributes.get("gen_ai.workflow.name")
             == "pirate_joke_generator"
         ]
-        assert len(workflow_spans) >= 1, (
-            f"Should have workflow span, got {len(spans)} total spans, workflow_spans={len(workflow_spans)}"
-        )
+        assert (
+            len(workflow_spans) >= 1
+        ), f"Should have workflow span, got {len(spans)} total spans, workflow_spans={len(workflow_spans)}"

         # Verify all task names are present
         task_names = {"joke_creation", "signature_generation"}

@@ -698,9 +695,9 @@ def test_full_pirate_joke_workflow(self, setup_tracer):
             if agent_name in task_names:
                 found_tasks.add(agent_name)

-        assert len(found_tasks) >= 1, (
-            f"Should find task spans, found: {found_tasks}"
-        )
+        assert (
+            len(found_tasks) >= 1
+        ), f"Should find task spans, found: {found_tasks}"

         # Verify no traceloop.* attributes remain (mutation)
         for span in spans:

@@ -711,9 +708,9 @@ def test_full_pirate_joke_workflow(self, setup_tracer):
                 if k.startswith("traceloop.")
                 and k != "_traceloop_processed"
             ]
-            assert len(traceloop_keys) == 0, (
-                f"Span {span.name} should not have traceloop.* attributes"
-            )
+            assert (
+                len(traceloop_keys) == 0
+            ), f"Span {span.name} should not have traceloop.* attributes"


 class TestEdgeCases:
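Nearly all of the churn in this test file is mechanical: as the hunks above show, the formatter rewraps long assert statements so that the condition, rather than the message, is parenthesized. A standalone illustration of the two equivalent forms (not taken from the test file itself):

spans: list = []

# Style before this commit: the failure message is wrapped in parentheses.
assert len(spans) >= 0, (
    "Should have at least zero spans"
)

# Style after running the formatter here: the condition is parenthesized
# and the message follows the closing parenthesis.
assert (
    len(spans) >= 0
), "Should have at least zero spans"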

util/opentelemetry-util-genai/src/opentelemetry/util/genai/config.py

Lines changed: 5 additions & 3 deletions

@@ -115,15 +115,17 @@ def parse_env() -> Settings:
     ).strip()
     emit_legacy_event = legacy_event_flag.lower() in {"1", "true", "yes"}

-    evaluation_sample_rate = os.environ.get(OTEL_INSTRUMENTATION_GENAI_EVALUATION_SAMPLE_RATE)
+    evaluation_sample_rate = os.environ.get(
+        OTEL_INSTRUMENTATION_GENAI_EVALUATION_SAMPLE_RATE
+    )
     if evaluation_sample_rate is None or evaluation_sample_rate.strip() == "":
         evaluation_sample_rate = 1.0
     try:
         evaluation_sample_rate = float(evaluation_sample_rate)
     except ValueError:
-        evaluation_sample_rate = 1.0
+        evaluation_sample_rate = 1.0
     if evaluation_sample_rate < 0.0:
-        evaluation_sample_rate = 0.0
+        evaluation_sample_rate = 0.0
     if evaluation_sample_rate > 1.0:
         evaluation_sample_rate = 1.0
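The config.py hunk above only rewraps an over-long environment lookup and reindents two fallback assignments; the behaviour it formats is unchanged: default to 1.0 when the variable is unset or blank, fall back to 1.0 when the value cannot be parsed, and clamp the result into [0.0, 1.0]. A minimal standalone sketch of that logic, assuming the constant's value is the variable name itself (the helper name below is hypothetical, not part of the module):

import os

# Assumed value of the constant imported in config.py.
OTEL_INSTRUMENTATION_GENAI_EVALUATION_SAMPLE_RATE = (
    "OTEL_INSTRUMENTATION_GENAI_EVALUATION_SAMPLE_RATE"
)


def read_evaluation_sample_rate() -> float:
    raw = os.environ.get(OTEL_INSTRUMENTATION_GENAI_EVALUATION_SAMPLE_RATE)
    if raw is None or raw.strip() == "":
        return 1.0  # unset or blank: sample every invocation
    try:
        rate = float(raw)
    except ValueError:
        return 1.0  # unparseable value: fall back to the default
    return min(max(rate, 0.0), 1.0)  # clamp into [0.0, 1.0] as parse_env() does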

util/opentelemetry-util-genai/src/opentelemetry/util/genai/emitters/span.py

Lines changed: 5 additions & 2 deletions

@@ -112,8 +112,9 @@ def _apply_gen_ai_semconv_attributes(
     except Exception:  # pragma: no cover - defensive
         pass

+
 def _apply_sampled_for_evaluation(
-    span: Span,
+    span: Span,
     is_sampled: bool,
 ) -> None:
     span.set_attribute("gen_ai.evaluation.sampled", is_sampled)

@@ -324,7 +325,9 @@ def on_start(
         self._apply_start_attrs(invocation)

     def on_end(self, invocation: LLMInvocation | EmbeddingInvocation) -> None:
-        _apply_sampled_for_evaluation(invocation.span, invocation.sample_for_evaluation)# type: ignore[override]
+        _apply_sampled_for_evaluation(
+            invocation.span, invocation.sample_for_evaluation
+        )  # type: ignore[override]
         if isinstance(invocation, Workflow):
             self._finish_workflow(invocation)
         elif isinstance(invocation, (AgentCreation, AgentInvocation)):
