Skip to content

Commit 8c60348

Browse files
committed
refactor: clean up logging - remove emojis, add prefixes, improve correlation
Changes: - main_traceloop.py: Replace emojis with [INIT], [FLUSH], [SUCCESS], [ERROR] prefixes - traceloop_span_processor.py: Add [TL_PROCESSOR] prefix to all logs - Simplify debug messages for better log correlation and filtering - Make logging more professional and concise - Maintain consistent format: [PREFIX] Message: key=value, key=value
1 parent f167264 commit 8c60348

File tree

2 files changed

+43
-43
lines changed
  • instrumentation-genai/opentelemetry-instrumentation-langchain/examples/multi_agent_travel_planner
  • util/opentelemetry-util-genai-traceloop-translator/src/opentelemetry/util/genai/processor

2 files changed

+43
-43
lines changed

instrumentation-genai/opentelemetry-instrumentation-langchain/examples/multi_agent_travel_planner/main_traceloop.py

Lines changed: 21 additions & 21 deletions
Original file line numberDiff line numberDiff line change
@@ -114,7 +114,7 @@
114114
app_name=OTEL_SERVICE_NAME,
115115
resource_attributes=resource_attributes
116116
)
117-
print(" Traceloop SDK initialized with zero-code translator")
117+
print("[INIT] Traceloop SDK initialized with zero-code translator")
118118

119119

120120
def _configure_otlp_logging() -> None:
@@ -130,7 +130,7 @@ def _configure_otlp_logging() -> None:
130130
try:
131131
existing = get_logger_provider()
132132
if isinstance(existing, LoggerProvider):
133-
print(" LoggerProvider already configured")
133+
print("[INIT] LoggerProvider already configured")
134134
return
135135
except:
136136
pass
@@ -157,7 +157,7 @@ def _configure_otlp_logging() -> None:
157157
)
158158
logger_provider.add_log_record_processor(log_processor)
159159
set_logger_provider(logger_provider)
160-
print(f" OTLP logging configured with endpoint: {log_endpoint}")
160+
print(f"[INIT] OTLP logging configured, endpoint={log_endpoint}")
161161

162162

163163
# Configure logging for evaluation results
@@ -186,7 +186,7 @@ def _configure_otlp_logging() -> None:
186186
#
187187
# Note: langchain-core must be installed for message reconstruction to work,
188188
# but LangChain instrumentation is NOT needed.
189-
print(" Message reconstruction enabled in translator (no LangChain instrumentation needed)")
189+
print("[INIT] Message reconstruction enabled in translator (LangChain instrumentation not required)")
190190

191191
# ---------------------------------------------------------------------------
192192
# Sample data utilities
@@ -602,7 +602,7 @@ def main() -> None:
602602

603603
def flush_telemetry():
604604
"""Flush all OpenTelemetry providers before exit."""
605-
print("\n🔄 Flushing telemetry data...", flush=True)
605+
print("\n[FLUSH] Starting telemetry flush", flush=True)
606606

607607
# CRITICAL: Wait for all evaluations to complete before flushing
608608
# Evaluations run asynchronously in a background thread
@@ -613,54 +613,54 @@ def flush_telemetry():
613613
if handler:
614614
handler.wait_for_evaluations(200.0)
615615
except Exception as e:
616-
print(f" ⚠️ Could not wait for evaluations: {e}", flush=True)
616+
print(f"[FLUSH] Warning: Could not wait for evaluations: {e}", flush=True)
617617

618618
# Flush traces (Traceloop SDK uses OTel TracerProvider under the hood)
619619
try:
620620
from opentelemetry import trace
621621
tracer_provider = trace.get_tracer_provider()
622622
if hasattr(tracer_provider, "force_flush"):
623-
print("Flushing traces (traceloop.* and gen_ai.* spans)...", flush=True)
624-
tracer_provider.force_flush(timeout_millis=30000) # 30 seconds
623+
print("[FLUSH] Flushing traces (timeout=30s)", flush=True)
624+
tracer_provider.force_flush(timeout_millis=30000)
625625
except Exception as e:
626-
print(f" ⚠️ Could not flush traces: {e}", flush=True)
626+
print(f"[FLUSH] Warning: Could not flush traces: {e}", flush=True)
627627

628628
# Flush logs (if any emitters are using logs)
629629
try:
630630
from opentelemetry._logs import get_logger_provider
631631
logger_provider = get_logger_provider()
632632
if hasattr(logger_provider, "force_flush"):
633-
print("Flushing logs...", flush=True)
634-
logger_provider.force_flush(timeout_millis=30000) # 30 seconds
633+
print("[FLUSH] Flushing logs (timeout=30s)", flush=True)
634+
logger_provider.force_flush(timeout_millis=30000)
635635
except Exception as e:
636-
print(f" ⚠️ Could not flush logs: {e}", flush=True)
636+
print(f"[FLUSH] Warning: Could not flush logs: {e}", flush=True)
637637

638638
# Flush metrics
639639
try:
640640
from opentelemetry.metrics import get_meter_provider
641641
meter_provider = get_meter_provider()
642642
if hasattr(meter_provider, "force_flush"):
643-
print("Flushing metrics...", flush=True)
644-
meter_provider.force_flush(timeout_millis=30000) # 30 seconds
643+
print("[FLUSH] Flushing metrics (timeout=30s)", flush=True)
644+
meter_provider.force_flush(timeout_millis=30000)
645645
except Exception as e:
646-
print(f" ⚠️ Could not flush metrics: {e}", flush=True)
646+
print(f"[FLUSH] Warning: Could not flush metrics: {e}", flush=True)
647647

648648
# Give batch processors time to complete final export operations
649-
print("Waiting for final batch export...", flush=True)
649+
print("[FLUSH] Waiting for final batch export (5s)", flush=True)
650650
time.sleep(5)
651651

652-
print(" Telemetry flush complete!\n", flush=True)
652+
print("[FLUSH] Telemetry flush complete\n", flush=True)
653653

654654

655655
if __name__ == "__main__":
656656
exit_code = 0
657657
try:
658658
main()
659-
print("\n Workflow completed successfully!")
660-
print(" Traces exported with traceloop.* attributes")
661-
print(" Zero-code translator converted to gen_ai.* attributes")
659+
print("\n[SUCCESS] Workflow completed")
660+
print("[SUCCESS] Traces exported with traceloop.* attributes")
661+
print("[SUCCESS] Zero-code translator converted to gen_ai.* attributes")
662662
except Exception as e:
663-
print(f"\nERROR: Workflow failed: {e}", file=sys.stderr)
663+
print(f"\n[ERROR] Workflow failed: {e}", file=sys.stderr)
664664
import traceback
665665
traceback.print_exc()
666666
exit_code = 1

util/opentelemetry-util-genai-traceloop-translator/src/opentelemetry/util/genai/processor/traceloop_span_processor.py

Lines changed: 22 additions & 22 deletions
Original file line numberDiff line numberDiff line change
@@ -74,7 +74,7 @@ def matches(
7474
return False
7575
except re.error:
7676
# Bad regex – treat as non-match but log once
77-
logging.warning("Invalid regex in match_scope: %s", pattern)
77+
logging.warning("[TL_PROCESSOR] Invalid regex in match_scope: %s", pattern)
7878
return False
7979
if self.match_attributes:
8080
for k, expected in self.match_attributes.items():
@@ -95,7 +95,7 @@ def _load_rules_from_env() -> List[TransformationRule]:
9595
data = json.loads(raw)
9696
rules_spec = data.get("rules") if isinstance(data, dict) else None
9797
if not isinstance(rules_spec, list):
98-
logging.warning("%s must contain a 'rules' list", _ENV_RULES)
98+
logging.warning("[TL_PROCESSOR] %s must contain a 'rules' list", _ENV_RULES)
9999
return []
100100
rules: List[TransformationRule] = []
101101
for r in rules_spec:
@@ -121,7 +121,7 @@ def _load_rules_from_env() -> List[TransformationRule]:
121121
)
122122
return rules
123123
except Exception as exc: # broad: we never want to break app startup
124-
logging.warning("Failed to parse %s: %s", _ENV_RULES, exc)
124+
logging.warning("[TL_PROCESSOR] Failed to parse %s: %s", _ENV_RULES, exc)
125125
return []
126126

127127

@@ -258,10 +258,10 @@ def _process_span_translation(self, span: ReadableSpan) -> Optional[Any]:
258258

259259
# Check if this span should be transformed
260260
if not self.span_filter(span):
261-
logger.debug("Span %s filtered out by span_filter", span.name)
261+
logger.debug("[TL_PROCESSOR] Span filtered: name=%s", span.name)
262262
return None
263263

264-
logger.debug("Processing span for transformation: %s (kind=%s)",
264+
logger.debug("[TL_PROCESSOR] Translating span: name=%s, kind=%s",
265265
span.name,
266266
span.attributes.get("traceloop.span.kind") if span.attributes else None)
267267

@@ -280,7 +280,7 @@ def _process_span_translation(self, span: ReadableSpan) -> Optional[Any]:
280280
applied_rule = rule
281281
break
282282
except Exception as match_err: # pragma: no cover - defensive
283-
logging.warning("Rule match error ignored: %s", match_err)
283+
logging.warning("[TL_PROCESSOR] Rule match error: %s", match_err)
284284

285285
sentinel = {"_traceloop_processed": True}
286286
# Decide which transformation config to apply
@@ -375,7 +375,7 @@ def on_end(self, span: ReadableSpan) -> None:
375375
# STEP 2: Check if this is an LLM span that needs evaluation
376376
if self._is_llm_span(span):
377377
_logger.debug(
378-
"🔍 TRACELOOP PROCESSOR: LLM span '%s' detected! Processing immediately for evaluations",
378+
"[TL_PROCESSOR] LLM span detected: %s, processing for evaluations",
379379
span.name
380380
)
381381
# Process LLM spans IMMEDIATELY - create synthetic span and trigger evaluations
@@ -386,16 +386,17 @@ def on_end(self, span: ReadableSpan) -> None:
386386
try:
387387
handler.stop_llm(invocation)
388388
_logger.debug(
389-
"🔍 TRACELOOP PROCESSOR: LLM invocation completed, evaluations should trigger"
389+
"[TL_PROCESSOR] LLM invocation completed: %s",
390+
span.name
390391
)
391392
except Exception as stop_err:
392393
_logger.warning(
393-
"Failed to stop LLM invocation: %s", stop_err
394+
"[TL_PROCESSOR] Failed to stop LLM invocation: %s", stop_err
394395
)
395396
else:
396397
# Non-LLM spans (tasks, workflows, tools) - buffer for optional batch processing
397398
_logger.debug(
398-
"🔍 TRACELOOP PROCESSOR: Non-LLM span '%s', buffering (%d in buffer)",
399+
"[TL_PROCESSOR] Non-LLM span buffered: %s (buffer_size=%d)",
399400
span.name,
400401
len(self._span_buffer) + 1
401402
)
@@ -404,7 +405,8 @@ def on_end(self, span: ReadableSpan) -> None:
404405
# Process buffer when root span arrives (optional, for synthetic spans of workflows)
405406
if span.parent is None and not self._processing_buffer:
406407
_logger.debug(
407-
"🔍 TRACELOOP PROCESSOR: ROOT SPAN detected, processing buffered non-LLM spans"
408+
"[TL_PROCESSOR] Root span detected, processing buffered spans (count=%d)",
409+
len(self._span_buffer)
408410
)
409411
self._processing_buffer = True
410412
try:
@@ -437,7 +439,7 @@ def on_end(self, span: ReadableSpan) -> None:
437439
except Exception as e:
438440
# Don't let transformation errors break the original span processing
439441
logging.warning(
440-
f"TraceloopSpanProcessor failed to transform span: {e}"
442+
"[TL_PROCESSOR] Span transformation failed: %s", e
441443
)
442444

443445
def _sort_spans_by_hierarchy(
@@ -519,16 +521,15 @@ def _is_llm_span(self, span: ReadableSpan) -> bool:
519521
)
520522

521523
if has_input_messages or has_output_messages:
522-
# This is a task/agent span with message data - PERFECT for evaluations!
523524
_logger.debug(
524-
"Span '%s' (kind=%s) has message data (input=%s, output=%s) - WILL EVALUATE",
525-
span.name, span_kind, has_input_messages, has_output_messages
525+
"[TL_PROCESSOR] Span evaluable (has_messages): name=%s, kind=%s",
526+
span.name, span_kind
526527
)
527528
return True
528529

529530
# PRIORITY 2: Check for explicit LLM span kind (even without messages, for compatibility)
530531
if span_kind == "llm":
531-
_logger.debug("Span '%s' has span_kind='llm' - WILL EVALUATE", span.name)
532+
_logger.debug("[TL_PROCESSOR] Span evaluable (kind=llm): name=%s", span.name)
532533
return True
533534

534535
# PRIORITY 3: Detect ReAct agent/task spans by kind
@@ -539,7 +540,7 @@ def _is_llm_span(self, span: ReadableSpan) -> bool:
539540
exclude_keywords = ["should_continue", "model_to_tools", "tools_to_model", "__start__", "__end__"]
540541
if not any(ex in span_name_lower for ex in exclude_keywords):
541542
_logger.debug(
542-
"Span '%s' (kind=%s) is agent/task/workflow - WILL EVALUATE",
543+
"[TL_PROCESSOR] Span evaluable (agent/task/workflow): name=%s, kind=%s",
543544
span.name, span_kind
544545
)
545546
return True
@@ -550,25 +551,24 @@ def _is_llm_span(self, span: ReadableSpan) -> bool:
550551
"gen_ai.request.model",
551552
"ai.model.name"
552553
]):
553-
_logger.debug("Span '%s' has model attribute - WILL EVALUATE", span.name)
554+
_logger.debug("[TL_PROCESSOR] Span evaluable (has_model_attr): name=%s", span.name)
554555
return True
555556

556557
# PRIORITY 5: Name-based detection for agent/specialist patterns
557558
# Match patterns like: flight_specialist, hotel_specialist, coordinator_agent, etc.
558559
agent_patterns = ["_specialist", "_agent", "coordinator", "synthesizer"]
559560
if any(pattern in span_name_lower for pattern in agent_patterns):
560-
_logger.debug("Span '%s' matches agent pattern - WILL EVALUATE", span.name)
561+
_logger.debug("[TL_PROCESSOR] Span evaluable (agent_pattern): name=%s", span.name)
561562
return True
562563

563564
# PRIORITY 6: Name-based detection for LLM providers (ChatOpenAI.chat, etc.)
564565
llm_indicators = ["chatopenai", "chatgoogleai", "chatanthropic", "chatvertexai", "openai.chat", "completion",
565566
"gpt-", "claude-", "gemini-", "llama-"]
566567
for indicator in llm_indicators:
567568
if indicator in span_name_lower:
568-
_logger.debug("Span '%s' matches LLM indicator '%s' - WILL EVALUATE", span.name, indicator)
569+
_logger.debug("[TL_PROCESSOR] Span evaluable (llm_indicator=%s): name=%s", indicator, span.name)
569570
return True
570571

571-
_logger.debug("Span '%s' is NOT an evaluation span (no indicators found)", span.name)
572572
return False
573573

574574
def _mutate_span_if_needed(self, span: ReadableSpan) -> None:
@@ -592,7 +592,7 @@ def _mutate_span_if_needed(self, span: ReadableSpan) -> None:
592592
applied_rule = rule
593593
break
594594
except Exception as match_err: # pragma: no cover - defensive
595-
logging.warning("Rule match error ignored: %s", match_err)
595+
logging.warning("[TL_PROCESSOR] Rule match error: %s", match_err)
596596

597597
# Decide which transformation config to apply
598598
if applied_rule is not None:

0 commit comments

Comments (0)