Skip to content

Commit a41bb69

Browse files
authored
Merge branch 'main' into genai-utils-metrics
2 parents dde7b5e + 5279805 commit a41bb69

File tree

44 files changed

+1596
-117
lines changed

Some content is hidden

Large Commits have some content hidden by default. Use the searchbox below for content that may be hidden.

44 files changed

+1596
-117
lines changed

CHANGELOG.md

Lines changed: 11 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -11,6 +11,9 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
1111
1212
## Unreleased
1313

14+
- `opentelemetry-instrumentation-requests`, `opentelemetry-instrumentation-wsgi`, `opentelemetry-instrumentation-asgi` Detect synthetic sources on requests, ASGI, and WSGI.
15+
([#3674](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/3674))
16+
1417
### Added
1518

1619
- `opentelemetry-instrumentation-aiohttp-client`: add support for url exclusions via `OTEL_PYTHON_EXCLUDED_URLS` / `OTEL_PYTHON_AIOHTTP_CLIENT_EXCLUDED_URLS`
@@ -28,6 +31,12 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
2831
- `opentelemetry-instrumentation-aiohttp-server`: add support for custom header captures via `OTEL_INSTRUMENTATION_HTTP_CAPTURE_HEADERS_SERVER_REQUEST` and `OTEL_INSTRUMENTATION_HTTP_CAPTURE_HEADERS_SERVER_RESPONSE`
2932
([#3916](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/3916))
3033
- `opentelemetry-instrumentation-redis`: add support for `suppress_instrumentation` context manager for both sync and async Redis clients and pipelines
34+
- `opentelemetry-instrumentation-django`: improve docs for response_hook with examples of providing attributes from middlewares
35+
([#3923](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/3923))
36+
- Update for Log SDK breaking changes. Rename InMemoryLogExporter to InMemoryLogRecordExporter in several tests
37+
([#3589](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/3589))
38+
- opentelemetry-instrumentation: allow to skip all instrumentations loading with a wildcard
39+
([#3967](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/3967))
3140

3241
### Fixed
3342

@@ -72,6 +81,8 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
7281
([#3796](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/3796))
7382
- `opentelemetry-instrumentation-fastapi`: Fix handling of APIRoute subclasses
7483
([#3681](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/3681))
84+
- `opentelemetry-instrumentation-flask`: Fix exemplars generation for `http.server.request.duration` and `http.server.duration` metrics
85+
([#3912](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/3912))
7586

7687
### Added
7788

README.md

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -124,6 +124,7 @@ For more information about the maintainer role, see the [community repository](h
124124
- [Dylan Russell](https://github.com/dylanrussell), Google
125125
- [Emídio Neto](https://github.com/emdneto), PicPay
126126
- [Jeremy Voss](https://github.com/jeremydvoss), Microsoft
127+
- [Liudmila Molkova](https://github.com/lmolkova), Grafana Labs
127128
- [Owais Lone](https://github.com/owais), Splunk
128129
- [Pablo Collins](https://github.com/pmcollins), Splunk
129130
- [Sanket Mehta](https://github.com/sanketmehta28), Cisco

instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/common/otel_mocker.py

Lines changed: 16 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -22,10 +22,21 @@
2222
)
2323
from opentelemetry.metrics import get_meter_provider, set_meter_provider
2424
from opentelemetry.sdk._logs import LoggerProvider
25-
from opentelemetry.sdk._logs.export import (
26-
InMemoryLogExporter,
27-
SimpleLogRecordProcessor,
28-
)
25+
26+
# Backward compatibility for InMemoryLogExporter -> InMemoryLogRecordExporter rename
27+
try:
28+
from opentelemetry.sdk._logs.export import ( # pylint: disable=no-name-in-module
29+
InMemoryLogRecordExporter,
30+
SimpleLogRecordProcessor,
31+
)
32+
except ImportError:
33+
# Fallback to old name for compatibility with older SDK versions
34+
from opentelemetry.sdk._logs.export import (
35+
InMemoryLogExporter as InMemoryLogRecordExporter,
36+
)
37+
from opentelemetry.sdk._logs.export import (
38+
SimpleLogRecordProcessor,
39+
)
2940
from opentelemetry.sdk.metrics import MeterProvider
3041
from opentelemetry.sdk.metrics._internal.export import InMemoryMetricReader
3142
from opentelemetry.sdk.trace import TracerProvider
@@ -114,7 +125,7 @@ def data(self):
114125
class OTelMocker:
115126
def __init__(self):
116127
self._snapshot = None
117-
self._logs = InMemoryLogExporter()
128+
self._logs = InMemoryLogRecordExporter()
118129
self._traces = InMemorySpanExporter()
119130
self._metrics = InMemoryMetricReader()
120131
self._spans = []

instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/generate_content/nonstreaming_base.py

Lines changed: 4 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -20,7 +20,6 @@
2020
from google.genai.types import GenerateContentConfig
2121
from pydantic import BaseModel, Field
2222

23-
from opentelemetry._events import Event
2423
from opentelemetry.instrumentation._semconv import (
2524
_OpenTelemetrySemanticConventionStability,
2625
_OpenTelemetryStabilitySignalType,
@@ -324,7 +323,7 @@ def test_new_semconv_record_completion_as_log(self):
324323
event.attributes,
325324
)
326325
else:
327-
attrs = {
326+
expected_event_attributes = {
328327
gen_ai_attributes.GEN_AI_INPUT_MESSAGES: (
329328
{
330329
"role": "user",
@@ -346,31 +345,27 @@ def test_new_semconv_record_completion_as_log(self):
346345
{"content": sys_instr, "type": "text"},
347346
),
348347
}
349-
expected_event = Event(
350-
"gen_ai.client.inference.operation.details",
351-
attributes=attrs,
352-
)
353348
self.assertEqual(
354349
event.attributes[
355350
gen_ai_attributes.GEN_AI_INPUT_MESSAGES
356351
],
357-
expected_event.attributes[
352+
expected_event_attributes[
358353
gen_ai_attributes.GEN_AI_INPUT_MESSAGES
359354
],
360355
)
361356
self.assertEqual(
362357
event.attributes[
363358
gen_ai_attributes.GEN_AI_OUTPUT_MESSAGES
364359
],
365-
expected_event.attributes[
360+
expected_event_attributes[
366361
gen_ai_attributes.GEN_AI_OUTPUT_MESSAGES
367362
],
368363
)
369364
self.assertEqual(
370365
event.attributes[
371366
gen_ai_attributes.GEN_AI_SYSTEM_INSTRUCTIONS
372367
],
373-
expected_event.attributes[
368+
expected_event_attributes[
374369
gen_ai_attributes.GEN_AI_SYSTEM_INSTRUCTIONS
375370
],
376371
)

instrumentation-genai/opentelemetry-instrumentation-openai-v2/CHANGELOG.md

Lines changed: 8 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -7,10 +7,17 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
77

88
## Unreleased
99

10+
## Version 2.2b0 (2025-11-25)
11+
12+
- Fix service tier attribute names: use `GEN_AI_OPENAI_REQUEST_SERVICE_TIER` for request
13+
attributes and `GEN_AI_OPENAI_RESPONSE_SERVICE_TIER` for response attributes.
14+
([#3920](https://github.com/open-telemetry/opentelemetry-python-contrib/issues/3920))
1015
- Added support for OpenAI embeddings instrumentation
1116
([#3461](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/3461))
1217
- Record prompt and completion events regardless of span sampling decision.
1318
([#3226](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/3226))
19+
- Filter out attributes with the value of NotGiven instances
20+
([#3760](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/3760))
1421
- Migrate off the deprecated events API to use the logs API
1522
([#3628](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/3628))
1623

@@ -34,4 +41,4 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
3441
([#2925](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/2925))
3542

3643
- Initial OpenAI instrumentation
37-
([#2759](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/2759))
44+
([#2759](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/2759))

instrumentation-genai/opentelemetry-instrumentation-openai-v2/src/opentelemetry/instrumentation/openai_v2/patch.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -370,7 +370,7 @@ def _set_response_attributes(
370370
if getattr(result, "service_tier", None):
371371
set_span_attribute(
372372
span,
373-
GenAIAttributes.GEN_AI_OPENAI_REQUEST_SERVICE_TIER,
373+
GenAIAttributes.GEN_AI_OPENAI_RESPONSE_SERVICE_TIER,
374374
result.service_tier,
375375
)
376376

instrumentation-genai/opentelemetry-instrumentation-openai-v2/src/opentelemetry/instrumentation/openai_v2/utils.py

Lines changed: 18 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -12,12 +12,14 @@
1212
# See the License for the specific language governing permissions and
1313
# limitations under the License.
1414

15+
from __future__ import annotations
16+
1517
from os import environ
16-
from typing import Mapping, Optional, Union
18+
from typing import Mapping
1719
from urllib.parse import urlparse
1820

1921
from httpx import URL
20-
from openai import NOT_GIVEN
22+
from openai import NotGiven
2123

2224
from opentelemetry._logs import LogRecord
2325
from opentelemetry.semconv._incubating.attributes import (
@@ -179,8 +181,12 @@ def is_streaming(kwargs):
179181
return non_numerical_value_is_set(kwargs.get("stream"))
180182

181183

182-
def non_numerical_value_is_set(value: Optional[Union[bool, str]]):
183-
return bool(value) and value != NOT_GIVEN
184+
def non_numerical_value_is_set(value: bool | str | NotGiven | None):
185+
return bool(value) and value_is_set(value)
186+
187+
188+
def value_is_set(value):
189+
return value is not None and not isinstance(value, NotGiven)
184190

185191

186192
def get_llm_request_attributes(
@@ -230,8 +236,13 @@ def get_llm_request_attributes(
230236
GenAIAttributes.GEN_AI_OPENAI_REQUEST_RESPONSE_FORMAT
231237
] = response_format
232238

239+
# service_tier can be passed directly or in extra_body (in SDK 1.26.0 it's via extra_body)
233240
service_tier = kwargs.get("service_tier")
234-
attributes[GenAIAttributes.GEN_AI_OPENAI_RESPONSE_SERVICE_TIER] = (
241+
if service_tier is None:
242+
extra_body = kwargs.get("extra_body")
243+
if isinstance(extra_body, Mapping):
244+
service_tier = extra_body.get("service_tier")
245+
attributes[GenAIAttributes.GEN_AI_OPENAI_REQUEST_SERVICE_TIER] = (
235246
service_tier if service_tier != "auto" else None
236247
)
237248

@@ -252,8 +263,8 @@ def get_llm_request_attributes(
252263

253264
set_server_address_and_port(client_instance, attributes)
254265

255-
# filter out None values
256-
return {k: v for k, v in attributes.items() if v is not None}
266+
# filter out values not set
267+
return {k: v for k, v in attributes.items() if value_is_set(v)}
257268

258269

259270
def handle_span_exception(span, error):

instrumentation-genai/opentelemetry-instrumentation-openai-v2/src/opentelemetry/instrumentation/openai_v2/version.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -12,4 +12,4 @@
1212
# See the License for the specific language governing permissions and
1313
# limitations under the License.
1414

15-
__version__ = "2.2b0.dev"
15+
__version__ = "2.3b0.dev"
Lines changed: 10 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,10 @@
1+
## Recording calls
2+
3+
If you need to record calls, you need to export `OPENAI_API_KEY` as an environment variable.
4+
Since tox blocks environment variables by default, you need to override its configuration to let them pass:
5+
6+
```
7+
export TOX_OVERRIDE=testenv.pass_env=OPENAI_API_KEY
8+
```
9+
10+
We are not adding it to tox.ini because of security concerns.
Lines changed: 144 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,144 @@
1+
interactions:
2+
- request:
3+
body: |-
4+
{
5+
"messages": [
6+
{
7+
"role": "user",
8+
"content": "Say this is a test"
9+
}
10+
],
11+
"model": "gpt-4o-mini",
12+
"stream": false
13+
}
14+
headers:
15+
accept:
16+
- application/json
17+
accept-encoding:
18+
- gzip, deflate
19+
authorization:
20+
- Bearer test_openai_api_key
21+
connection:
22+
- keep-alive
23+
content-length:
24+
- '106'
25+
content-type:
26+
- application/json
27+
host:
28+
- api.openai.com
29+
user-agent:
30+
- OpenAI/Python 1.109.1
31+
x-stainless-arch:
32+
- x64
33+
x-stainless-async:
34+
- 'false'
35+
x-stainless-lang:
36+
- python
37+
x-stainless-os:
38+
- Linux
39+
x-stainless-package-version:
40+
- 1.109.1
41+
x-stainless-read-timeout:
42+
- '600'
43+
x-stainless-retry-count:
44+
- '0'
45+
x-stainless-runtime:
46+
- CPython
47+
x-stainless-runtime-version:
48+
- 3.10.12
49+
method: POST
50+
uri: https://api.openai.com/v1/chat/completions
51+
response:
52+
body:
53+
string: |-
54+
{
55+
"id": "chatcmpl-CZDvsSHdMnAgYuQ8J81NMgOK2wfam",
56+
"object": "chat.completion",
57+
"created": 1762511072,
58+
"model": "gpt-4o-mini-2024-07-18",
59+
"choices": [
60+
{
61+
"index": 0,
62+
"message": {
63+
"role": "assistant",
64+
"content": "This is a test. How can I assist you today?",
65+
"refusal": null,
66+
"annotations": []
67+
},
68+
"logprobs": null,
69+
"finish_reason": "stop"
70+
}
71+
],
72+
"usage": {
73+
"prompt_tokens": 12,
74+
"completion_tokens": 12,
75+
"total_tokens": 24,
76+
"prompt_tokens_details": {
77+
"cached_tokens": 0,
78+
"audio_tokens": 0
79+
},
80+
"completion_tokens_details": {
81+
"reasoning_tokens": 0,
82+
"audio_tokens": 0,
83+
"accepted_prediction_tokens": 0,
84+
"rejected_prediction_tokens": 0
85+
}
86+
},
87+
"service_tier": "default",
88+
"system_fingerprint": "fp_560af6e559"
89+
}
90+
headers:
91+
CF-RAY:
92+
- 99ac1f128834ed5e-MXP
93+
Connection:
94+
- keep-alive
95+
Content-Type:
96+
- application/json
97+
Date:
98+
- Fri, 07 Nov 2025 10:24:32 GMT
99+
Server:
100+
- cloudflare
101+
Set-Cookie: test_set_cookie
102+
Strict-Transport-Security:
103+
- max-age=31536000; includeSubDomains; preload
104+
Transfer-Encoding:
105+
- chunked
106+
X-Content-Type-Options:
107+
- nosniff
108+
access-control-expose-headers:
109+
- X-Request-ID
110+
alt-svc:
111+
- h3=":443"; ma=86400
112+
cf-cache-status:
113+
- DYNAMIC
114+
content-length:
115+
- '850'
116+
openai-organization: test_openai_org_id
117+
openai-processing-ms:
118+
- '512'
119+
openai-project:
120+
- proj_Pf1eM5R55Z35wBy4rt8PxAGq
121+
openai-version:
122+
- '2020-10-01'
123+
x-envoy-upstream-service-time:
124+
- '797'
125+
x-openai-proxy-wasm:
126+
- v0.1
127+
x-ratelimit-limit-requests:
128+
- '10000'
129+
x-ratelimit-limit-tokens:
130+
- '10000000'
131+
x-ratelimit-remaining-requests:
132+
- '9999'
133+
x-ratelimit-remaining-tokens:
134+
- '9999993'
135+
x-ratelimit-reset-requests:
136+
- 6ms
137+
x-ratelimit-reset-tokens:
138+
- 0s
139+
x-request-id:
140+
- req_9eac1833161f4ac89019c12f24002ef4
141+
status:
142+
code: 200
143+
message: OK
144+
version: 1

0 commit comments

Comments
 (0)