Skip to content

Commit 84a79a8

Browse files
authored
FFM-10837 Harden Metrics / Support anonymous targets (#88)
1 parent 8e27529 commit 84a79a8

File tree

9 files changed

+91
-29
lines changed

9 files changed

+91
-29
lines changed

docs/further_reading.md

Lines changed: 9 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -25,6 +25,15 @@ You can pass the configuration in as options when the SDK client is created.
2525
| pollInterval | with_poll_interval(120) | When running in stream mode, the interval in seconds that we poll for changes. | 60 |
2626
| maxAuthRetries | with_max_auth_retries(10) | The number of retry attempts to make if client authentication fails on a retryable HTTP error | 10 |
2727

28+
## Anonymous Target
29+
30+
If you do not want a `Target` to be sent to Harness servers, you can use the `anonymous` attribute.
31+
32+
```python
33+
target = Target(identifier='my_identifier', name='my_name', anonymous=True)
34+
```
35+
36+
2837
## Logging Configuration
2938
The SDK provides a logger that wraps the standard python logging package. You can import and use it with:
3039
```python

featureflags/__init__.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -2,4 +2,4 @@
22

33
__author__ = """Enver Bisevac"""
44
__email__ = "[email protected]"
5-
__version__ = '1.4.0'
5+
__version__ = '1.5.0'

featureflags/analytics.py

Lines changed: 68 additions & 22 deletions
Original file line numberDiff line numberDiff line change
@@ -1,6 +1,6 @@
11
import time
22
from threading import Lock, Thread
3-
from typing import Dict, List, Union
3+
from typing import Dict, List, Union, Set
44
import concurrent.futures
55

66
import attr
@@ -23,7 +23,8 @@
2323
from .sdk_logging_codes import info_metrics_thread_started, \
2424
info_metrics_success, warn_post_metrics_failed, \
2525
info_metrics_thread_existed, info_metrics_target_exceeded, \
26-
warn_post_metrics_target_batch_failed, info_metrics_target_batch_success
26+
warn_post_metrics_target_batch_failed, info_metrics_target_batch_success, \
27+
info_evaluation_metrics_exceeded
2728
from .util import log
2829

2930
FF_METRIC_TYPE = 'FFMETRICS'
@@ -33,7 +34,7 @@
3334
VARIATION_VALUE_ATTRIBUTE = 'variationValue'
3435
TARGET_ATTRIBUTE = 'target'
3536
SDK_VERSION_ATTRIBUTE = 'SDK_VERSION'
36-
SDK_VERSION = '1.4.0'
37+
SDK_VERSION = '1.5.0'
3738
SDK_TYPE_ATTRIBUTE = 'SDK_TYPE'
3839
SDK_TYPE = 'server'
3940
SDK_LANGUAGE_ATTRIBUTE = 'SDK_LANGUAGE'
@@ -64,13 +65,28 @@ def __init__(self, config: Config, client: AuthenticatedClient,
6465
self._config = config
6566
self._client = client
6667
self._environment = environment
68+
69+
# Evaluation metrics
6770
self._data: Dict[str, AnalyticsEvent] = {}
71+
# This allows for up to 2K flags with 5 variations each per interval
72+
self._max_evaluation_metrics = 10000
73+
self._max_evaluation_metrics_exceeded = False
74+
75+
# Target metrics - batch based
6876
self._target_data_batches: List[Dict[str, MetricTargetData]] = [{}]
69-
self._max_number_of_batches = 200
70-
self._max_batch_size = 1000
71-
self._current_batch_index = 0
77+
78+
# This allows for 100k unique targets per interval
79+
self._max_number_of_target_batches = 100
80+
self._max_target_batch_size = 1000
81+
self._current_target_batch_index = 0
7282
self.max_target_data_exceeded = False
7383

84+
# Allow us to track targets for the life of the SDK instance, to
85+
# prevent sending the same target more than once. This also helps
86+
# unique targets get processed each interval, because duplicate
87+
# targets do not count toward the 100K limit.
88+
self._seen_targets: Set[str] = set()
89+
7490
self._running = False
7591
self._runner = Thread(target=self._sync)
7692
self._runner.daemon = True
@@ -86,28 +102,49 @@ def enqueue(self, target: Target, identifier: str,
86102

87103
self._lock.acquire()
88104
try:
89-
# Store unique evaluation events. We map a unique evaluation
90-
# event to its count.
91-
unique_evaluation_key = self.get_key(event)
92-
if unique_evaluation_key in self._data:
93-
self._data[unique_evaluation_key].count += 1
105+
# Check if adding a new metric would exceed the 10,000 limit
106+
if len(self._data) < self._max_evaluation_metrics:
107+
108+
# Store unique evaluation events. We map a unique evaluation
109+
# event to its count.
110+
unique_evaluation_key = self.get_key(event)
111+
if unique_evaluation_key in self._data:
112+
self._data[unique_evaluation_key].count += 1
113+
else:
114+
event.count = 1
115+
self._data[unique_evaluation_key] = event
94116
else:
95-
event.count = 1
96-
self._data[unique_evaluation_key] = event
117+
if not self._max_evaluation_metrics_exceeded:
118+
self._max_evaluation_metrics_exceeded = True
119+
info_evaluation_metrics_exceeded()
120+
121+
# Don't store this target if it is anonymous. Note, we currently
122+
# don't do this check above for evaluation metrics, because we use
123+
# the global target.
124+
if target.anonymous:
125+
return
126+
127+
unique_target_key = self.get_target_key(event)
97128

98-
# Check if we're on our final batch - if we are, and we've
129+
# If we've seen this target before, don't process it
130+
if unique_target_key in self._seen_targets:
131+
return
132+
133+
self._seen_targets.add(unique_target_key)
134+
135+
# Check if we're on our final target batch - if we are, and we've
99136
# exceeded the max batch size just return early.
100-
if len(self._target_data_batches) >= self._max_number_of_batches:
137+
if len(self._target_data_batches) >= \
138+
self._max_number_of_target_batches:
101139
if len(self._target_data_batches[
102-
self._current_batch_index]) >= \
103-
self._max_batch_size:
140+
self._current_target_batch_index]) >= \
141+
self._max_target_batch_size:
104142
if not self.max_target_data_exceeded:
105143
self.max_target_data_exceeded = True
106144
info_metrics_target_exceeded()
107145
return
108146

109147
if event.target is not None and not event.target.anonymous:
110-
unique_target_key = self.get_target_key(event)
111148

112149
# Store unique targets. If the target already exists
113150
# in any of the batches, don't continue processing it
@@ -118,16 +155,17 @@ def enqueue(self, target: Target, identifier: str,
118155
# If we've exceeded the max batch size for the current
119156
# batch, then create a new batch and start using it.
120157
if len(self._target_data_batches[
121-
self._current_batch_index]) >= self._max_batch_size:
158+
self._current_target_batch_index]) >= \
159+
self._max_target_batch_size:
122160
self._target_data_batches.append({})
123-
self._current_batch_index += 1
161+
self._current_target_batch_index += 1
124162

125163
target_name = event.target.name
126164
# If the target has no name use the identifier
127165
if not target_name:
128166
target_name = event.target.identifier
129167
self._target_data_batches[
130-
self._current_batch_index][unique_target_key] = \
168+
self._current_target_batch_index][unique_target_key] = \
131169
MetricTargetData(
132170
identifier=event.target.identifier,
133171
name=target_name,
@@ -209,8 +247,9 @@ def _send_data(self) -> None:
209247
finally:
210248
self._data = {}
211249
self._target_data_batches = [{}]
212-
self._current_batch_index = 0
250+
self._current_target_batch_index = 0
213251
self.max_target_data_exceeded = False
252+
self._max_evaluation_metrics_exceeded = False
214253
self._lock.release()
215254

216255
body: Metrics = Metrics(target_data=target_data,
@@ -290,6 +329,13 @@ def process_target(self, target_data, unique_target):
290329
)
291330
target_data.append(td)
292331

332+
def is_target_seen(self, target: AnalyticsEvent) -> bool:
333+
unique_target_key = self.get_target_key(target)
334+
335+
with self._lock:
336+
seen = unique_target_key in self._seen_targets
337+
return seen
338+
293339
def close(self) -> None:
294340
self._running = False
295341
if len(self._data) > 0:

featureflags/client.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -316,8 +316,8 @@ def number_variation(self, identifier: str, target: Target,
316316
return default
317317

318318
def int_or_float_variation(self, identifier: str, target: Target,
319-
default: Union[float, int]) -> Union[
320-
float, int]:
319+
default: Union[float, int]) -> \
320+
Union[float, int]:
321321

322322
# If initialization has failed, then return the default variation
323323
# immediately

featureflags/evaluations/evaluator.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -14,7 +14,7 @@
1414
STARTS_WITH_OPERATOR)
1515
from featureflags.evaluations.distribution import Distribution
1616
from featureflags.evaluations.enum import FeatureState
17-
from featureflags.evaluations.feature import FeatureConfig
17+
from featureflags.evaluations.feature import FeatureConfig, FeatureConfigKind
1818
from featureflags.evaluations.serving_rule import ServingRule, ServingRules
1919
from featureflags.evaluations.variation import Variation
2020
from featureflags.evaluations.variation_map import VariationMap
@@ -38,7 +38,7 @@ class Evaluator(object):
3838
def __init__(self, provider: QueryInterface):
3939
self.provider = provider
4040

41-
def get_kind(self, identifier) -> Optional[str]:
41+
def get_kind(self, identifier) -> Optional[FeatureConfigKind]:
4242
fc = self.provider.get_flag(identifier)
4343
if not fc:
4444
return None

featureflags/sdk_logging_codes.py

Lines changed: 6 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -41,6 +41,8 @@ def get_sdk_code_message(key):
4141
"analytics interval will not be sent",
4242
7005: "Target metrics batches succeeded:",
4343
7006: "Target metrics batch/batches failed:",
44+
7007: "Evaluation metrics exceeded max size, remaining unique "
45+
"evaluations for this analytics interval will not be sent",
4446
# SDK_CACHE_8xxx
4547
8005: "Fetching feature by identifier attempt",
4648
8006: "Fetching segment by identifier attempt",
@@ -127,6 +129,10 @@ def info_metrics_target_batch_success(message):
127129
log.info(sdk_err_msg(7005, message))
128130

129131

132+
def info_evaluation_metrics_exceeded():
133+
log.info(sdk_err_msg(7007))
134+
135+
130136
def info_metrics_thread_existed():
131137
log.info(sdk_err_msg(7001))
132138

setup.cfg

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,5 +1,5 @@
11
[bumpversion]
2-
current_version = 1.4.0
2+
current_version = 1.5.0
33
commit = True
44
tag = True
55

setup.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -57,6 +57,6 @@
5757
test_suite="tests",
5858
tests_require=test_requirements,
5959
url="https://github.com/harness/ff-python-server-sdk",
60-
version='1.4.0',
60+
version='1.5.0',
6161
zip_safe=False,
6262
)

tests/unit/test_sdk_logging_codes.py

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -16,6 +16,7 @@ def test_logs_dont_raise_exception():
1616
sdk_codes.info_metrics_thread_started(10)
1717
sdk_codes.info_metrics_thread_existed()
1818
sdk_codes.info_metrics_success()
19+
sdk_codes.info_evaluation_metrics_exceeded()
1920
sdk_codes.info_metrics_target_exceeded()
2021
sdk_codes.warn_auth_failed_srv_defaults()
2122
sdk_codes.warn_failed_init_auth_error()

0 commit comments

Comments
 (0)