From e7d295f60d68ad5bf4b7831c1bf167e5bf9a7876 Mon Sep 17 00:00:00 2001
From: Matthew Keeler
Date: Tue, 23 Apr 2024 15:08:12 -0400
Subject: [PATCH] feat: Add support for hooks

---
 docs/api-main.rst                       |   6 +
 ldclient/client.py                      | 110 ++++++++++++---
 ldclient/config.py                      |  19 ++-
 ldclient/hook.py                        |  85 +++++++++++
 ldclient/testing/test_ldclient_hooks.py | 179 ++++++++++++++++++++++++
 setup.cfg                               |   2 +-
 6 files changed, 382 insertions(+), 19 deletions(-)
 create mode 100644 ldclient/hook.py
 create mode 100644 ldclient/testing/test_ldclient_hooks.py

diff --git a/docs/api-main.rst b/docs/api-main.rst
index 90df0ec0..7e00c5e9 100644
--- a/docs/api-main.rst
+++ b/docs/api-main.rst
@@ -20,6 +20,12 @@ ldclient.config module
    :members:
    :special-members: __init__
 
+ldclient.hook module
+--------------------------
+
+.. automodule:: ldclient.hook
+   :members:
+
 ldclient.evaluation module
 --------------------------
 
diff --git a/ldclient/client.py b/ldclient/client.py
index c4708e89..42113667 100644
--- a/ldclient/client.py
+++ b/ldclient/client.py
@@ -2,7 +2,7 @@
 This submodule contains the client class that provides most of the SDK functionality.
 """
 
-from typing import Optional, Any, Dict, Mapping, Union, Tuple, Callable
+from typing import Optional, Any, Dict, Mapping, Union, Tuple, Callable, List
 
 from .impl import AnyNum
 
@@ -15,6 +15,7 @@
 from ldclient.config import Config
 from ldclient.context import Context
 from ldclient.feature_store import _FeatureStoreDataSetSorter
+from ldclient.hook import Hook, EvaluationSeriesContext, _EvaluationWithHookResult
 from ldclient.evaluation import EvaluationDetail, FeatureFlagsState
 from ldclient.impl.big_segments import BigSegmentStoreManager
 from ldclient.impl.datasource.feature_requester import FeatureRequesterImpl
@@ -187,8 +188,10 @@ def __init__(self, config: Config, start_wait: float=5):
         self._config = config
         self._config._validate()
 
+        self.__hooks_lock = ReadWriteLock()
+        self.__hooks = config.hooks  # type: List[Hook]
+
         self._event_processor = None
-        self._lock = Lock()
         self._event_factory_default = EventFactory(False)
         self._event_factory_with_reasons = EventFactory(True)
 
@@ -395,8 +398,11 @@ def variation(self, key: str, context: Context, default: Any) -> Any:
           available from LaunchDarkly
         :return: the variation for the given context, or the ``default`` value if the flag cannot be evaluated
         """
-        detail, _ = self._evaluate_internal(key, context, default, self._event_factory_default)
-        return detail.value
+        def evaluate():
+            detail, _ = self._evaluate_internal(key, context, default, self._event_factory_default)
+            return _EvaluationWithHookResult(evaluation_detail=detail)
+
+        return self.__evaluate_with_hooks(key=key, context=context, default_value=default, method="variation", block=evaluate).evaluation_detail.value
 
     def variation_detail(self, key: str, context: Context, default: Any) -> EvaluationDetail:
         """Calculates the value of a feature flag for a given context, and returns an object that
@@ -412,8 +418,11 @@ def variation_detail(self, key: str, context: Context, default: Any) -> Evaluati
         :return: an :class:`ldclient.evaluation.EvaluationDetail` object that includes the feature flag
           value and evaluation reason
         """
-        detail, _ = self._evaluate_internal(key, context, default, self._event_factory_with_reasons)
-        return detail
+        def evaluate():
+            detail, _ = self._evaluate_internal(key, context, default, self._event_factory_with_reasons)
+            return _EvaluationWithHookResult(evaluation_detail=detail)
+
+        return self.__evaluate_with_hooks(key=key, context=context, default_value=default, method="variation_detail", block=evaluate).evaluation_detail
 
     def migration_variation(self, key: str, context: Context, default_stage: Stage) -> Tuple[Stage, OpTracker]:
         """
@@ -429,17 +438,21 @@ def migration_variation(self, key: str, context: Context, default_stage: Stage)
             log.error(f"default stage {default_stage} is not a valid stage; using 'off' instead")
             default_stage = Stage.OFF
 
-        detail, flag = self._evaluate_internal(key, context, default_stage.value, self._event_factory_default)
+        def evaluate():
+            detail, flag = self._evaluate_internal(key, context, default_stage.value, self._event_factory_default)
+
+            if isinstance(detail.value, str):
+                stage = Stage.from_str(detail.value)
+                if stage is not None:
+                    tracker = OpTracker(key, flag, context, detail, default_stage)
+                    return _EvaluationWithHookResult(evaluation_detail=detail, results={'default_stage': stage, 'tracker': tracker})
 
-        if isinstance(detail.value, str):
-            stage = Stage.from_str(detail.value)
-            if stage is not None:
-                tracker = OpTracker(key, flag, context, detail, default_stage)
-                return stage, tracker
+            detail = EvaluationDetail(default_stage.value, None, error_reason('WRONG_TYPE'))
+            tracker = OpTracker(key, flag, context, detail, default_stage)
+            return _EvaluationWithHookResult(evaluation_detail=detail, results={'default_stage': default_stage, 'tracker': tracker})
 
-        detail = EvaluationDetail(default_stage.value, None, error_reason('WRONG_TYPE'))
-        tracker = OpTracker(key, flag, context, detail, default_stage)
-        return default_stage, tracker
+        hook_result = self.__evaluate_with_hooks(key=key, context=context, default_value=default_stage, method="migration_variation", block=evaluate)
+        return hook_result.results['default_stage'], hook_result.results['tracker']
 
     def _evaluate_internal(self, key: str, context: Context, default: Any, event_factory) -> Tuple[EvaluationDetail, Optional[FeatureFlag]]:
         default = self._config.get_default(key, default)
@@ -451,8 +464,7 @@ def _evaluate_internal(self, key: str, context: Context, default: Any, event_fac
             if self._store.initialized:
                 log.warning("Feature Flag evaluation attempted before client has initialized - using last known values from feature store for feature key: " + key)
             else:
-                log.warning("Feature Flag evaluation attempted before client has initialized! Feature store unavailable - returning default: "
-                            + str(default) + " for feature key: " + key)
+                log.warning("Feature Flag evaluation attempted before client has initialized! Feature store unavailable - returning default: " + str(default) + " for feature key: " + key)
                 reason = error_reason('CLIENT_NOT_READY')
                 self._send_event(event_factory.new_unknown_flag_event(key, context, default, reason))
                 return EvaluationDetail(default, None, reason), None
@@ -583,6 +595,70 @@ def secure_mode_hash(self, context: Context) -> str:
             return ""
         return hmac.new(str(self._config.sdk_key).encode(), context.fully_qualified_key.encode(), hashlib.sha256).hexdigest()
 
+    def add_hook(self, hook: Hook):
+        """
+        Add a hook to the client. In order to register a hook before the client starts, please use the `hooks` property of
+        `Config`.
+
+        Hooks provide entrypoints which allow for observation of SDK functions.
+
+        :param hook: the hook to add
+        """
+        if not isinstance(hook, Hook):
+            return
+
+        self.__hooks_lock.lock()
+        self.__hooks.append(hook)
+        self.__hooks_lock.unlock()
+
+    def __evaluate_with_hooks(self, key: str, context: Context, default_value: Any, method: str, block: Callable[[], _EvaluationWithHookResult]) -> _EvaluationWithHookResult:
+        """
+        Run the provided block, wrapping it with evaluation hook support.
+
+        :param key: the flag key being evaluated
+        :param context: the context the flag is evaluated against
+        :param default_value: the default value provided for the evaluation
+        :param method: the name of the variation method that triggered the evaluation
+        :param block: the callable which performs the underlying evaluation
+        :return: the result of the evaluation, wrapped with any hook results
+        """
+        hooks = []  # type: List[Hook]
+        try:
+            self.__hooks_lock.rlock()
+
+            if len(self.__hooks) == 0:
+                return block()
+
+            hooks = self.__hooks.copy()
+        finally:
+            self.__hooks_lock.runlock()
+
+        series_context = EvaluationSeriesContext(key=key, context=context, default_value=default_value, method=method)
+        hook_data = self.__execute_before_evaluation(hooks, series_context)
+        evaluation_result = block()
+        self.__execute_after_evaluation(hooks, series_context, hook_data, evaluation_result.evaluation_detail)
+
+        return evaluation_result
+
+    def __execute_before_evaluation(self, hooks: List[Hook], series_context: EvaluationSeriesContext) -> List[Any]:
+        return [
+            self.__try_execute_stage("beforeEvaluation", hook.metadata.name, lambda: hook.before_evaluation(series_context, {}))
+            for hook in hooks
+        ]
+
+    def __execute_after_evaluation(self, hooks: List[Hook], series_context: EvaluationSeriesContext, hook_data: List[Any], evaluation_detail: EvaluationDetail) -> List[Any]:
+        return [
+            self.__try_execute_stage("afterEvaluation", hook.metadata.name, lambda: hook.after_evaluation(series_context, data, evaluation_detail))
+            for (hook, data) in reversed(list(zip(hooks, hook_data)))
+        ]
+
+    def __try_execute_stage(self, method: str, hook_name: str, block: Callable[[], dict]) -> Optional[dict]:
+        try:
+            return block()
+        except BaseException as e:
+            log.error(f"An error occurred in {method} of the hook {hook_name}: {e}")
+            return None
+
     @property
     def big_segment_store_status_provider(self) -> BigSegmentStoreStatusProvider:
         """
diff --git a/ldclient/config.py b/ldclient/config.py
index f51ea42e..12539ce3 100644
--- a/ldclient/config.py
+++ b/ldclient/config.py
@@ -8,6 +8,7 @@
 from threading import Event
 
 from ldclient.feature_store import InMemoryFeatureStore
+from ldclient.hook import Hook
 from ldclient.impl.util import log, validate_application_info
 from ldclient.interfaces import BigSegmentStore, EventProcessor, FeatureStore, UpdateProcessor, DataSourceUpdateSink
 
@@ -173,7 +174,8 @@ def __init__(self,
                  wrapper_version: Optional[str]=None,
                  http: HTTPConfig=HTTPConfig(),
                  big_segments: Optional[BigSegmentsConfig]=None,
-                 application: Optional[dict]=None):
+                 application: Optional[dict]=None,
+                 hooks: Optional[List[Hook]]=None):
         """
         :param sdk_key: The SDK key for your LaunchDarkly account. This is always required.
         :param base_uri: The base URL for the LaunchDarkly server. Most users should use the default
@@ -238,6 +240,7 @@
         :param http: Optional properties for customizing the client's HTTP/HTTPS behavior. See
           :class:`HTTPConfig`.
         :param application: Optional properties for setting application metadata. See :py:attr:`~application`
+        :param hooks: Hooks provide entrypoints which allow for observation of SDK functions.
         """
 
         self.__sdk_key = sdk_key
@@ -270,6 +273,7 @@
         self.__http = http
         self.__big_segments = BigSegmentsConfig() if not big_segments else big_segments
         self.__application = validate_application_info(application or {}, log)
+        self.__hooks = [hook for hook in hooks if isinstance(hook, Hook)] if hooks else []
         self._data_source_update_sink: Optional[DataSourceUpdateSink] = None
 
     def copy_with_new_sdk_key(self, new_sdk_key: str) -> 'Config':
@@ -442,6 +446,19 @@ def application(self) -> dict:
         """
         return self.__application
 
+    @property
+    def hooks(self) -> List[Hook]:
+        """
+        Initial set of hooks for the client.
+
+        Hooks provide entrypoints which allow for observation of SDK functions.
+
+        LaunchDarkly provides integration packages, and most applications will
+        not need to implement their own hooks. Refer to the
+        `launchdarkly-server-sdk-otel` package.
+        """
+        return self.__hooks
+
     @property
     def data_source_update_sink(self) -> Optional[DataSourceUpdateSink]:
         """
diff --git a/ldclient/hook.py b/ldclient/hook.py
new file mode 100644
index 00000000..3f594fc4
--- /dev/null
+++ b/ldclient/hook.py
@@ -0,0 +1,85 @@
+from ldclient.context import Context
+from ldclient.evaluation import EvaluationDetail
+
+from abc import ABCMeta, abstractmethod, abstractproperty
+from dataclasses import dataclass
+from typing import Any
+
+
+@dataclass
+class EvaluationSeriesContext:
+    """
+    Contextual information that will be provided to handlers during evaluation
+    series.
+    """
+
+    key: str  #: The flag key used to trigger the evaluation.
+    context: Context  #: The context used during evaluation.
+    default_value: Any  #: The default value provided to the evaluation method.
+    method: str  #: The string version of the method which triggered the evaluation series.
+
+
+@dataclass
+class Metadata:
+    """
+    Metadata data class used for annotating hook implementations.
+    """
+
+    name: str  #: A name representing a hook instance.
+
+
+class Hook:
+    """
+    Abstract class for extending SDK functionality via hooks.
+
+    All provided hook implementations **MUST** inherit from this class.
+
+    This class includes default implementations for all hook handlers. This
+    allows LaunchDarkly to expand the list of hook handlers without breaking
+    customer integrations.
+    """
+    __metaclass__ = ABCMeta
+
+    @abstractproperty
+    def metadata(self) -> Metadata:
+        """
+        Get metadata about the hook implementation.
+        """
+        return Metadata(name='UNDEFINED')
+
+    @abstractmethod
+    def before_evaluation(self, series_context: EvaluationSeriesContext, data: dict) -> dict:
+        """
+        The before method is called during the execution of a variation method
+        before the flag value has been determined. The method is executed
+        synchronously.
+
+        :param series_context: Contains information about the evaluation being performed. This is not mutable.
+        :param data: A record associated with each stage of hook invocations.
+            Each stage is called with the data of the previous stage for a series.
+            The input record should not be modified.
+        :return: Data to use when executing the next stage of the hook in the evaluation series.
+        """
+        return data
+
+    @abstractmethod
+    def after_evaluation(self, series_context: EvaluationSeriesContext, data: dict, detail: EvaluationDetail) -> dict:
+        """
+        The after method is called during the execution of the variation method
+        after the flag value has been determined. The method is executed
+        synchronously.
+
+        :param series_context: Contains read-only information about the
+            evaluation being performed.
+        :param data: A record associated with each stage of hook invocations.
+            Each stage is called with the data of the previous stage for a series.
+        :param detail: The result of the evaluation. This value should not be modified.
+        :return: Data to use when executing the next stage of the hook in the evaluation series.
+        """
+        return data
+
+
+@dataclass
+class _EvaluationWithHookResult:
+    evaluation_detail: EvaluationDetail
+    results: Any = None
diff --git a/ldclient/testing/test_ldclient_hooks.py b/ldclient/testing/test_ldclient_hooks.py
new file mode 100644
index 00000000..41c095c4
--- /dev/null
+++ b/ldclient/testing/test_ldclient_hooks.py
@@ -0,0 +1,179 @@
+from ldclient.evaluation import EvaluationDetail
+from ldclient import LDClient, Config, Context
+from ldclient.hook import Hook, Metadata, EvaluationSeriesContext
+from ldclient.migrations import Stage
+
+from ldclient.integrations.test_data import TestData
+
+from typing import Callable, Any
+
+
+def record(label, log):
+    def inner(*args, **kwargs):
+        log.append(label)
+
+    return inner
+
+
+class MockHook(Hook):
+    def __init__(self, before_evaluation: Callable[[EvaluationSeriesContext, Any], dict], after_evaluation: Callable[[EvaluationSeriesContext, Any, EvaluationDetail], dict]):
+        self.__before_evaluation = before_evaluation
+        self.__after_evaluation = after_evaluation
+
+    @property
+    def metadata(self) -> Metadata:
+        return Metadata(name='test-hook')
+
+    def before_evaluation(self, series_context: EvaluationSeriesContext, data):
+        return self.__before_evaluation(series_context, data)
+
+    def after_evaluation(self, series_context: EvaluationSeriesContext, data, detail: EvaluationDetail):
+        return self.__after_evaluation(series_context, data, detail)
+
+
+user = Context.from_dict({'key': 'userkey', 'kind': 'user'})
+
+
+def test_verify_hook_execution_order():
+    calls = []
+    configHook1 = MockHook(before_evaluation=record('configHook1::before', calls), after_evaluation=record('configHook1::after', calls))
+    configHook2 = MockHook(before_evaluation=record('configHook2::before', calls), after_evaluation=record('configHook2::after', calls))
+
+    clientHook1 = MockHook(before_evaluation=record('clientHook1::before', calls), after_evaluation=record('clientHook1::after', calls))
+    clientHook2 = MockHook(before_evaluation=record('clientHook2::before', calls), after_evaluation=record('clientHook2::after', calls))
+
+    config = Config('SDK_KEY', update_processor_class=TestData.data_source(), send_events=False, hooks=[configHook1, configHook2])
+    client = LDClient(config=config)
+    client.add_hook(clientHook1)
+    client.add_hook(clientHook2)
+
+    client.variation('invalid', user, False)
+
+    assert calls == ['configHook1::before', 'configHook2::before', 'clientHook1::before', 'clientHook2::before', 'clientHook2::after', 'clientHook1::after', 'configHook2::after', 'configHook1::after']
+
+
+def test_ignores_invalid_hooks():
+    calls = []
+    hook = MockHook(before_evaluation=record('before', calls), after_evaluation=record('after', calls))
+
+    config = Config('SDK_KEY', update_processor_class=TestData.data_source(), send_events=False, hooks=[True, hook, 42])
+    client = LDClient(config=config)
+    client.add_hook("Hook, Hook, give us the Hook!")
+    client.add_hook(hook)
+    client.add_hook(None)
+
+    client.variation('invalid', user, False)
+
+    assert calls == ['before', 'before', 'after', 'after']
+
+
+def test_after_evaluation_receives_evaluation_detail():
+    details = []
+    hook = MockHook(before_evaluation=record('before', []), after_evaluation=lambda series_context, data, detail: details.append(detail))
+
+    td = TestData.data_source()
+    td.update(td.flag('flag-key').variation_for_all(True))
+
+    config = Config('SDK_KEY', update_processor_class=td, send_events=False, hooks=[hook])
+    client = LDClient(config=config)
+    client.variation('flag-key', user, False)
+
+    assert len(details) == 1
+    assert details[0].value is True
+    assert details[0].variation_index == 0
+
+
+def test_passing_data_from_before_to_after():
+    calls = []
+    hook = MockHook(before_evaluation=lambda series_context, data: "from before", after_evaluation=lambda series_context, data, detail: calls.append(data))
+
+    config = Config('SDK_KEY', update_processor_class=TestData.data_source(), send_events=False, hooks=[hook])
+    client = LDClient(config=config)
+    client.variation('flag-key', user, False)
+
+    assert len(calls) == 1
+    assert calls[0] == "from before"
+
+
+def test_exception_in_before_passes_none():
+    def raise_exception(series_context, data):
+        raise Exception("error")
+
+    calls = []
+    hook = MockHook(before_evaluation=raise_exception, after_evaluation=lambda series_context, data, detail: calls.append(data))
+
+    config = Config('SDK_KEY', update_processor_class=TestData.data_source(), send_events=False, hooks=[hook])
+    client = LDClient(config=config)
+    client.variation('flag-key', user, False)
+
+    assert len(calls) == 1
+    assert calls[0] is None
+
+
+def test_exceptions_do_not_affect_data_passing_order():
+    def raise_exception(series_context, data):
+        raise Exception("error")
+
+    calls = []
+    hook1 = MockHook(before_evaluation=lambda series_context, data: "first hook", after_evaluation=lambda series_context, data, detail: calls.append(data))
+    hook2 = MockHook(before_evaluation=raise_exception, after_evaluation=lambda series_context, data, detail: calls.append(data))
+    hook3 = MockHook(before_evaluation=lambda series_context, data: "third hook", after_evaluation=lambda series_context, data, detail: calls.append(data))
+
+    config = Config('SDK_KEY', update_processor_class=TestData.data_source(), send_events=False, hooks=[hook1, hook2, hook3])
+    client = LDClient(config=config)
+    client.variation('flag-key', user, False)
+
+    assert len(calls) == 3
+    # NOTE: These are reversed since the push happens in the after_evaluation
+    # (when hooks are reversed)
+    assert calls[0] == "third hook"
+    assert calls[1] is None
+    assert calls[2] == "first hook"
+
+
+def test_migration_evaluation_detail_contains_stage_value():
+    details = []
+    hook = MockHook(before_evaluation=record('before', []), after_evaluation=lambda series_context, data, detail: details.append(detail))
+
+    td = TestData.data_source()
+    td.update(td.flag('flag-key').variations("off").variation_for_all(0))
+
+    config = Config('SDK_KEY', update_processor_class=td, send_events=False, hooks=[hook])
+    client = LDClient(config=config)
+    client.migration_variation('flag-key', user, Stage.LIVE)
+
+    assert len(details) == 1
+    assert details[0].value == Stage.OFF.value
+    assert details[0].variation_index == 0
+
+
+def test_migration_evaluation_detail_gets_default_if_flag_isnt_migration_flag():
+    details = []
+    hook = MockHook(before_evaluation=record('before', []), after_evaluation=lambda series_context, data, detail: details.append(detail))
+
+    td = TestData.data_source()
+    td.update(td.flag('flag-key').variations("nonstage").variation_for_all(0))
+
+    config = Config('SDK_KEY', update_processor_class=td, send_events=False, hooks=[hook])
+    client = LDClient(config=config)
+    client.migration_variation('flag-key', user, Stage.LIVE)
+
+    assert len(details) == 1
+    assert details[0].value == Stage.LIVE.value
+    assert details[0].variation_index is None
+
+
+def test_migration_evaluation_detail_default_converts_to_off_if_invalid():
+    details = []
+    hook = MockHook(before_evaluation=record('before', []), after_evaluation=lambda series_context, data, detail: details.append(detail))
+
+    td = TestData.data_source()
+    td.update(td.flag('flag-key').variations("nonstage").variation_for_all(0))
+
+    config = Config('SDK_KEY', update_processor_class=td, send_events=False, hooks=[hook])
+    client = LDClient(config=config)
+    client.migration_variation('flag-key', user, "invalid")
+
+    assert len(details) == 1
+    assert details[0].value == Stage.OFF.value
+    assert details[0].variation_index is None
diff --git a/setup.cfg b/setup.cfg
index c1781905..c2ccfc54 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -1,2 +1,2 @@
 [pycodestyle]
-ignore = E501
+ignore = E252,E501
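
As a usage sketch of the hook API introduced above (the ``TimingHook`` class, the ``'my-sdk-key'`` and ``'example-flag'`` values, and the ``'timing-hook'`` name are illustrative placeholders, not part of the patch), an application-defined hook might look like this::

    import time

    from ldclient import Config, Context, LDClient
    from ldclient.evaluation import EvaluationDetail
    from ldclient.hook import EvaluationSeriesContext, Hook, Metadata


    class TimingHook(Hook):
        """Hypothetical hook that reports how long each evaluation took."""

        @property
        def metadata(self) -> Metadata:
            return Metadata(name='timing-hook')

        def before_evaluation(self, series_context: EvaluationSeriesContext, data: dict) -> dict:
            # The returned record is handed back to this hook's after_evaluation stage.
            return {**data, 'started_at': time.monotonic()}

        def after_evaluation(self, series_context: EvaluationSeriesContext, data: dict, detail: EvaluationDetail) -> dict:
            elapsed = time.monotonic() - data['started_at']
            print(f"{series_context.method}({series_context.key}) -> {detail.value!r} after {elapsed:.6f}s")
            return data


    # Hooks can be supplied up front through the Config...
    config = Config('my-sdk-key', hooks=[TimingHook()])
    client = LDClient(config=config)

    # ...or registered later on a running client.
    client.add_hook(TimingHook())

    client.variation('example-flag', Context.from_dict({'key': 'user-key', 'kind': 'user'}), False)

As exercised by test_verify_hook_execution_order, before_evaluation stages run in registration order (Config hooks first, then hooks added via add_hook), after_evaluation stages run in the reverse order, and the value returned from a hook's before stage is passed back to that same hook's after stage.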