Skip to content
11 changes: 11 additions & 0 deletions README.md
Original file line number Diff line number Diff line change
Expand Up @@ -190,6 +190,17 @@ client.get_boolean_flag("my-flag", False, flag_evaluation_options=options)

The OpenFeature SDK logs to the `openfeature` logger using the `logging` package from the Python Standard Library.

#### Logging Hook

The Python SDK includes a `LoggingHook`, which logs detailed information at key points during flag evaluation using the `logging` package. This hook can be particularly helpful for troubleshooting and debugging; simply attach it at the global, client, or invocation level and ensure your log level is set to "debug".

```python
from openfeature import api
from openfeature.hook.logging_hook import LoggingHook

api.add_hooks([LoggingHook()])
```

### Domains

Clients can be assigned to a domain.
Expand Down
68 changes: 68 additions & 0 deletions openfeature/hook/logging_hook.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,68 @@
import json
import logging
from dataclasses import asdict
from typing import Any, Dict, Optional

from openfeature.evaluation_context import EvaluationContext
from openfeature.exception import ErrorCode, OpenFeatureError
from openfeature.flag_evaluation import FlagEvaluationDetails, FlagValueType
from openfeature.hook import Hook, HookContext, HookHints


class LoggingHook(Hook):
    """Hook that logs flag evaluation details at each lifecycle stage.

    ``before`` and ``after`` are logged at DEBUG level; ``error`` is logged
    at ERROR level. Set ``include_evaluation_context=True`` to serialize the
    evaluation context into every record — this may expose user attributes
    in logs, so it is off by default.
    """

    def __init__(
        self,
        include_evaluation_context: bool = False,
        logger: Optional[logging.Logger] = None,
    ):
        """
        :param include_evaluation_context: when True, include a JSON dump of
            the evaluation context in every log record
        :param logger: logger to emit records on; defaults to the SDK-wide
            "openfeature" logger
        """
        # NOTE: Optional[...] instead of ``logging.Logger | None`` keeps the
        # signature importable on Python < 3.10 — PEP 604 unions in a live
        # (non-deferred) annotation raise TypeError at class creation there.
        self.logger = logger or logging.getLogger("openfeature")
        self.include_evaluation_context = include_evaluation_context

    def _build_args(self, hook_context: HookContext, stage: str) -> Dict[str, Any]:
        """Assemble the structured payload shared by every stage's record."""
        args: Dict[str, Any] = {
            # Client/provider metadata are optional on the hook context;
            # fall back to None rather than raising on a bare context.
            "domain": hook_context.client_metadata.domain
            if hook_context.client_metadata
            else None,
            "provider_name": hook_context.provider_metadata.name
            if hook_context.provider_metadata
            else None,
            "flag_key": hook_context.flag_key,
            "default_value": hook_context.default_value,
            "stage": stage,
        }
        if self.include_evaluation_context:
            # default=str stringifies non-JSON-native attribute values
            # (e.g. datetimes) instead of raising TypeError.
            args["evaluation_context"] = json.dumps(
                asdict(hook_context.evaluation_context),
                default=str,
            )
        return args

    def before(
        self, hook_context: HookContext, hints: HookHints
    ) -> EvaluationContext | None:
        """Log the evaluation at DEBUG before the provider is invoked."""
        args = self._build_args(hook_context, "before")
        self.logger.debug("Flag evaluation %s", args)
        # No context mutation — this hook only observes.
        return None

    def after(
        self,
        hook_context: HookContext,
        details: FlagEvaluationDetails[FlagValueType],
        hints: HookHints,
    ) -> None:
        """Log the resolved details (reason/variant/value) at DEBUG."""
        args = self._build_args(hook_context, "after")
        args["reason"] = details.reason
        args["variant"] = details.variant
        args["value"] = details.value
        self.logger.debug("Flag evaluation %s", args)

    def error(
        self, hook_context: HookContext, exception: Exception, hints: HookHints
    ) -> None:
        """Log the failure at ERROR, extracting the SDK error code if any."""
        args = self._build_args(hook_context, "error")
        if isinstance(exception, OpenFeatureError):
            args["error_code"] = exception.error_code
            args["error_message"] = exception.error_message
        else:
            # Non-SDK exceptions are mapped to the catch-all GENERAL code.
            args["error_code"] = ErrorCode.GENERAL
            args["error_message"] = str(exception)
        self.logger.error("Flag evaluation %s", args)
197 changes: 197 additions & 0 deletions tests/hook/test_logging_hook.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,197 @@
from unittest.mock import MagicMock

import pytest

from openfeature.client import ClientMetadata
from openfeature.evaluation_context import EvaluationContext
from openfeature.exception import ErrorCode, FlagNotFoundError
from openfeature.flag_evaluation import FlagEvaluationDetails, FlagType
from openfeature.hook.logging_hook import HookContext, LoggingHook
from openfeature.provider.metadata import Metadata


@pytest.fixture()
def hook_context():
    """Shared HookContext populated with client and provider metadata."""
    context = EvaluationContext("user-1", {"env": "prod"})
    return HookContext(
        flag_key="my-flag",
        flag_type=FlagType.BOOLEAN,
        default_value=False,
        evaluation_context=context,
        client_metadata=ClientMetadata(domain="my-domain"),
        provider_metadata=Metadata(name="my-provider"),
    )


def test_before_calls_debug_with_stage(hook_context):
    """before() emits one DEBUG record tagged with the 'before' stage."""
    logger = MagicMock()
    hook = LoggingHook(logger=logger)

    hook.before(hook_context, hints={})

    expected = {
        "stage": "before",
        "flag_key": "my-flag",
        "default_value": False,
        "domain": "my-domain",
        "provider_name": "my-provider",
    }
    logger.debug.assert_called_with("Flag evaluation %s", expected)


def test_after_calls_debug_with_stage(hook_context):
    """after() adds reason/variant/value to the DEBUG record."""
    logger = MagicMock()
    eval_details = FlagEvaluationDetails(
        flag_key="my-flag",
        value=True,
        variant="on",
        reason="STATIC",
    )

    LoggingHook(logger=logger).after(hook_context, eval_details, hints={})

    expected = {
        "stage": "after",
        "flag_key": "my-flag",
        "default_value": False,
        "domain": "my-domain",
        "provider_name": "my-provider",
        "reason": "STATIC",
        "variant": "on",
        "value": True,
    }
    logger.debug.assert_called_with("Flag evaluation %s", expected)


def test_after_calls_debug_with_evaluation_context(hook_context):
    """With include_evaluation_context=True, after() logs a JSON context."""
    logger = MagicMock()
    hook = LoggingHook(logger=logger, include_evaluation_context=True)
    eval_details = FlagEvaluationDetails(
        flag_key="my-flag",
        value=True,
        variant="on",
        reason="STATIC",
    )

    hook.after(hook_context, eval_details, hints={})

    expected = {
        "stage": "after",
        "flag_key": "my-flag",
        "default_value": False,
        "domain": "my-domain",
        "provider_name": "my-provider",
        "reason": "STATIC",
        "variant": "on",
        "value": True,
        "evaluation_context": '{"targeting_key": "user-1", "attributes": {"env": "prod"}}',
    }
    logger.debug.assert_called_with("Flag evaluation %s", expected)


def test_error_calls_error_log(hook_context):
    """A plain Exception is logged at ERROR with the GENERAL error code."""
    logger = MagicMock()
    hook = LoggingHook(logger=logger)

    hook.error(hook_context, Exception("something went wrong"), hints={})

    expected = {
        "stage": "error",
        "flag_key": "my-flag",
        "default_value": False,
        "domain": "my-domain",
        "provider_name": "my-provider",
        "error_code": ErrorCode.GENERAL,
        "error_message": "something went wrong",
    }
    logger.error.assert_called_with("Flag evaluation %s", expected)


def test_error_extracts_error_code_from_open_feature_error(hook_context):
    """An OpenFeatureError's own code and message are carried into the log."""
    logger = MagicMock()
    hook = LoggingHook(logger=logger)

    hook.error(hook_context, FlagNotFoundError("flag not found"), hints={})

    expected = {
        "stage": "error",
        "flag_key": "my-flag",
        "default_value": False,
        "domain": "my-domain",
        "provider_name": "my-provider",
        "error_code": ErrorCode.FLAG_NOT_FOUND,
        "error_message": "flag not found",
    }
    logger.error.assert_called_with("Flag evaluation %s", expected)


def test_build_args_without_metadata():
    """_build_args tolerates missing client/provider metadata (None fields)."""
    bare_context = HookContext(
        flag_key="flag",
        flag_type=FlagType.STRING,
        default_value="default",
        evaluation_context=EvaluationContext(None, {}),
        client_metadata=None,
        provider_metadata=None,
    )

    args = LoggingHook()._build_args(bare_context, "before")

    assert args == {
        "flag_key": "flag",
        "default_value": "default",
        "domain": None,
        "provider_name": None,
        "stage": "before",
    }


def test_build_args_excludes_evaluation_context_by_default(hook_context):
    """Without opt-in, the payload carries no evaluation_context key."""
    args = LoggingHook()._build_args(hook_context, "before")

    assert args == {
        "flag_key": "my-flag",
        "default_value": False,
        "domain": "my-domain",
        "provider_name": "my-provider",
        "stage": "before",
    }


def test_build_args_includes_evaluation_context_when_enabled(hook_context):
    """With opt-in, the payload carries the JSON-serialized context."""
    hook = LoggingHook(include_evaluation_context=True)

    args = hook._build_args(hook_context, "after")

    assert args == {
        "flag_key": "my-flag",
        "default_value": False,
        "domain": "my-domain",
        "provider_name": "my-provider",
        "evaluation_context": '{"targeting_key": "user-1", "attributes": {"env": "prod"}}',
        "stage": "after",
    }


def test_error_calls_error_log_with_evaluation_context(hook_context):
    """error() also includes the serialized context when opted in."""
    logger = MagicMock()
    hook = LoggingHook(logger=logger, include_evaluation_context=True)

    hook.error(hook_context, Exception("something went wrong"), hints={})

    expected = {
        "stage": "error",
        "flag_key": "my-flag",
        "default_value": False,
        "domain": "my-domain",
        "provider_name": "my-provider",
        "evaluation_context": '{"targeting_key": "user-1", "attributes": {"env": "prod"}}',
        "error_code": ErrorCode.GENERAL,
        "error_message": "something went wrong",
    }
    logger.error.assert_called_with("Flag evaluation %s", expected)
Loading