From 3436c4d54450298ff0845dd22313e69804406e1c Mon Sep 17 00:00:00 2001 From: Christopher Brady Date: Wed, 1 Apr 2026 11:57:07 -0600 Subject: [PATCH 01/27] add sample app --- sample-app/build.gradle | 5 +- sample-app/src/main/java/sample/App.java | 218 ++++++++++++++++++++++- 2 files changed, 215 insertions(+), 8 deletions(-) diff --git a/sample-app/build.gradle b/sample-app/build.gradle index 4ee8f22..a6ac6d4 100644 --- a/sample-app/build.gradle +++ b/sample-app/build.gradle @@ -1,5 +1,6 @@ plugins { id 'java-library' + id 'application' } repositories { @@ -13,7 +14,9 @@ dependencies { implementation rootProject } +application { + mainClass = 'sample.App' +} sourceCompatibility = 1.8 targetCompatibility = 1.8 - diff --git a/sample-app/src/main/java/sample/App.java b/sample-app/src/main/java/sample/App.java index 1330241..8fb778f 100644 --- a/sample-app/src/main/java/sample/App.java +++ b/sample-app/src/main/java/sample/App.java @@ -1,13 +1,217 @@ -/** - * This file was auto-generated by Fern from our API Definition. +/* + * Schematic Java Client - Datastream Test Server + * + * This example demonstrates how to use the Schematic Java client to check feature flags. + * It is modeled after the C# DatastreamTestServer example and will be extended + * to support replicator/datastream mode once that functionality is available in the Java SDK. + * + * Environment Variables: + * - SCHEMATIC_API_KEY: Your Schematic API key (required) + * - SCHEMATIC_API_URL: Schematic API base URL (default: https://api.schematichq.com) + * - SERVER_PORT: Port to listen on (default: 8080) + * - CACHE_TTL_MS: Cache TTL in milliseconds (default: 5000) + * + * Usage: + * 1. Set environment variables (only SCHEMATIC_API_KEY is required) + * 2. Run: ./gradlew :sample-app:run + * 3. 
Test endpoints: + * - GET / - Welcome message + * - GET /config - Show current configuration + * - GET /health - Check service health + * - POST /checkflag - Check feature flags + * + * Example checkflag request: + * curl -X POST http://localhost:8080/checkflag \ + * -H "Content-Type: application/json" \ + * -d '{"flag-key":"my-flag","company":{"id":"comp-123"},"user":{"id":"user-456"}}' */ - package sample; -import java.lang.String; +import com.fasterxml.jackson.databind.ObjectMapper; +import com.schematic.api.Schematic; +import com.sun.net.httpserver.HttpExchange; +import com.sun.net.httpserver.HttpServer; +import java.io.IOException; +import java.io.InputStream; +import java.io.OutputStream; +import java.net.InetSocketAddress; +import java.time.Instant; +import java.util.LinkedHashMap; +import java.util.Map; public final class App { - public static void main(String[] args) { - // import com.schematic.api.AsyncBaseSchematic - } + + private static final ObjectMapper MAPPER = new ObjectMapper(); + + private static Schematic schematic; + private static String schematicApiUrl; + private static int cacheTtlMs; + private static int serverPort; + + public static void main(String[] args) throws Exception { + String apiKey = System.getenv("SCHEMATIC_API_KEY"); + if (apiKey == null || apiKey.isEmpty()) { + System.err.println("ERROR: SCHEMATIC_API_KEY environment variable is not set"); + System.exit(1); + } + + schematicApiUrl = envOrDefault("SCHEMATIC_API_URL", "https://api.schematichq.com"); + serverPort = Integer.parseInt(envOrDefault("SERVER_PORT", "8080")); + cacheTtlMs = Integer.parseInt(envOrDefault("CACHE_TTL_MS", "5000")); + + Schematic.Builder builder = Schematic.builder().apiKey(apiKey).basePath(schematicApiUrl); + + // TODO: Once datastream/replicator mode is available in the Java SDK, configure it here: + // - Redis cache provider + // - Replicator health URL + // - DatastreamOptions + + schematic = builder.build(); + + HttpServer server = HttpServer.create(new 
InetSocketAddress(serverPort), 0); + server.createContext("/", App::handleRoot); + server.createContext("/config", App::handleConfig); + server.createContext("/health", App::handleHealth); + server.createContext("/checkflag", App::handleCheckFlag); + server.setExecutor(null); + server.start(); + + System.out.println("Datastream Test Server started on port " + serverPort); + System.out.println("Endpoints:"); + System.out.println(" GET / - Welcome message"); + System.out.println(" GET /config - Show configuration"); + System.out.println(" GET /health - Health check"); + System.out.println(" POST /checkflag - Check a feature flag"); + } + + private static void handleRoot(HttpExchange exchange) throws IOException { + if (!"GET".equals(exchange.getRequestMethod())) { + sendMethodNotAllowed(exchange); + return; + } + sendText(exchange, 200, "Welcome to the Schematic Datastream Test Server!"); + } + + private static void handleConfig(HttpExchange exchange) throws IOException { + if (!"GET".equals(exchange.getRequestMethod())) { + sendMethodNotAllowed(exchange); + return; + } + + Map config = new LinkedHashMap<>(); + config.put("schematicApiUrl", schematicApiUrl); + config.put("cacheTtlMs", cacheTtlMs); + config.put("hasApiKey", true); + + Map endpoints = new LinkedHashMap<>(); + endpoints.put("health", "/health"); + endpoints.put("config", "/config"); + endpoints.put("checkFlag", "/checkflag (POST)"); + + Map response = new LinkedHashMap<>(); + response.put("configuration", config); + response.put("endpoints", endpoints); + response.put("timestamp", Instant.now().toString()); + + sendJson(exchange, 200, response); + } + + private static void handleHealth(HttpExchange exchange) throws IOException { + if (!"GET".equals(exchange.getRequestMethod())) { + sendMethodNotAllowed(exchange); + return; + } + + Map response = new LinkedHashMap<>(); + response.put("status", "healthy"); + + Map config = new LinkedHashMap<>(); + config.put("schematicApiUrl", schematicApiUrl); + 
config.put("cacheTtlMs", cacheTtlMs); + response.put("configuration", config); + response.put("timestamp", Instant.now().toString()); + + sendJson(exchange, 200, response); + } + + @SuppressWarnings("unchecked") + private static void handleCheckFlag(HttpExchange exchange) throws IOException { + if (!"POST".equals(exchange.getRequestMethod())) { + sendMethodNotAllowed(exchange); + return; + } + + try { + InputStream body = exchange.getRequestBody(); + Map requestBody = MAPPER.readValue(body, Map.class); + + String flagKey = (String) requestBody.get("flag-key"); + if (flagKey == null || flagKey.isEmpty()) { + Map error = new LinkedHashMap<>(); + error.put("error", "flag-key is required"); + sendJson(exchange, 400, error); + return; + } + + Map company = toStringMap(requestBody.get("company")); + Map user = toStringMap(requestBody.get("user")); + + long startTime = System.nanoTime(); + boolean result = schematic.checkFlag(flagKey, company, user); + double durationMs = (System.nanoTime() - startTime) / 1_000_000.0; + + Map response = new LinkedHashMap<>(); + response.put("flagKey", flagKey); + response.put("result", result); + response.put("durationMs", durationMs); + response.put("timestamp", Instant.now().toString()); + + sendJson(exchange, 200, response); + } catch (Exception e) { + Map error = new LinkedHashMap<>(); + error.put("error", "Flag Check Failed"); + error.put("detail", e.getMessage()); + sendJson(exchange, 500, error); + } + } + + @SuppressWarnings("unchecked") + private static Map toStringMap(Object obj) { + if (obj == null) { + return null; + } + Map raw = (Map) obj; + Map result = new LinkedHashMap<>(); + for (Map.Entry entry : raw.entrySet()) { + result.put(entry.getKey(), String.valueOf(entry.getValue())); + } + return result; + } + + private static void sendText(HttpExchange exchange, int status, String text) throws IOException { + byte[] bytes = text.getBytes("UTF-8"); + exchange.getResponseHeaders().set("Content-Type", "text/plain; 
charset=utf-8"); + exchange.sendResponseHeaders(status, bytes.length); + try (OutputStream os = exchange.getResponseBody()) { + os.write(bytes); + } + } + + private static void sendJson(HttpExchange exchange, int status, Object obj) throws IOException { + byte[] bytes = MAPPER.writerWithDefaultPrettyPrinter().writeValueAsBytes(obj); + exchange.getResponseHeaders().set("Content-Type", "application/json"); + exchange.sendResponseHeaders(status, bytes.length); + try (OutputStream os = exchange.getResponseBody()) { + os.write(bytes); + } + } + + private static void sendMethodNotAllowed(HttpExchange exchange) throws IOException { + sendText(exchange, 405, "Method Not Allowed"); + } + + private static String envOrDefault(String key, String defaultValue) { + String value = System.getenv(key); + return (value != null && !value.isEmpty()) ? value : defaultValue; + } } From 5a184129f9a0b6f0fae905ee5c939f487364c0ab Mon Sep 17 00:00:00 2001 From: Christopher Brady Date: Wed, 1 Apr 2026 11:58:04 -0600 Subject: [PATCH 02/27] add datastream websocket functionality --- .fernignore | 2 + .../api/datastream/DataStreamMessages.java | 200 ++++++ .../datastream/DataStreamWebSocketClient.java | 595 ++++++++++++++++++ .../TestDataStreamWebSocketClient.java | 342 ++++++++++ 4 files changed, 1139 insertions(+) create mode 100644 src/main/java/com/schematic/api/datastream/DataStreamMessages.java create mode 100644 src/main/java/com/schematic/api/datastream/DataStreamWebSocketClient.java create mode 100644 src/test/java/com/schematic/api/datastream/TestDataStreamWebSocketClient.java diff --git a/.fernignore b/.fernignore index b2e9f70..0493b63 100644 --- a/.fernignore +++ b/.fernignore @@ -11,6 +11,7 @@ src/main/java/com/schematic/api/cache/CachedItem.java src/main/java/com/schematic/api/cache/LocalCache.java src/main/java/com/schematic/api/core/NoOpHttpClient.java src/main/java/com/schematic/api/logger/ConsoleLogger.java +src/main/java/com/schematic/api/datastream/ 
src/main/java/com/schematic/api/logger/SchematicLogger.java src/main/java/com/schematic/webhook/ src/test/java/com/schematic/api/TestCache.java @@ -19,4 +20,5 @@ src/test/java/com/schematic/api/TestLogger.java src/test/java/com/schematic/api/TestOfflineMode.java src/test/java/com/schematic/api/TestReadme.java src/test/java/com/schematic/api/TestSchematic.java +src/test/java/com/schematic/api/datastream/ src/test/java/com/schematic/webhook/ diff --git a/src/main/java/com/schematic/api/datastream/DataStreamMessages.java b/src/main/java/com/schematic/api/datastream/DataStreamMessages.java new file mode 100644 index 0000000..0f777c3 --- /dev/null +++ b/src/main/java/com/schematic/api/datastream/DataStreamMessages.java @@ -0,0 +1,200 @@ +package com.schematic.api.datastream; + +import com.fasterxml.jackson.annotation.JsonProperty; +import com.fasterxml.jackson.databind.JsonNode; +import java.util.Map; + +/** + * Message types for the DataStream WebSocket protocol. + */ +public final class DataStreamMessages { + + private DataStreamMessages() {} + + /** Actions that can be sent to the datastream server. */ + public enum Action { + @JsonProperty("start") + START, + @JsonProperty("stop") + STOP + } + + /** Entity types used in datastream communication. 
*/ + public enum EntityType { + @JsonProperty("rulesengine.Company") + COMPANY("rulesengine.Company"), + @JsonProperty("rulesengine.Companies") + COMPANIES("rulesengine.Companies"), + @JsonProperty("rulesengine.Flag") + FLAG("rulesengine.Flag"), + @JsonProperty("rulesengine.Flags") + FLAGS("rulesengine.Flags"), + @JsonProperty("rulesengine.User") + USER("rulesengine.User"), + @JsonProperty("rulesengine.Users") + USERS("rulesengine.Users"); + + private final String value; + + EntityType(String value) { + this.value = value; + } + + public String getValue() { + return value; + } + + public static EntityType fromString(String value) { + for (EntityType type : values()) { + if (type.value.equals(value)) { + return type; + } + } + return null; + } + } + + /** Message types received from the datastream server. */ + public enum MessageType { + @JsonProperty("full") + FULL("full"), + @JsonProperty("partial") + PARTIAL("partial"), + @JsonProperty("delete") + DELETE("delete"), + @JsonProperty("error") + ERROR("error"), + @JsonProperty("unknown") + UNKNOWN("unknown"); + + private final String value; + + MessageType(String value) { + this.value = value; + } + + public String getValue() { + return value; + } + + public static MessageType fromString(String value) { + for (MessageType type : values()) { + if (type.value.equals(value)) { + return type; + } + } + return UNKNOWN; + } + } + + /** Request message sent to the datastream server. 
*/ + public static class DataStreamReq { + @JsonProperty("action") + private final Action action; + + @JsonProperty("entity_type") + private final String entityType; + + @JsonProperty("keys") + private final Map keys; + + public DataStreamReq(Action action, EntityType entityType, Map keys) { + this.action = action; + this.entityType = entityType.getValue(); + this.keys = keys; + } + + public Action getAction() { + return action; + } + + public String getEntityType() { + return entityType; + } + + public Map getKeys() { + return keys; + } + } + + /** Wrapper for request messages (matches Go's DataStreamBaseReq). */ + public static class DataStreamBaseReq { + @JsonProperty("data") + private final DataStreamReq data; + + public DataStreamBaseReq(DataStreamReq data) { + this.data = data; + } + + public DataStreamReq getData() { + return data; + } + } + + /** Response message received from the datastream server. */ + public static class DataStreamResp { + @JsonProperty("data") + private JsonNode data; + + @JsonProperty("entity_id") + private String entityId; + + @JsonProperty("entity_type") + private String entityType; + + @JsonProperty("message_type") + private String messageType; + + public DataStreamResp() {} + + public JsonNode getData() { + return data; + } + + public String getEntityId() { + return entityId; + } + + public String getEntityType() { + return entityType; + } + + public EntityType getEntityTypeEnum() { + return EntityType.fromString(entityType); + } + + public MessageType getMessageTypeEnum() { + return MessageType.fromString(messageType); + } + + public String getMessageType() { + return messageType; + } + } + + /** Error message from the datastream server. 
*/ + public static class DataStreamError { + @JsonProperty("error") + private String error; + + @JsonProperty("keys") + private Map keys; + + @JsonProperty("entity_type") + private String entityType; + + public DataStreamError() {} + + public String getError() { + return error; + } + + public Map getKeys() { + return keys; + } + + public String getEntityType() { + return entityType; + } + } +} diff --git a/src/main/java/com/schematic/api/datastream/DataStreamWebSocketClient.java b/src/main/java/com/schematic/api/datastream/DataStreamWebSocketClient.java new file mode 100644 index 0000000..290274c --- /dev/null +++ b/src/main/java/com/schematic/api/datastream/DataStreamWebSocketClient.java @@ -0,0 +1,595 @@ +package com.schematic.api.datastream; + +import com.fasterxml.jackson.core.JsonProcessingException; +import com.fasterxml.jackson.databind.ObjectMapper; +import com.schematic.api.datastream.DataStreamMessages.DataStreamResp; +import com.schematic.api.logger.SchematicLogger; +import java.io.Closeable; +import java.net.MalformedURLException; +import java.net.URL; +import java.util.concurrent.BlockingQueue; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; +import java.util.concurrent.LinkedBlockingQueue; +import java.util.concurrent.ScheduledExecutorService; +import java.util.concurrent.ScheduledFuture; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.atomic.AtomicInteger; +import okhttp3.OkHttpClient; +import okhttp3.Request; +import okhttp3.Response; +import okhttp3.WebSocket; +import okhttp3.WebSocketListener; + +/** + * WebSocket client for the Schematic DataStream protocol. + * + *

Provides automatic reconnection with exponential backoff, ping/pong keep-alive, + * and a message worker pool for processing incoming messages. Mirrors the Go + * {@code schematic-datastream-ws} package. + */ +public class DataStreamWebSocketClient implements Closeable { + + /** Functional interface for handling incoming datastream messages. */ + public interface MessageHandler { + void handle(DataStreamResp message) throws Exception; + } + + /** Functional interface called after connection is established, before client is marked ready. */ + public interface ConnectionReadyHandler { + void handle() throws Exception; + } + + // Default constants (matching Go SDK) + private static final long DEFAULT_PONG_WAIT_MS = 40_000; + private static final long DEFAULT_PING_PERIOD_MS = 30_000; + private static final long WRITE_WAIT_MS = 10_000; + private static final int DEFAULT_MAX_RECONNECT_ATTEMPTS = 10; + private static final long DEFAULT_MIN_RECONNECT_DELAY_MS = 1_000; + private static final long DEFAULT_MAX_RECONNECT_DELAY_MS = 30_000; + private static final int DEFAULT_MESSAGE_QUEUE_SIZE = 100; + private static final int DEFAULT_MESSAGE_WORKERS = 1; + + // Configuration + private final String url; + private final String apiKey; + private final MessageHandler messageHandler; + private final ConnectionReadyHandler connectionReadyHandler; + private final SchematicLogger logger; + private final int maxReconnectAttempts; + private final long minReconnectDelayMs; + private final long maxReconnectDelayMs; + private final long pingPeriodMs; + private final long pongWaitMs; + private final int messageQueueSize; + private final int messageWorkers; + + // State + private final AtomicBoolean connected = new AtomicBoolean(false); + private final AtomicBoolean ready = new AtomicBoolean(false); + private final AtomicBoolean closed = new AtomicBoolean(false); + private final AtomicInteger reconnectAttempts = new AtomicInteger(0); + + // Infrastructure + private final OkHttpClient 
httpClient; + private final ObjectMapper objectMapper; + private final BlockingQueue messageQueue; + private final ExecutorService workerPool; + private final ScheduledExecutorService scheduler; + private volatile WebSocket webSocket; + private volatile ScheduledFuture pingTask; + private volatile CountDownLatch connectLatch; + + private DataStreamWebSocketClient(Builder builder) { + if (builder.url == null || builder.url.isEmpty()) { + throw new IllegalArgumentException("URL is required"); + } + if (builder.apiKey == null || builder.apiKey.isEmpty()) { + throw new IllegalArgumentException("API key is required"); + } + if (builder.messageHandler == null) { + throw new IllegalArgumentException("MessageHandler is required"); + } + + this.url = convertApiUrlToWebSocketUrl(builder.url); + this.apiKey = builder.apiKey; + this.messageHandler = builder.messageHandler; + this.connectionReadyHandler = builder.connectionReadyHandler; + this.logger = builder.logger; + this.maxReconnectAttempts = + builder.maxReconnectAttempts > 0 ? builder.maxReconnectAttempts : DEFAULT_MAX_RECONNECT_ATTEMPTS; + this.minReconnectDelayMs = + builder.minReconnectDelayMs > 0 ? builder.minReconnectDelayMs : DEFAULT_MIN_RECONNECT_DELAY_MS; + this.maxReconnectDelayMs = + builder.maxReconnectDelayMs > 0 ? builder.maxReconnectDelayMs : DEFAULT_MAX_RECONNECT_DELAY_MS; + this.pingPeriodMs = builder.pingPeriodMs > 0 ? builder.pingPeriodMs : DEFAULT_PING_PERIOD_MS; + this.pongWaitMs = builder.pongWaitMs > 0 ? builder.pongWaitMs : DEFAULT_PONG_WAIT_MS; + this.messageQueueSize = builder.messageQueueSize > 0 ? builder.messageQueueSize : DEFAULT_MESSAGE_QUEUE_SIZE; + this.messageWorkers = builder.messageWorkers > 0 ? 
builder.messageWorkers : DEFAULT_MESSAGE_WORKERS; + + this.objectMapper = new ObjectMapper(); + this.messageQueue = new LinkedBlockingQueue<>(this.messageQueueSize); + + // OkHttp client with ping interval for keep-alive + this.httpClient = new OkHttpClient.Builder() + .pingInterval(this.pingPeriodMs, TimeUnit.MILLISECONDS) + .readTimeout(this.pongWaitMs, TimeUnit.MILLISECONDS) + .connectTimeout(30, TimeUnit.SECONDS) + .build(); + + this.workerPool = Executors.newFixedThreadPool(this.messageWorkers, r -> { + Thread t = new Thread(r, "schematic-ds-worker"); + t.setDaemon(true); + return t; + }); + + this.scheduler = Executors.newSingleThreadScheduledExecutor(r -> { + Thread t = new Thread(r, "schematic-ds-scheduler"); + t.setDaemon(true); + return t; + }); + + // Start message workers + for (int i = 0; i < this.messageWorkers; i++) { + workerPool.submit(this::messageWorkerLoop); + } + } + + public static Builder builder() { + return new Builder(); + } + + /** Starts the WebSocket connection asynchronously. */ + public void start() { + if (closed.get()) { + throw new IllegalStateException("Client has been closed"); + } + log("info", "Starting WebSocket client"); + connectLatch = new CountDownLatch(1); + scheduler.submit(this::connectAndRead); + } + + /** + * Blocks until the client is connected and ready, or the timeout elapses. + * + * @return true if connected and ready, false if timed out + */ + public boolean awaitReady(long timeout, TimeUnit unit) throws InterruptedException { + if (connectLatch == null) { + return ready.get(); + } + return connectLatch.await(timeout, unit); + } + + /** Returns whether the WebSocket is currently connected. */ + public boolean isConnected() { + return connected.get(); + } + + /** Returns whether the client is ready (connected + initialized). */ + public boolean isReady() { + return ready.get() && connected.get(); + } + + /** + * Sends a message through the WebSocket connection. 
+ * + * @param message the message object to serialize and send + * @return true if the message was enqueued for sending + */ + public boolean sendMessage(Object message) { + if (!isConnected() || webSocket == null) { + log("warn", "Cannot send message: WebSocket not connected"); + return false; + } + + try { + String json = objectMapper.writeValueAsString(message); + log("debug", "Sending WebSocket message: " + json); + return webSocket.send(json); + } catch (JsonProcessingException e) { + log("error", "Failed to serialize message: " + e.getMessage()); + return false; + } + } + + /** Gracefully closes the WebSocket connection and shuts down workers. */ + @Override + public void close() { + if (!closed.compareAndSet(false, true)) { + return; // Already closed + } + + log("info", "Closing WebSocket client"); + + // Cancel ping task + if (pingTask != null) { + pingTask.cancel(false); + } + + // Close WebSocket + setConnected(false); + setReady(false); + if (webSocket != null) { + try { + webSocket.close(1000, "Client closing"); + } catch (Exception e) { + log("debug", "Error closing WebSocket: " + e.getMessage()); + } + } + + // Shutdown workers + workerPool.shutdownNow(); + scheduler.shutdownNow(); + httpClient.dispatcher().executorService().shutdownNow(); + httpClient.connectionPool().evictAll(); + + // Release connect latch if anyone is waiting + if (connectLatch != null) { + connectLatch.countDown(); + } + + log("info", "WebSocket client closed"); + } + + // --- Connection lifecycle --- + + private void connectAndRead() { + while (!closed.get()) { + try { + doConnect(); + // If doConnect returns without exception and we're connected, + // reset attempts and wait for disconnection + if (connected.get()) { + reconnectAttempts.set(0); + return; // WebSocket listener handles the rest + } + } catch (Exception e) { + log("error", "Failed to connect to WebSocket: " + e.getMessage()); + } + + int attempts = reconnectAttempts.incrementAndGet(); + if (attempts >= 
maxReconnectAttempts) { + log("error", "Max reconnection attempts reached (" + maxReconnectAttempts + ")"); + if (connectLatch != null) { + connectLatch.countDown(); + } + return; + } + + long delay = calculateBackoffDelay(attempts); + log( + "info", + String.format( + "Retrying WebSocket connection in %dms (attempt %d/%d)", + delay, attempts, maxReconnectAttempts)); + + try { + Thread.sleep(delay); + } catch (InterruptedException e) { + Thread.currentThread().interrupt(); + return; + } + } + } + + private void doConnect() { + Request request = new Request.Builder() + .url(url) + .header("X-Schematic-Api-Key", apiKey) + .build(); + + log("debug", "Connecting to WebSocket: " + url); + httpClient.newWebSocket(request, new SchematicWebSocketListener()); + } + + private void handleConnected() { + log("info", "Connected to WebSocket"); + setConnected(true); + reconnectAttempts.set(0); + + // Call connection ready handler if provided + if (connectionReadyHandler != null) { + try { + log("debug", "Calling connection ready handler"); + connectionReadyHandler.handle(); + log("debug", "Connection ready handler completed"); + } catch (Exception e) { + log("error", "Connection ready handler failed: " + e.getMessage()); + setConnected(false); + setReady(false); + if (webSocket != null) { + webSocket.close(1000, "Ready handler failed"); + } + return; + } + } + + setReady(true); + log("info", "DataStream client is ready"); + + // Release anyone waiting on awaitReady + if (connectLatch != null) { + connectLatch.countDown(); + } + } + + private void handleDisconnected(int code, String reason) { + setConnected(false); + setReady(false); + + if (closed.get()) { + return; + } + + // Normal/going-away closure or non-retriable (4001 unauthorized) - don't reconnect + if (code == 1000 || code == 1001 || code == 4001) { + log("info", String.format("WebSocket closed (code=%d, reason=%s), not reconnecting", code, reason)); + return; + } + + // All other closures - attempt reconnect + 
log("info", String.format("WebSocket disconnected (code=%d, reason=%s), scheduling reconnect", code, reason)); + scheduleReconnect(); + } + + private void handleFailure(Throwable t) { + setConnected(false); + setReady(false); + + if (closed.get()) { + return; + } + + log("error", "WebSocket failure: " + t.getMessage()); + scheduleReconnect(); + } + + private void scheduleReconnect() { + if (closed.get()) { + return; + } + scheduler.submit(this::connectAndRead); + } + + // --- Message processing --- + + private void messageWorkerLoop() { + while (!closed.get() && !Thread.currentThread().isInterrupted()) { + try { + DataStreamResp message = messageQueue.poll(1, TimeUnit.SECONDS); + if (message == null) { + continue; + } + + try { + messageHandler.handle(message); + log("debug", "Message processed successfully"); + } catch (Exception e) { + log("error", "Message handler error: " + e.getMessage()); + } + } catch (InterruptedException e) { + Thread.currentThread().interrupt(); + return; + } + } + } + + // --- Utilities --- + + long calculateBackoffDelay(int attempt) { + long jitter = (long) (Math.random() * minReconnectDelayMs); + long delay = (long) (Math.pow(2, attempt - 1) * minReconnectDelayMs) + jitter; + if (delay > maxReconnectDelayMs) { + delay = maxReconnectDelayMs + jitter; + } + return delay; + } + + static String convertApiUrlToWebSocketUrl(String apiUrl) { + // If already a WebSocket URL, just ensure /datastream path + if (apiUrl.startsWith("ws://") || apiUrl.startsWith("wss://")) { + if (!apiUrl.endsWith("/datastream")) { + return apiUrl + "/datastream"; + } + return apiUrl; + } + + try { + URL parsed = new URL(apiUrl); + String protocol = parsed.getProtocol(); + if (!"http".equals(protocol) && !"https".equals(protocol)) { + throw new IllegalArgumentException( + "Unsupported scheme: " + protocol + " (must be http or https)"); + } + String scheme = "https".equals(protocol) ? 
"wss" : "ws"; + String host = parsed.getHost(); + int port = parsed.getPort(); + + // Replace 'api' subdomain with 'datastream' + String[] hostParts = host.split("\\."); + if (hostParts.length > 1 && "api".equals(hostParts[0])) { + hostParts[0] = "datastream"; + host = String.join(".", hostParts); + } + + StringBuilder sb = new StringBuilder(); + sb.append(scheme).append("://").append(host); + if (port != -1 && port != 80 && port != 443) { + sb.append(":").append(port); + } + sb.append("/datastream"); + return sb.toString(); + } catch (MalformedURLException e) { + throw new IllegalArgumentException("Invalid API URL: " + apiUrl, e); + } + } + + private void setConnected(boolean value) { + connected.set(value); + if (!value) { + ready.set(false); + } + } + + private void setReady(boolean value) { + ready.set(value); + } + + private void log(String level, String message) { + if (logger == null) { + return; + } + switch (level) { + case "debug": + logger.debug(message); + break; + case "info": + logger.info(message); + break; + case "warn": + logger.warn(message); + break; + case "error": + logger.error(message); + break; + default: + logger.debug(message); + break; + } + } + + // --- WebSocket listener --- + + private class SchematicWebSocketListener extends WebSocketListener { + + @Override + public void onOpen(WebSocket ws, Response response) { + webSocket = ws; + handleConnected(); + } + + @Override + public void onMessage(WebSocket ws, String text) { + log("debug", "Received WebSocket message: " + text); + try { + DataStreamResp message = objectMapper.readValue(text, DataStreamResp.class); + + String entityId = message.getEntityId() != null ? 
message.getEntityId() : "null"; + log( + "debug", + String.format( + "Parsed message - EntityType: %s, MessageType: %s, EntityID: %s", + message.getEntityType(), message.getMessageType(), entityId)); + + if (!messageQueue.offer(message)) { + log("error", "Message queue full, dropping message"); + } + } catch (JsonProcessingException e) { + log("error", "Failed to parse datastream message: " + e.getMessage()); + } + } + + @Override + public void onClosing(WebSocket ws, int code, String reason) { + log("debug", String.format("WebSocket closing (code=%d, reason=%s)", code, reason)); + ws.close(code, reason); + } + + @Override + public void onClosed(WebSocket ws, int code, String reason) { + log("debug", String.format("WebSocket closed (code=%d, reason=%s)", code, reason)); + handleDisconnected(code, reason); + } + + @Override + public void onFailure(WebSocket ws, Throwable t, Response response) { + String status = response != null ? String.valueOf(response.code()) : "no response"; + log("error", String.format("WebSocket failure: %s (response: %s)", t.getMessage(), status)); + handleFailure(t); + } + } + + // --- Builder --- + + public static class Builder { + private String url; + private String apiKey; + private MessageHandler messageHandler; + private ConnectionReadyHandler connectionReadyHandler; + private SchematicLogger logger; + private int maxReconnectAttempts; + private long minReconnectDelayMs; + private long maxReconnectDelayMs; + private long pingPeriodMs; + private long pongWaitMs; + private int messageWorkers; + private int messageQueueSize; + + public Builder url(String url) { + this.url = url; + return this; + } + + public Builder apiKey(String apiKey) { + this.apiKey = apiKey; + return this; + } + + public Builder messageHandler(MessageHandler messageHandler) { + this.messageHandler = messageHandler; + return this; + } + + public Builder connectionReadyHandler(ConnectionReadyHandler connectionReadyHandler) { + this.connectionReadyHandler = 
connectionReadyHandler; + return this; + } + + public Builder logger(SchematicLogger logger) { + this.logger = logger; + return this; + } + + public Builder maxReconnectAttempts(int maxReconnectAttempts) { + this.maxReconnectAttempts = maxReconnectAttempts; + return this; + } + + public Builder minReconnectDelayMs(long minReconnectDelayMs) { + this.minReconnectDelayMs = minReconnectDelayMs; + return this; + } + + public Builder maxReconnectDelayMs(long maxReconnectDelayMs) { + this.maxReconnectDelayMs = maxReconnectDelayMs; + return this; + } + + public Builder pingPeriodMs(long pingPeriodMs) { + this.pingPeriodMs = pingPeriodMs; + return this; + } + + public Builder pongWaitMs(long pongWaitMs) { + this.pongWaitMs = pongWaitMs; + return this; + } + + public Builder messageWorkers(int messageWorkers) { + this.messageWorkers = messageWorkers; + return this; + } + + public Builder messageQueueSize(int messageQueueSize) { + this.messageQueueSize = messageQueueSize; + return this; + } + + public DataStreamWebSocketClient build() { + return new DataStreamWebSocketClient(this); + } + } +} diff --git a/src/test/java/com/schematic/api/datastream/TestDataStreamWebSocketClient.java b/src/test/java/com/schematic/api/datastream/TestDataStreamWebSocketClient.java new file mode 100644 index 0000000..a6572f3 --- /dev/null +++ b/src/test/java/com/schematic/api/datastream/TestDataStreamWebSocketClient.java @@ -0,0 +1,342 @@ +package com.schematic.api.datastream; + +import static org.junit.jupiter.api.Assertions.*; + +import com.schematic.api.datastream.DataStreamMessages.Action; +import com.schematic.api.datastream.DataStreamMessages.DataStreamBaseReq; +import com.schematic.api.datastream.DataStreamMessages.DataStreamReq; +import com.schematic.api.datastream.DataStreamMessages.DataStreamResp; +import com.schematic.api.datastream.DataStreamMessages.EntityType; +import com.schematic.api.datastream.DataStreamMessages.MessageType; +import com.schematic.api.logger.SchematicLogger; 
+import java.util.HashMap; +import java.util.Map; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.extension.ExtendWith; +import org.mockito.Mock; +import org.mockito.junit.jupiter.MockitoExtension; + +@ExtendWith(MockitoExtension.class) +class DataStreamWebSocketClientTest { + + @Mock + private SchematicLogger logger; + + // --- URL conversion tests (matches Go url_conversion_test.go) --- + + @Test + void convertApiUrlToWebSocketUrl_apiSubdomainWithHttps() { + String result = DataStreamWebSocketClient.convertApiUrlToWebSocketUrl("https://api.schematichq.com"); + assertEquals("wss://datastream.schematichq.com/datastream", result); + } + + @Test + void convertApiUrlToWebSocketUrl_apiSubdomainWithStaging() { + String result = + DataStreamWebSocketClient.convertApiUrlToWebSocketUrl("https://api.staging.schematichq.com"); + assertEquals("wss://datastream.staging.schematichq.com/datastream", result); + } + + @Test + void convertApiUrlToWebSocketUrl_apiSubdomainWithHttp() { + String result = DataStreamWebSocketClient.convertApiUrlToWebSocketUrl("http://api.localhost:8080"); + assertEquals("ws://datastream.localhost:8080/datastream", result); + } + + @Test + void convertApiUrlToWebSocketUrl_nonApiSubdomain() { + String result = DataStreamWebSocketClient.convertApiUrlToWebSocketUrl("https://custom.schematichq.com"); + assertEquals("wss://custom.schematichq.com/datastream", result); + } + + @Test + void convertApiUrlToWebSocketUrl_noSubdomain() { + String result = DataStreamWebSocketClient.convertApiUrlToWebSocketUrl("https://schematichq.com"); + assertEquals("wss://schematichq.com/datastream", result); + } + + @Test + void convertApiUrlToWebSocketUrl_localhostWithoutSubdomain() { + String result = DataStreamWebSocketClient.convertApiUrlToWebSocketUrl("http://localhost:8080"); + assertEquals("ws://localhost:8080/datastream", result); + } + + @Test + void convertApiUrlToWebSocketUrl_invalidScheme() { + assertThrows( + IllegalArgumentException.class, + () -> 
DataStreamWebSocketClient.convertApiUrlToWebSocketUrl("ftp://api.example.com")); + } + + @Test + void convertApiUrlToWebSocketUrl_invalidUrl() { + assertThrows( + IllegalArgumentException.class, + () -> DataStreamWebSocketClient.convertApiUrlToWebSocketUrl("not a url")); + } + + @Test + void convertApiUrlToWebSocketUrl_alreadyWebSocket() { + String result = + DataStreamWebSocketClient.convertApiUrlToWebSocketUrl("wss://datastream.schematichq.com/datastream"); + assertEquals("wss://datastream.schematichq.com/datastream", result); + } + + @Test + void convertApiUrlToWebSocketUrl_wsWithoutPath() { + String result = DataStreamWebSocketClient.convertApiUrlToWebSocketUrl("wss://datastream.schematichq.com"); + assertEquals("wss://datastream.schematichq.com/datastream", result); + } + + // --- NewClient tests (matches Go TestNewClientWithAPIURL) --- + + @Test + void builder_httpApiUrlConversion() { + DataStreamWebSocketClient client = DataStreamWebSocketClient.builder() + .url("https://api.schematichq.com") + .apiKey("test-api-key") + .messageHandler(msg -> {}) + .build(); + + assertNotNull(client); + assertFalse(client.isConnected()); + assertFalse(client.isReady()); + client.close(); + } + + @Test + void builder_webSocketUrlPassthrough() { + DataStreamWebSocketClient client = DataStreamWebSocketClient.builder() + .url("wss://custom.example.com/ws") + .apiKey("test-api-key") + .messageHandler(msg -> {}) + .build(); + + assertNotNull(client); + client.close(); + } + + @Test + void builder_httpLocalhostConversion() { + DataStreamWebSocketClient client = DataStreamWebSocketClient.builder() + .url("http://api.localhost:8080") + .apiKey("test-api-key") + .messageHandler(msg -> {}) + .build(); + + assertNotNull(client); + client.close(); + } + + @Test + void builder_requiresUrl() { + assertThrows(IllegalArgumentException.class, () -> DataStreamWebSocketClient.builder() + .apiKey("test-api-key") + .messageHandler(msg -> {}) + .build()); + } + + @Test + void 
builder_requiresApiKey() { + assertThrows(IllegalArgumentException.class, () -> DataStreamWebSocketClient.builder() + .url("https://api.example.com") + .messageHandler(msg -> {}) + .build()); + } + + @Test + void builder_requiresMessageHandler() { + assertThrows(IllegalArgumentException.class, () -> DataStreamWebSocketClient.builder() + .url("https://api.example.com") + .apiKey("test-api-key") + .build()); + } + + @Test + void builder_customOptions() { + DataStreamWebSocketClient client = DataStreamWebSocketClient.builder() + .url("https://api.schematichq.com") + .apiKey("test_key") + .messageHandler(msg -> {}) + .logger(logger) + .maxReconnectAttempts(5) + .minReconnectDelayMs(500) + .maxReconnectDelayMs(15000) + .pingPeriodMs(20000) + .pongWaitMs(25000) + .messageWorkers(2) + .messageQueueSize(50) + .build(); + + assertNotNull(client); + client.close(); + } + + // --- Disconnection behavior tests (matches Go client_test.go handleReadError tests) --- + // In Java, OkHttp delivers these via WebSocketListener callbacks. + // We test the handleDisconnected/handleFailure logic indirectly via state. 
+ + @Test + void close_normalClosure_doesNotReconnect() { + // Normal closure (code 1000) should not trigger reconnect + DataStreamWebSocketClient client = DataStreamWebSocketClient.builder() + .url("https://api.schematichq.com") + .apiKey("test_key") + .messageHandler(msg -> {}) + .logger(logger) + .build(); + + // Simulate: client was never started, close should be clean + client.close(); + assertFalse(client.isConnected()); + assertFalse(client.isReady()); + } + + @Test + void close_isIdempotent() { + DataStreamWebSocketClient client = DataStreamWebSocketClient.builder() + .url("https://api.schematichq.com") + .apiKey("test_key") + .messageHandler(msg -> {}) + .logger(logger) + .build(); + + client.close(); + client.close(); // Should not throw + assertFalse(client.isConnected()); + } + + @Test + void start_throwsAfterClose() { + DataStreamWebSocketClient client = DataStreamWebSocketClient.builder() + .url("https://api.schematichq.com") + .apiKey("test_key") + .messageHandler(msg -> {}) + .logger(logger) + .build(); + + client.close(); + assertThrows(IllegalStateException.class, client::start); + } + + // --- State tests --- + + @Test + void sendMessage_failsWhenNotConnected() { + DataStreamWebSocketClient client = DataStreamWebSocketClient.builder() + .url("https://api.schematichq.com") + .apiKey("test_key") + .messageHandler(msg -> {}) + .logger(logger) + .build(); + + assertFalse(client.sendMessage("test")); + client.close(); + } + + @Test + void initialState_notConnectedNotReady() { + DataStreamWebSocketClient client = DataStreamWebSocketClient.builder() + .url("https://api.schematichq.com") + .apiKey("test_key") + .messageHandler(msg -> {}) + .build(); + + assertFalse(client.isConnected()); + assertFalse(client.isReady()); + client.close(); + } + + // --- Backoff tests --- + + @Test + void calculateBackoffDelay_increasesExponentially() { + DataStreamWebSocketClient client = DataStreamWebSocketClient.builder() + .url("https://api.schematichq.com") + 
.apiKey("test_key") + .messageHandler(msg -> {}) + .build(); + + // With default minReconnectDelay of 1000ms: + // Attempt 1: 2^0 * 1000 + jitter = 1000 + [0, 1000) + // Attempt 2: 2^1 * 1000 + jitter = 2000 + [0, 1000) + // Attempt 3: 2^2 * 1000 + jitter = 4000 + [0, 1000) + long delay1 = client.calculateBackoffDelay(1); + long delay3 = client.calculateBackoffDelay(3); + + assertTrue(delay1 >= 1000, "Attempt 1 delay should be >= 1000ms, was " + delay1); + assertTrue(delay1 < 2000, "Attempt 1 delay should be < 2000ms, was " + delay1); + assertTrue(delay3 >= 4000, "Attempt 3 delay should be >= 4000ms, was " + delay3); + assertTrue(delay3 < 5000, "Attempt 3 delay should be < 5000ms, was " + delay3); + + client.close(); + } + + @Test + void calculateBackoffDelay_capsAtMax() { + DataStreamWebSocketClient client = DataStreamWebSocketClient.builder() + .url("https://api.schematichq.com") + .apiKey("test_key") + .messageHandler(msg -> {}) + .maxReconnectDelayMs(5000) + .minReconnectDelayMs(1000) + .build(); + + // Attempt 10 would be 2^9 * 1000 = 512000, but should cap at 5000 + jitter + long delay = client.calculateBackoffDelay(10); + assertTrue(delay <= 6000, "Delay should be capped at max + jitter, was " + delay); + + client.close(); + } + + // --- Message type tests --- + + @Test + void entityType_fromString() { + assertEquals(EntityType.COMPANY, EntityType.fromString("rulesengine.Company")); + assertEquals(EntityType.COMPANIES, EntityType.fromString("rulesengine.Companies")); + assertEquals(EntityType.FLAG, EntityType.fromString("rulesengine.Flag")); + assertEquals(EntityType.FLAGS, EntityType.fromString("rulesengine.Flags")); + assertEquals(EntityType.USER, EntityType.fromString("rulesengine.User")); + assertEquals(EntityType.USERS, EntityType.fromString("rulesengine.Users")); + assertNull(EntityType.fromString("unknown")); + } + + @Test + void messageType_fromString() { + assertEquals(MessageType.FULL, MessageType.fromString("full")); + 
assertEquals(MessageType.PARTIAL, MessageType.fromString("partial")); + assertEquals(MessageType.DELETE, MessageType.fromString("delete")); + assertEquals(MessageType.ERROR, MessageType.fromString("error")); + assertEquals(MessageType.UNKNOWN, MessageType.fromString("something_else")); + } + + @Test + void dataStreamReq_serializesCorrectly() { + Map keys = new HashMap<>(); + keys.put("company_id", "123"); + + DataStreamReq req = new DataStreamReq(Action.START, EntityType.FLAGS, keys); + assertEquals(Action.START, req.getAction()); + assertEquals("rulesengine.Flags", req.getEntityType()); + assertEquals(keys, req.getKeys()); + } + + @Test + void dataStreamBaseReq_wrapsRequest() { + DataStreamReq req = new DataStreamReq(Action.START, EntityType.FLAGS, null); + DataStreamBaseReq baseReq = new DataStreamBaseReq(req); + assertEquals(req, baseReq.getData()); + } + + @Test + void dataStreamResp_defaultConstruction() { + DataStreamResp resp = new DataStreamResp(); + assertNull(resp.getData()); + assertNull(resp.getEntityId()); + assertNull(resp.getEntityType()); + assertNull(resp.getMessageType()); + assertNull(resp.getEntityTypeEnum()); + assertEquals(MessageType.UNKNOWN, resp.getMessageTypeEnum()); + } +} From 11bb1c73f48fcb95d6a5a1889c53949c8c49ae6c Mon Sep 17 00:00:00 2001 From: Christopher Brady Date: Wed, 1 Apr 2026 11:58:20 -0600 Subject: [PATCH 03/27] add wasm binary download functionality --- .fernignore | 2 ++ .gitignore | 5 +++- WASM_VERSION | 1 + scripts/download-wasm.sh | 65 ++++++++++++++++++++++++++++++++++++++++ 4 files changed, 72 insertions(+), 1 deletion(-) create mode 100644 WASM_VERSION create mode 100755 scripts/download-wasm.sh diff --git a/.fernignore b/.fernignore index 0493b63..231a7a2 100644 --- a/.fernignore +++ b/.fernignore @@ -3,6 +3,8 @@ CLAUDE.md LICENSE README.md .github/CODEOWNERS +WASM_VERSION +scripts/ src/main/java/com/schematic/api/BaseSchematic.java src/main/java/com/schematic/api/EventBuffer.java 
src/main/java/com/schematic/api/Schematic.java diff --git a/.gitignore b/.gitignore index d4199ab..78b28ac 100644 --- a/.gitignore +++ b/.gitignore @@ -21,4 +21,7 @@ generated_testSrc/ generated/ bin -build \ No newline at end of file +build + +# WASM binary (downloaded at build time via scripts/download-wasm.sh) +src/main/resources/wasm/ \ No newline at end of file diff --git a/WASM_VERSION b/WASM_VERSION new file mode 100644 index 0000000..6e8bf73 --- /dev/null +++ b/WASM_VERSION @@ -0,0 +1 @@ +0.1.0 diff --git a/scripts/download-wasm.sh b/scripts/download-wasm.sh new file mode 100755 index 0000000..a8d2087 --- /dev/null +++ b/scripts/download-wasm.sh @@ -0,0 +1,65 @@ +#!/bin/bash +set -e + +# Downloads the rules engine WASM binary from the schematic-api GitHub Release. +# Reads the pinned version from WASM_VERSION at the repo root. + +SCRIPT_DIR="$(cd "$(dirname "$0")" && pwd)" +REPO_ROOT="$(cd "$SCRIPT_DIR/.." && pwd)" +WASM_DIR="$REPO_ROOT/src/main/resources/wasm" +VERSION_FILE="$REPO_ROOT/WASM_VERSION" + +GITHUB_REPO="SchematicHQ/schematic-api" + +if [ ! -f "$VERSION_FILE" ]; then + echo "ERROR: WASM_VERSION file not found at $VERSION_FILE" + exit 1 +fi + +VERSION=$(tr -d '[:space:]' < "$VERSION_FILE") +TAG="rulesengine/v${VERSION}" +ASSET_NAME="rulesengine-wasm-java-v${VERSION}.tar.gz" + +# Skip download if binary already exists and version matches +if [ -f "$WASM_DIR/rulesengine.wasm" ] && [ -f "$WASM_DIR/.wasm_version" ]; then + CURRENT=$(tr -d '[:space:]' < "$WASM_DIR/.wasm_version") + if [ "$CURRENT" = "$VERSION" ]; then + echo "WASM binary already at version $VERSION, skipping download." + exit 0 + fi +fi + +echo "Downloading rules engine WASM v${VERSION}..." +mkdir -p "$WASM_DIR" + +TMPDIR=$(mktemp -d) +trap 'rm -rf "$TMPDIR"' EXIT + +if ! 
gh release download "$TAG" \ + -R "$GITHUB_REPO" \ + -p "$ASSET_NAME" \ + -D "$TMPDIR" 2>/dev/null; then + echo "ERROR: Failed to download WASM binary" + echo "Tag: $TAG" + echo "Asset: $ASSET_NAME" + echo "" + echo "If this is a new version, ensure a release exists at:" + echo " https://github.com/${GITHUB_REPO}/releases/tag/${TAG}" + echo "" + echo "Ensure the GitHub CLI is authenticated: gh auth status" + exit 1 +fi + +tar -xzf "$TMPDIR/$ASSET_NAME" -C "$TMPDIR" + +if [ ! -f "$TMPDIR/rulesengine.wasm" ]; then + echo "ERROR: rulesengine.wasm not found in release archive" + ls -la "$TMPDIR" + exit 1 +fi + +cp "$TMPDIR"/rulesengine.wasm "$WASM_DIR/" + +echo "$VERSION" > "$WASM_DIR/.wasm_version" + +echo "Downloaded rules engine WASM v${VERSION} to $WASM_DIR/" From 8d40ddf05209b80b82f39cc890ffb34128002081 Mon Sep 17 00:00:00 2001 From: Christopher Brady Date: Wed, 1 Apr 2026 14:36:19 -0600 Subject: [PATCH 04/27] add datastream functionality --- build.gradle | 6 +- sample-app/build.gradle | 4 +- sample-app/src/main/java/sample/App.java | 97 ++- .../java/com/schematic/api/Schematic.java | 163 ++++- .../api/datastream/DataStreamClient.java | 584 ++++++++++++++++++ .../api/datastream/DataStreamException.java | 15 + .../datastream/DataStreamWebSocketClient.java | 3 +- .../api/datastream/DatastreamOptions.java | 136 ++++ .../schematic/api/datastream/EntityMerge.java | 179 ++++++ .../schematic/api/datastream/RulesEngine.java | 32 + .../api/datastream/WasmRulesEngine.java | 258 ++++++++ .../java/com/schematic/api/TestSchematic.java | 32 +- .../api/datastream/DataStreamClientTest.java | 441 +++++++++++++ .../api/datastream/EntityMergeTest.java | 217 +++++++ .../TestDataStreamWebSocketClient.java | 3 +- .../api/datastream/WasmRulesEngineTest.java | 189 ++++++ 16 files changed, 2305 insertions(+), 54 deletions(-) create mode 100644 src/main/java/com/schematic/api/datastream/DataStreamClient.java create mode 100644 
src/main/java/com/schematic/api/datastream/DataStreamException.java create mode 100644 src/main/java/com/schematic/api/datastream/DatastreamOptions.java create mode 100644 src/main/java/com/schematic/api/datastream/EntityMerge.java create mode 100644 src/main/java/com/schematic/api/datastream/RulesEngine.java create mode 100644 src/main/java/com/schematic/api/datastream/WasmRulesEngine.java create mode 100644 src/test/java/com/schematic/api/datastream/DataStreamClientTest.java create mode 100644 src/test/java/com/schematic/api/datastream/EntityMergeTest.java create mode 100644 src/test/java/com/schematic/api/datastream/WasmRulesEngineTest.java diff --git a/build.gradle b/build.gradle index 29c05bb..7136389 100644 --- a/build.gradle +++ b/build.gradle @@ -18,6 +18,8 @@ dependencies { api 'com.fasterxml.jackson.core:jackson-databind:2.18.6' api 'com.fasterxml.jackson.datatype:jackson-datatype-jdk8:2.18.6' api 'com.fasterxml.jackson.datatype:jackson-datatype-jsr310:2.18.6' + implementation 'com.dylibso.chicory:runtime:1.4.0' + implementation 'com.dylibso.chicory:wasi:1.4.0' testImplementation 'org.junit.jupiter:junit-jupiter-api:5.8.2' testImplementation 'org.junit.jupiter:junit-jupiter-engine:5.8.2' testImplementation 'org.junit.jupiter:junit-jupiter-params:5.8.2' @@ -26,8 +28,8 @@ dependencies { } -sourceCompatibility = 1.8 -targetCompatibility = 1.8 +sourceCompatibility = 11 +targetCompatibility = 11 tasks.withType(Javadoc) { failOnError false diff --git a/sample-app/build.gradle b/sample-app/build.gradle index a6ac6d4..3d09398 100644 --- a/sample-app/build.gradle +++ b/sample-app/build.gradle @@ -18,5 +18,5 @@ application { mainClass = 'sample.App' } -sourceCompatibility = 1.8 -targetCompatibility = 1.8 +sourceCompatibility = 11 +targetCompatibility = 11 diff --git a/sample-app/src/main/java/sample/App.java b/sample-app/src/main/java/sample/App.java index 8fb778f..bb9aa0c 100644 --- a/sample-app/src/main/java/sample/App.java +++ 
b/sample-app/src/main/java/sample/App.java @@ -1,15 +1,16 @@ /* * Schematic Java Client - Datastream Test Server * - * This example demonstrates how to use the Schematic Java client to check feature flags. - * It is modeled after the C# DatastreamTestServer example and will be extended - * to support replicator/datastream mode once that functionality is available in the Java SDK. + * This example demonstrates how to use the Schematic Java client to check feature flags + * with DataStream support, including replicator mode. * * Environment Variables: * - SCHEMATIC_API_KEY: Your Schematic API key (required) * - SCHEMATIC_API_URL: Schematic API base URL (default: https://api.schematichq.com) * - SERVER_PORT: Port to listen on (default: 8080) * - CACHE_TTL_MS: Cache TTL in milliseconds (default: 5000) + * - REPLICATOR_HEALTH_URL: Replicator health check URL (default: http://localhost:8090/ready) + * - USE_REPLICATOR: Set to "true" to enable replicator mode (default: false) * * Usage: * 1. Set environment variables (only SCHEMATIC_API_KEY is required) @@ -17,24 +18,34 @@ * 3. 
Test endpoints: * - GET / - Welcome message * - GET /config - Show current configuration - * - GET /health - Check service health - * - POST /checkflag - Check feature flags + * - GET /health - Health check with datastream status + * - GET /datastream-status - DataStream/replicator connection status + * - POST /checkflag - Check a feature flag * * Example checkflag request: * curl -X POST http://localhost:8080/checkflag \ * -H "Content-Type: application/json" \ * -d '{"flag-key":"my-flag","company":{"id":"comp-123"},"user":{"id":"user-456"}}' + * + * Replicator mode example: + * export SCHEMATIC_API_KEY="your-key" + * export USE_REPLICATOR=true + * export REPLICATOR_HEALTH_URL="http://localhost:8090/ready" + * ./gradlew :sample-app:run */ package sample; import com.fasterxml.jackson.databind.ObjectMapper; import com.schematic.api.Schematic; +import com.schematic.api.types.RulesengineCheckFlagResult; +import com.schematic.api.datastream.DatastreamOptions; import com.sun.net.httpserver.HttpExchange; import com.sun.net.httpserver.HttpServer; import java.io.IOException; import java.io.InputStream; import java.io.OutputStream; import java.net.InetSocketAddress; +import java.time.Duration; import java.time.Instant; import java.util.LinkedHashMap; import java.util.Map; @@ -47,6 +58,8 @@ public final class App { private static String schematicApiUrl; private static int cacheTtlMs; private static int serverPort; + private static boolean useReplicator; + private static String replicatorHealthUrl; public static void main(String[] args) throws Exception { String apiKey = System.getenv("SCHEMATIC_API_KEY"); @@ -58,30 +71,42 @@ public static void main(String[] args) throws Exception { schematicApiUrl = envOrDefault("SCHEMATIC_API_URL", "https://api.schematichq.com"); serverPort = Integer.parseInt(envOrDefault("SERVER_PORT", "8080")); cacheTtlMs = Integer.parseInt(envOrDefault("CACHE_TTL_MS", "5000")); + useReplicator = "true".equalsIgnoreCase(envOrDefault("USE_REPLICATOR", 
"false")); + replicatorHealthUrl = envOrDefault("REPLICATOR_HEALTH_URL", "http://localhost:8090/ready"); + + // Configure DataStream options + DatastreamOptions.Builder datastreamBuilder = + DatastreamOptions.builder().cacheTTL(Duration.ofMillis(cacheTtlMs)); - Schematic.Builder builder = Schematic.builder().apiKey(apiKey).basePath(schematicApiUrl); + if (useReplicator) { + datastreamBuilder.withReplicatorMode(replicatorHealthUrl); + } - // TODO: Once datastream/replicator mode is available in the Java SDK, configure it here: - // - Redis cache provider - // - Replicator health URL - // - DatastreamOptions + DatastreamOptions datastreamOptions = datastreamBuilder.build(); - schematic = builder.build(); + schematic = Schematic.builder() + .apiKey(apiKey) + .basePath(schematicApiUrl) + .datastreamOptions(datastreamOptions) + .build(); HttpServer server = HttpServer.create(new InetSocketAddress(serverPort), 0); server.createContext("/", App::handleRoot); server.createContext("/config", App::handleConfig); server.createContext("/health", App::handleHealth); + server.createContext("/datastream-status", App::handleDatastreamStatus); server.createContext("/checkflag", App::handleCheckFlag); server.setExecutor(null); server.start(); System.out.println("Datastream Test Server started on port " + serverPort); + System.out.println("Mode: " + (useReplicator ? 
"replicator" : "direct datastream")); System.out.println("Endpoints:"); - System.out.println(" GET / - Welcome message"); - System.out.println(" GET /config - Show configuration"); - System.out.println(" GET /health - Health check"); - System.out.println(" POST /checkflag - Check a feature flag"); + System.out.println(" GET / - Welcome message"); + System.out.println(" GET /config - Show configuration"); + System.out.println(" GET /health - Health check"); + System.out.println(" GET /datastream-status - DataStream connection status"); + System.out.println(" POST /checkflag - Check a feature flag"); } private static void handleRoot(HttpExchange exchange) throws IOException { @@ -102,10 +127,15 @@ private static void handleConfig(HttpExchange exchange) throws IOException { config.put("schematicApiUrl", schematicApiUrl); config.put("cacheTtlMs", cacheTtlMs); config.put("hasApiKey", true); + config.put("replicatorMode", schematic.isReplicatorMode()); + if (useReplicator) { + config.put("replicatorHealthUrl", replicatorHealthUrl); + } Map endpoints = new LinkedHashMap<>(); endpoints.put("health", "/health"); endpoints.put("config", "/config"); + endpoints.put("datastreamStatus", "/datastream-status"); endpoints.put("checkFlag", "/checkflag (POST)"); Map response = new LinkedHashMap<>(); @@ -124,6 +154,8 @@ private static void handleHealth(HttpExchange exchange) throws IOException { Map response = new LinkedHashMap<>(); response.put("status", "healthy"); + response.put("replicatorMode", schematic.isReplicatorMode()); + response.put("datastreamConnected", schematic.isDatastreamConnected()); Map config = new LinkedHashMap<>(); config.put("schematicApiUrl", schematicApiUrl); @@ -134,6 +166,27 @@ private static void handleHealth(HttpExchange exchange) throws IOException { sendJson(exchange, 200, response); } + private static void handleDatastreamStatus(HttpExchange exchange) throws IOException { + if (!"GET".equals(exchange.getRequestMethod())) { + 
sendMethodNotAllowed(exchange); + return; + } + + Map response = new LinkedHashMap<>(); + response.put("replicatorMode", schematic.isReplicatorMode()); + response.put("datastreamConnected", schematic.isDatastreamConnected()); + + Map config = new LinkedHashMap<>(); + config.put("schematicApiUrl", schematicApiUrl); + if (useReplicator) { + config.put("replicatorHealthUrl", replicatorHealthUrl); + } + response.put("configuration", config); + response.put("timestamp", Instant.now().toString()); + + sendJson(exchange, 200, response); + } + @SuppressWarnings("unchecked") private static void handleCheckFlag(HttpExchange exchange) throws IOException { if (!"POST".equals(exchange.getRequestMethod())) { @@ -157,12 +210,20 @@ private static void handleCheckFlag(HttpExchange exchange) throws IOException { Map user = toStringMap(requestBody.get("user")); long startTime = System.nanoTime(); - boolean result = schematic.checkFlag(flagKey, company, user); + RulesengineCheckFlagResult result = schematic.checkFlagWithEntitlement(flagKey, company, user); double durationMs = (System.nanoTime() - startTime) / 1_000_000.0; Map response = new LinkedHashMap<>(); - response.put("flagKey", flagKey); - response.put("result", result); + response.put("flagKey", result.getFlagKey()); + response.put("value", result.getValue()); + response.put("reason", result.getReason()); + result.getFlagId().ifPresent(v -> response.put("flagId", v)); + result.getCompanyId().ifPresent(v -> response.put("companyId", v)); + result.getUserId().ifPresent(v -> response.put("userId", v)); + result.getRuleId().ifPresent(v -> response.put("ruleId", v)); + result.getErr().ifPresent(v -> response.put("error", v)); + response.put("replicatorMode", schematic.isReplicatorMode()); + response.put("datastreamConnected", schematic.isDatastreamConnected()); response.put("durationMs", durationMs); response.put("timestamp", Instant.now().toString()); diff --git a/src/main/java/com/schematic/api/Schematic.java 
b/src/main/java/com/schematic/api/Schematic.java index 8bf13c4..f103ba0 100644 --- a/src/main/java/com/schematic/api/Schematic.java +++ b/src/main/java/com/schematic/api/Schematic.java @@ -7,16 +7,21 @@ import com.schematic.api.core.Environment; import com.schematic.api.core.NoOpHttpClient; import com.schematic.api.core.ObjectMappers; +import com.schematic.api.datastream.DataStreamClient; +import com.schematic.api.datastream.DatastreamOptions; +import com.schematic.api.datastream.WasmRulesEngine; import com.schematic.api.logger.ConsoleLogger; import com.schematic.api.logger.SchematicLogger; import com.schematic.api.resources.features.types.CheckFlagResponse; import com.schematic.api.types.CheckFlagRequestBody; +import com.schematic.api.types.CheckFlagResponseData; import com.schematic.api.types.CreateEventRequestBody; import com.schematic.api.types.EventBody; import com.schematic.api.types.EventBodyIdentify; import com.schematic.api.types.EventBodyIdentifyCompany; import com.schematic.api.types.EventBodyTrack; import com.schematic.api.types.EventType; +import com.schematic.api.types.RulesengineCheckFlagResult; import java.time.Duration; import java.time.OffsetDateTime; import java.util.Collections; @@ -28,12 +33,14 @@ public final class Schematic extends BaseSchematic implements AutoCloseable { private final Duration eventBufferInterval; private final EventBuffer eventBuffer; - private final List> flagCheckCacheProviders; + private final List> flagCheckCacheProviders; private final Map flagDefaults; private final SchematicLogger logger; private final String apiKey; private final Thread shutdownHook; private final boolean offline; + private final DataStreamClient dataStreamClient; + private final DatastreamOptions datastreamOptions; private Schematic(Builder builder) { super(buildClientOptions(builder.apiKey, builder)); @@ -44,8 +51,10 @@ private Schematic(Builder builder) { this.logger = builder.logger != null ? 
builder.logger : new ConsoleLogger(); this.flagDefaults = builder.flagDefaults != null ? builder.flagDefaults : new HashMap<>(); this.offline = builder.offline; - this.flagCheckCacheProviders = - builder.cacheProviders != null ? builder.cacheProviders : Collections.singletonList(new LocalCache<>()); + this.flagCheckCacheProviders = builder.cacheProviders != null + ? builder.cacheProviders + : Collections.singletonList(new LocalCache()); + this.datastreamOptions = builder.datastreamOptions; this.eventBuffer = new EventBuffer( super.events(), @@ -53,9 +62,34 @@ private Schematic(Builder builder) { builder.eventBufferMaxSize, builder.eventBufferInterval != null ? builder.eventBufferInterval : Duration.ofMillis(5000)); + // Initialize DataStream client if options are provided + if (this.datastreamOptions != null && !this.offline) { + String basePath = builder.basePath != null ? builder.basePath : "https://api.schematichq.com"; + + // Initialize WASM rules engine for local flag evaluation + WasmRulesEngine rulesEngine = null; + try { + rulesEngine = new WasmRulesEngine(this.logger); + rulesEngine.initialize(); + } catch (Exception e) { + this.logger.warn( + "WASM rules engine not available, flag checks will fall back to API: " + e.getMessage()); + rulesEngine = null; + } + + this.dataStreamClient = + new DataStreamClient(this.datastreamOptions, this.apiKey, basePath, this.logger, rulesEngine); + this.dataStreamClient.start(); + } else { + this.dataStreamClient = null; + } + this.shutdownHook = new Thread( () -> { try { + if (this.dataStreamClient != null) { + this.dataStreamClient.close(); + } this.eventBuffer.close(); } catch (Exception e) { logger.error("Error during Schematic shutdown: " + e.getMessage()); @@ -74,12 +108,13 @@ public static class Builder { private String apiKey; private SchematicLogger logger; private Map flagDefaults; - private List> cacheProviders; + private List> cacheProviders; private boolean offline; private Duration eventBufferInterval; 
private int eventBufferMaxSize = 100; private String basePath; private Map headers; + private DatastreamOptions datastreamOptions; public Builder apiKey(String apiKey) { this.apiKey = apiKey; @@ -96,7 +131,7 @@ public Builder flagDefaults(Map flagDefaults) { return this; } - public Builder cacheProviders(List> cacheProviders) { + public Builder cacheProviders(List> cacheProviders) { this.cacheProviders = cacheProviders; return this; } @@ -126,6 +161,11 @@ public Builder headers(Map headers) { return this; } + public Builder datastreamOptions(DatastreamOptions datastreamOptions) { + this.datastreamOptions = datastreamOptions; + return this; + } + public Schematic build() { if (apiKey == null) { throw new IllegalStateException("API key must be set"); @@ -148,7 +188,7 @@ private static ClientOptions buildClientOptions(String apiKey, Builder builder) return clientOptionsBuilder.build(); } - public List> getFlagCheckCacheProviders() { + public List> getFlagCheckCacheProviders() { return flagCheckCacheProviders; } @@ -164,19 +204,86 @@ public boolean isOffline() { return this.offline; } + /** + * Returns the DataStream client, or null if datastream is not configured. + */ + public DataStreamClient getDataStreamClient() { + return this.dataStreamClient; + } + + /** + * Returns whether the client is operating in replicator mode. + */ + public boolean isReplicatorMode() { + return this.dataStreamClient != null && this.dataStreamClient.isReplicatorMode(); + } + + /** + * Returns whether the datastream connection is active and ready. + */ + public boolean isDatastreamConnected() { + return this.dataStreamClient != null && this.dataStreamClient.isConnected(); + } + + /** + * Checks a feature flag, returning a boolean value. + * + *
<p>
If datastream is configured and connected, evaluates the flag locally using cached + * data and the rules engine. Falls back to the API if datastream is unavailable or + * evaluation fails. + */ public boolean checkFlag(String flagKey, Map company, Map user) { + return checkFlagWithEntitlement(flagKey, company, user).getValue(); + } + + /** + * Checks a feature flag, returning the full evaluation result including metadata + * such as the evaluation reason, rule ID, and entitlement information. + * + *
<p>Priority order: + * + * <ol> + *   <li>DataStream evaluation (if configured and connected)</li> + *   <li>API call with result caching (fallback)</li> + *   <li>Flag default value (if all else fails)</li> + * </ol>
+ */ + public RulesengineCheckFlagResult checkFlagWithEntitlement( + String flagKey, Map company, Map user) { if (offline) { - return getFlagDefault(flagKey); + return RulesengineCheckFlagResult.builder() + .flagKey(flagKey) + .reason("flag default") + .value(getFlagDefault(flagKey)) + .build(); } + // Try datastream first if available + if (dataStreamClient != null && dataStreamClient.isConnected()) { + try { + return dataStreamClient.checkFlag(flagKey, company, user); + } catch (Exception e) { + logger.debug( + "Datastream flag check failed for " + flagKey + ", falling back to API: " + e.getMessage()); + } + } + + // Fall back to API + return checkFlagViaApi(flagKey, company, user); + } + + /** + * Checks a flag via the Schematic API, using the flag check result cache. + */ + private RulesengineCheckFlagResult checkFlagViaApi( + String flagKey, Map company, Map user) { try { String cacheKey = buildCacheKey(flagKey, company, user); - // Check cache - for (CacheProvider provider : flagCheckCacheProviders) { - Boolean cachedValue = provider.get(cacheKey); - if (cachedValue != null) { - return cachedValue; + // Check flag check result cache + for (CacheProvider provider : flagCheckCacheProviders) { + RulesengineCheckFlagResult cached = provider.get(cacheKey); + if (cached != null) { + return cached; } } @@ -185,17 +292,32 @@ public boolean checkFlag(String flagKey, Map company, Map provider : flagCheckCacheProviders) { - provider.set(cacheKey, value); + // Update flag check result cache + for (CacheProvider provider : flagCheckCacheProviders) { + provider.set(cacheKey, result); } - return value; + return result; } catch (Exception e) { - logger.error("Error checking flag: " + e.getMessage()); - return getFlagDefault(flagKey); + logger.error("Error checking flag via API: " + e.getMessage()); + return RulesengineCheckFlagResult.builder() + .flagKey(flagKey) + .reason("flag default") + .value(getFlagDefault(flagKey)) + .err(e.getMessage()) + .build(); } } @@ -267,6 
+389,9 @@ public void close() { // Shutdown is already in progress, hook will run automatically } + if (dataStreamClient != null) { + dataStreamClient.close(); + } eventBuffer.close(); } catch (Exception e) { logger.error("Error closing Schematic client: " + e.getMessage()); diff --git a/src/main/java/com/schematic/api/datastream/DataStreamClient.java b/src/main/java/com/schematic/api/datastream/DataStreamClient.java new file mode 100644 index 0000000..da5bec2 --- /dev/null +++ b/src/main/java/com/schematic/api/datastream/DataStreamClient.java @@ -0,0 +1,584 @@ +package com.schematic.api.datastream; + +import com.fasterxml.jackson.databind.JsonNode; +import com.fasterxml.jackson.databind.ObjectMapper; +import com.schematic.api.cache.CacheProvider; +import com.schematic.api.core.ObjectMappers; +import com.schematic.api.datastream.DataStreamMessages.Action; +import com.schematic.api.datastream.DataStreamMessages.DataStreamBaseReq; +import com.schematic.api.datastream.DataStreamMessages.DataStreamReq; +import com.schematic.api.datastream.DataStreamMessages.DataStreamResp; +import com.schematic.api.datastream.DataStreamMessages.EntityType; +import com.schematic.api.datastream.DataStreamMessages.MessageType; +import com.schematic.api.logger.SchematicLogger; +import com.schematic.api.types.RulesengineCheckFlagResult; +import com.schematic.api.types.RulesengineCompany; +import com.schematic.api.types.RulesengineFlag; +import com.schematic.api.types.RulesengineUser; +import java.io.Closeable; +import java.io.IOException; +import java.util.Map; +import java.util.concurrent.Executors; +import java.util.concurrent.ScheduledExecutorService; +import java.util.concurrent.ScheduledFuture; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicBoolean; +import okhttp3.OkHttpClient; +import okhttp3.Request; +import okhttp3.Response; + +/** + * High-level DataStream client that manages WebSocket connections (or replicator mode), + * caches entities (flags, 
companies, users), and provides flag checking. + * + *

Entities are cached as typed objects ({@link RulesengineFlag}, {@link RulesengineCompany}, + * {@link RulesengineUser}) matching the approach used by the Python and Go SDKs. + */ +public class DataStreamClient implements Closeable { + + // Cache key prefixes + static final String FLAG_PREFIX = "flags:"; + static final String COMPANY_PREFIX = "company:"; + static final String COMPANY_KEY_PREFIX = "company_key:"; + static final String USER_PREFIX = "user:"; + static final String USER_KEY_PREFIX = "user_key:"; + + private final DatastreamOptions options; + private final String apiKey; + private final String apiUrl; + private final SchematicLogger logger; + private final ObjectMapper objectMapper; + private final RulesEngine rulesEngine; + + // Typed entity caches + private final CacheProvider flagCache; + private final CacheProvider companyCache; + private final CacheProvider userCache; + + // WebSocket client (direct mode only) + private volatile DataStreamWebSocketClient wsClient; + + // Replicator mode state + private final AtomicBoolean replicatorReady = new AtomicBoolean(false); + private volatile ScheduledExecutorService healthCheckScheduler; + private volatile ScheduledFuture healthCheckTask; + private final OkHttpClient httpClient; + + private final AtomicBoolean closed = new AtomicBoolean(false); + + public DataStreamClient(DatastreamOptions options, String apiKey, String apiUrl, SchematicLogger logger) { + this(options, apiKey, apiUrl, logger, null); + } + + public DataStreamClient( + DatastreamOptions options, String apiKey, String apiUrl, SchematicLogger logger, RulesEngine rulesEngine) { + this.options = options; + this.apiKey = apiKey; + this.apiUrl = apiUrl; + this.logger = logger; + this.objectMapper = ObjectMappers.JSON_MAPPER; + this.rulesEngine = rulesEngine; + this.flagCache = options.getFlagCacheProvider(); + this.companyCache = options.getCompanyCacheProvider(); + this.userCache = options.getUserCacheProvider(); + this.httpClient = new 
OkHttpClient.Builder() + .connectTimeout(5, TimeUnit.SECONDS) + .readTimeout(5, TimeUnit.SECONDS) + .build(); + } + + /** + * Starts the DataStream client. In direct mode, connects via WebSocket. + * In replicator mode, starts periodic health checks. + */ + public void start() { + if (closed.get()) { + throw new IllegalStateException("DataStreamClient has been closed"); + } + + if (options.isReplicatorMode()) { + startReplicatorMode(); + } else { + startDirectMode(); + } + } + + /** + * Returns whether the datastream is connected and ready for flag checks. + */ + public boolean isConnected() { + if (options.isReplicatorMode()) { + return replicatorReady.get(); + } + return wsClient != null && wsClient.isReady(); + } + + /** + * Returns whether this client is running in replicator mode. + */ + public boolean isReplicatorMode() { + return options.isReplicatorMode(); + } + + /** + * Checks a flag using cached datastream data and the rules engine. + */ + public RulesengineCheckFlagResult checkFlag(String flagKey, Map company, Map user) { + // Step 1: Get flag from cache + RulesengineFlag flag = flagCache.get(FLAG_PREFIX + flagKey); + if (flag == null) { + throw new DataStreamException("Flag not found in cache: " + flagKey); + } + + // Step 2: Try to get company/user from cache + boolean needsCompany = company != null && !company.isEmpty(); + boolean needsUser = user != null && !user.isEmpty(); + + RulesengineCompany cachedCompany = null; + RulesengineUser cachedUser = null; + + if (needsCompany) { + cachedCompany = getCachedCompany(company); + log("debug", "Company " + (cachedCompany != null ? "found in cache" : "not found in cache")); + } + if (needsUser) { + cachedUser = getCachedUser(user); + log("debug", "User " + (cachedUser != null ? 
"found in cache" : "not found in cache")); + } + + // Step 3: Replicator mode - evaluate with whatever we have + if (options.isReplicatorMode()) { + return evaluateFlag(flag, cachedCompany, cachedUser); + } + + // Step 4: Direct mode - if all needed data is cached, evaluate immediately + if ((!needsCompany || cachedCompany != null) && (!needsUser || cachedUser != null)) { + log("debug", "All required resources found in cache for flag " + flagKey); + return evaluateFlag(flag, cachedCompany, cachedUser); + } + + // Step 5: Direct mode - fetch missing entities via datastream + if (!isConnected()) { + throw new DataStreamException("Datastream not connected and required entities not in cache"); + } + + if (needsCompany && cachedCompany == null) { + requestEntity(EntityType.COMPANY, company); + } + if (needsUser && cachedUser == null) { + requestEntity(EntityType.USER, user); + } + + return evaluateFlag(flag, cachedCompany, cachedUser); + } + + /** + * Evaluates a flag using the rules engine. Falls back to the flag's default value + * if the rules engine is not available. + */ + RulesengineCheckFlagResult evaluateFlag(RulesengineFlag flag, RulesengineCompany company, RulesengineUser user) { + boolean defaultValue = flag.getDefaultValue(); + String flagKey = flag.getKey(); + String flagId = flag.getId(); + String companyId = company != null ? company.getId() : null; + String userId = user != null ? 
user.getId() : null; + + if (rulesEngine != null && rulesEngine.isInitialized()) { + try { + RulesengineCheckFlagResult result = rulesEngine.checkFlag(flag, company, user); + // The WASM engine returns a complete result — use it directly, + // enriching with IDs from context if the engine didn't set them + return RulesengineCheckFlagResult.builder() + .from(result) + .companyId(result.getCompanyId().orElse(companyId)) + .userId(result.getUserId().orElse(userId)) + .build(); + } catch (Exception e) { + log("error", "Rules engine evaluation failed for flag " + flagKey + ": " + e.getMessage()); + return RulesengineCheckFlagResult.builder() + .flagKey(flagKey) + .reason("RULES_ENGINE_ERROR") + .value(defaultValue) + .flagId(flagId) + .companyId(companyId) + .userId(userId) + .err(e.getMessage()) + .build(); + } + } + + log("debug", "Rules engine not available, returning default for flag " + flagKey); + return RulesengineCheckFlagResult.builder() + .flagKey(flagKey) + .reason("RULES_ENGINE_UNAVAILABLE") + .value(defaultValue) + .flagId(flagId) + .companyId(companyId) + .userId(userId) + .build(); + } + + /** + * Retrieves a cached flag definition by key. + */ + public RulesengineFlag getCachedFlag(String flagKey) { + return flagCache.get(FLAG_PREFIX + flagKey); + } + + /** + * Retrieves a cached company by its lookup keys. + */ + public RulesengineCompany getCachedCompany(Map keys) { + if (keys == null || keys.isEmpty()) { + return null; + } + // Look up by first key pair, matching the Node/Python SDK approach + Map.Entry first = keys.entrySet().iterator().next(); + String cacheKey = COMPANY_KEY_PREFIX + first.getKey() + ":" + first.getValue(); + return companyCache.get(cacheKey); + } + + /** + * Retrieves a cached user by its lookup keys. 
+ */ + public RulesengineUser getCachedUser(Map keys) { + if (keys == null || keys.isEmpty()) { + return null; + } + Map.Entry first = keys.entrySet().iterator().next(); + String cacheKey = USER_KEY_PREFIX + first.getKey() + ":" + first.getValue(); + return userCache.get(cacheKey); + } + + /** + * Sends a request to the datastream to fetch a specific entity. + * Only works in direct (WebSocket) mode. + */ + public void requestEntity(EntityType entityType, Map keys) { + if (options.isReplicatorMode()) { + log("debug", "Cannot request entities in replicator mode"); + return; + } + if (wsClient == null || !wsClient.isReady()) { + log("warn", "Cannot request entity: WebSocket not ready"); + return; + } + + DataStreamReq req = new DataStreamReq(Action.START, entityType, keys); + DataStreamBaseReq baseReq = new DataStreamBaseReq(req); + wsClient.sendMessage(baseReq); + } + + @Override + public void close() { + if (!closed.compareAndSet(false, true)) { + return; + } + + log("info", "Closing DataStream client"); + + if (healthCheckTask != null) { + healthCheckTask.cancel(false); + } + if (healthCheckScheduler != null) { + healthCheckScheduler.shutdownNow(); + } + if (wsClient != null) { + wsClient.close(); + } + + httpClient.dispatcher().executorService().shutdownNow(); + httpClient.connectionPool().evictAll(); + + log("info", "DataStream client closed"); + } + + // --- Direct WebSocket mode --- + + private void startDirectMode() { + log("info", "Starting DataStream client in direct mode"); + + wsClient = DataStreamWebSocketClient.builder() + .url(apiUrl) + .apiKey(apiKey) + .messageHandler(this::handleMessage) + .connectionReadyHandler(this::onConnectionReady) + .logger(logger) + .build(); + + wsClient.start(); + } + + private void onConnectionReady() { + log("info", "DataStream connection established, requesting flags"); + DataStreamReq req = new DataStreamReq(Action.START, EntityType.FLAGS, null); + DataStreamBaseReq baseReq = new DataStreamBaseReq(req); + 
wsClient.sendMessage(baseReq); + } + + // --- Replicator mode --- + + private void startReplicatorMode() { + log("info", "Starting DataStream client in replicator mode"); + log("info", "Replicator health URL: " + options.getReplicatorHealthUrl()); + + healthCheckScheduler = Executors.newSingleThreadScheduledExecutor(r -> { + Thread t = new Thread(r, "schematic-replicator-health"); + t.setDaemon(true); + return t; + }); + + long intervalMs = options.getReplicatorHealthCheckInterval().toMillis(); + healthCheckTask = healthCheckScheduler.scheduleAtFixedRate( + this::checkReplicatorHealth, 0, intervalMs, TimeUnit.MILLISECONDS); + } + + void checkReplicatorHealth() { + try { + Request request = new Request.Builder() + .url(options.getReplicatorHealthUrl()) + .get() + .build(); + + try (Response response = httpClient.newCall(request).execute()) { + if (response.isSuccessful() && response.body() != null) { + JsonNode body = objectMapper.readTree(response.body().string()); + boolean ready = body.has("ready") && body.get("ready").asBoolean(false); + boolean wasReady = replicatorReady.getAndSet(ready); + + if (ready && !wasReady) { + log("info", "Replicator is now ready"); + if (body.has("cache_version")) { + log( + "info", + "Replicator cache version: " + + body.get("cache_version").asText()); + } + } else if (!ready && wasReady) { + log("warn", "Replicator is no longer ready"); + } + } else { + boolean wasReady = replicatorReady.getAndSet(false); + if (wasReady) { + log("warn", "Replicator health check failed with status: " + response.code()); + } + } + } + } catch (IOException e) { + boolean wasReady = replicatorReady.getAndSet(false); + if (wasReady) { + log("warn", "Replicator health check failed: " + e.getMessage()); + } + log("debug", "Replicator health check error: " + e.getMessage()); + } + } + + // --- Message handling --- + + void handleMessage(DataStreamResp message) { + EntityType entityType = message.getEntityTypeEnum(); + MessageType messageType = 
message.getMessageTypeEnum(); + + if (entityType == null) { + log("warn", "Received message with unknown entity type: " + message.getEntityType()); + return; + } + + if (messageType == MessageType.ERROR) { + handleErrorMessage(message); + return; + } + + switch (entityType) { + case FLAG: + case FLAGS: + handleFlagMessage(message, messageType); + break; + case COMPANY: + case COMPANIES: + handleCompanyMessage(message, messageType); + break; + case USER: + case USERS: + handleUserMessage(message, messageType); + break; + default: + log("debug", "Unhandled entity type: " + entityType); + } + } + + private void handleFlagMessage(DataStreamResp message, MessageType messageType) { + JsonNode data = message.getData(); + if (data == null) { + return; + } + + if (messageType == MessageType.FULL) { + if (data.isArray()) { + for (JsonNode flagData : data) { + cacheFlag(flagData); + } + } else { + cacheFlag(data); + } + } else if (messageType == MessageType.DELETE) { + String flagKey = data.has("key") ? 
data.get("key").asText() : null; + if (flagKey != null) { + flagCache.set(FLAG_PREFIX + flagKey, null); + } + } + } + + private void handleCompanyMessage(DataStreamResp message, MessageType messageType) { + JsonNode data = message.getData(); + if (data == null) { + return; + } + + if (messageType == MessageType.FULL) { + if (data.isArray()) { + for (JsonNode companyData : data) { + cacheCompany(companyData); + } + } else { + cacheCompany(data); + } + } else if (messageType == MessageType.PARTIAL) { + String entityId = message.getEntityId(); + if (entityId != null) { + RulesengineCompany existing = companyCache.get(COMPANY_PREFIX + entityId); + if (existing != null) { + try { + RulesengineCompany merged = EntityMerge.partialCompany(existing, data); + cacheCompanyObject(merged); + } catch (Exception e) { + log("warn", "Failed to merge partial company update: " + e.getMessage()); + } + } else { + // No existing company — try to parse as full + cacheCompany(data); + } + } + } else if (messageType == MessageType.DELETE) { + String entityId = message.getEntityId(); + if (entityId != null) { + companyCache.set(COMPANY_PREFIX + entityId, null); + } + } + } + + private void handleUserMessage(DataStreamResp message, MessageType messageType) { + JsonNode data = message.getData(); + if (data == null) { + return; + } + + if (messageType == MessageType.FULL) { + if (data.isArray()) { + for (JsonNode userData : data) { + cacheUser(userData); + } + } else { + cacheUser(data); + } + } else if (messageType == MessageType.PARTIAL) { + String entityId = message.getEntityId(); + if (entityId != null) { + RulesengineUser existing = userCache.get(USER_PREFIX + entityId); + if (existing != null) { + try { + RulesengineUser merged = EntityMerge.partialUser(existing, data); + cacheUserObject(merged); + } catch (Exception e) { + log("warn", "Failed to merge partial user update: " + e.getMessage()); + } + } else { + cacheUser(data); + } + } + } else if (messageType == MessageType.DELETE) { + 
String entityId = message.getEntityId(); + if (entityId != null) { + userCache.set(USER_PREFIX + entityId, null); + } + } + } + + private void handleErrorMessage(DataStreamResp message) { + JsonNode data = message.getData(); + if (data != null) { + log("error", "DataStream error for entity " + message.getEntityType() + ": " + data.toString()); + } else { + log("error", "DataStream error for entity " + message.getEntityType()); + } + } + + // --- Cache helpers: parse JSON once into typed objects --- + + private void cacheFlag(JsonNode data) { + try { + RulesengineFlag flag = objectMapper.treeToValue(data, RulesengineFlag.class); + log("debug", "Caching flag: " + flag.getKey()); + flagCache.set(FLAG_PREFIX + flag.getKey(), flag); + } catch (Exception e) { + log("warn", "Failed to parse flag from datastream: " + e.getMessage()); + } + } + + private void cacheCompany(JsonNode data) { + try { + RulesengineCompany company = objectMapper.treeToValue(data, RulesengineCompany.class); + cacheCompanyObject(company); + } catch (Exception e) { + log("warn", "Failed to parse company from datastream: " + e.getMessage()); + } + } + + private void cacheCompanyObject(RulesengineCompany company) { + companyCache.set(COMPANY_PREFIX + company.getId(), company); + for (Map.Entry entry : company.getKeys().entrySet()) { + String cacheKey = COMPANY_KEY_PREFIX + entry.getKey() + ":" + entry.getValue(); + companyCache.set(cacheKey, company); + } + } + + private void cacheUser(JsonNode data) { + try { + RulesengineUser user = objectMapper.treeToValue(data, RulesengineUser.class); + cacheUserObject(user); + } catch (Exception e) { + log("warn", "Failed to parse user from datastream: " + e.getMessage()); + } + } + + private void cacheUserObject(RulesengineUser user) { + userCache.set(USER_PREFIX + user.getId(), user); + for (Map.Entry entry : user.getKeys().entrySet()) { + String cacheKey = USER_KEY_PREFIX + entry.getKey() + ":" + entry.getValue(); + userCache.set(cacheKey, user); + } + } + + 
/**
 * Unchecked exception signaling a failed DataStream operation, such as a flag
 * missing from the cache or a disconnected stream.
 */
public class DataStreamException extends RuntimeException {

    /**
     * Creates an exception with a descriptive message.
     *
     * @param message description of the failure
     */
    public DataStreamException(String message) {
        super(message);
    }

    /**
     * Creates an exception with a descriptive message and an underlying cause.
     *
     * @param message description of the failure
     * @param cause the exception that triggered this one
     */
    public DataStreamException(String message, Throwable cause) {
        super(message, cause);
    }
}
"wss" : "ws"; String host = parsed.getHost(); diff --git a/src/main/java/com/schematic/api/datastream/DatastreamOptions.java b/src/main/java/com/schematic/api/datastream/DatastreamOptions.java new file mode 100644 index 0000000..9f5041a --- /dev/null +++ b/src/main/java/com/schematic/api/datastream/DatastreamOptions.java @@ -0,0 +1,136 @@ +package com.schematic.api.datastream; + +import com.schematic.api.cache.CacheProvider; +import com.schematic.api.cache.LocalCache; +import com.schematic.api.types.RulesengineCompany; +import com.schematic.api.types.RulesengineFlag; +import com.schematic.api.types.RulesengineUser; +import java.time.Duration; + +/** + * Configuration options for the Schematic DataStream client. + * + *

DataStream provides real-time flag and entity data via persistent WebSocket connections + * or via an external replicator service that populates a shared cache. + */ +public class DatastreamOptions { + + private static final Duration DEFAULT_CACHE_TTL = Duration.ofHours(24); + private static final String DEFAULT_REPLICATOR_HEALTH_URL = "http://localhost:8090/ready"; + private static final Duration DEFAULT_REPLICATOR_HEALTH_CHECK_INTERVAL = Duration.ofSeconds(30); + + private final Duration cacheTTL; + private final boolean replicatorMode; + private final String replicatorHealthUrl; + private final Duration replicatorHealthCheckInterval; + private final CacheProvider flagCacheProvider; + private final CacheProvider companyCacheProvider; + private final CacheProvider userCacheProvider; + + private DatastreamOptions(Builder builder) { + this.cacheTTL = builder.cacheTTL != null ? builder.cacheTTL : DEFAULT_CACHE_TTL; + this.replicatorMode = builder.replicatorMode; + this.replicatorHealthUrl = + builder.replicatorHealthUrl != null ? builder.replicatorHealthUrl : DEFAULT_REPLICATOR_HEALTH_URL; + this.replicatorHealthCheckInterval = builder.replicatorHealthCheckInterval != null + ? builder.replicatorHealthCheckInterval + : DEFAULT_REPLICATOR_HEALTH_CHECK_INTERVAL; + this.flagCacheProvider = + builder.flagCacheProvider != null ? builder.flagCacheProvider : new LocalCache<>(10_000, this.cacheTTL); + this.companyCacheProvider = builder.companyCacheProvider != null + ? builder.companyCacheProvider + : new LocalCache<>(10_000, this.cacheTTL); + this.userCacheProvider = + builder.userCacheProvider != null ? 
builder.userCacheProvider : new LocalCache<>(10_000, this.cacheTTL); + } + + public static Builder builder() { + return new Builder(); + } + + public Duration getCacheTTL() { + return cacheTTL; + } + + public boolean isReplicatorMode() { + return replicatorMode; + } + + public String getReplicatorHealthUrl() { + return replicatorHealthUrl; + } + + public Duration getReplicatorHealthCheckInterval() { + return replicatorHealthCheckInterval; + } + + public CacheProvider getFlagCacheProvider() { + return flagCacheProvider; + } + + public CacheProvider getCompanyCacheProvider() { + return companyCacheProvider; + } + + public CacheProvider getUserCacheProvider() { + return userCacheProvider; + } + + public static class Builder { + private Duration cacheTTL; + private boolean replicatorMode; + private String replicatorHealthUrl; + private Duration replicatorHealthCheckInterval; + private CacheProvider flagCacheProvider; + private CacheProvider companyCacheProvider; + private CacheProvider userCacheProvider; + + public Builder cacheTTL(Duration cacheTTL) { + this.cacheTTL = cacheTTL; + return this; + } + + public Builder replicatorMode(boolean replicatorMode) { + this.replicatorMode = replicatorMode; + return this; + } + + public Builder replicatorHealthUrl(String replicatorHealthUrl) { + this.replicatorHealthUrl = replicatorHealthUrl; + return this; + } + + public Builder replicatorHealthCheckInterval(Duration interval) { + this.replicatorHealthCheckInterval = interval; + return this; + } + + public Builder flagCacheProvider(CacheProvider flagCacheProvider) { + this.flagCacheProvider = flagCacheProvider; + return this; + } + + public Builder companyCacheProvider(CacheProvider companyCacheProvider) { + this.companyCacheProvider = companyCacheProvider; + return this; + } + + public Builder userCacheProvider(CacheProvider userCacheProvider) { + this.userCacheProvider = userCacheProvider; + return this; + } + + /** + * Convenience method to enable replicator mode with a 
health check URL. + */ + public Builder withReplicatorMode(String healthUrl) { + this.replicatorMode = true; + this.replicatorHealthUrl = healthUrl; + return this; + } + + public DatastreamOptions build() { + return new DatastreamOptions(this); + } + } +} diff --git a/src/main/java/com/schematic/api/datastream/EntityMerge.java b/src/main/java/com/schematic/api/datastream/EntityMerge.java new file mode 100644 index 0000000..236941c --- /dev/null +++ b/src/main/java/com/schematic/api/datastream/EntityMerge.java @@ -0,0 +1,179 @@ +package com.schematic.api.datastream; + +import com.fasterxml.jackson.databind.JsonNode; +import com.fasterxml.jackson.databind.ObjectMapper; +import com.fasterxml.jackson.databind.node.ObjectNode; +import com.schematic.api.core.ObjectMappers; +import com.schematic.api.types.RulesengineCompany; +import com.schematic.api.types.RulesengineUser; +import java.util.Iterator; +import java.util.Map; + +/** + * Merge helpers for partial datastream updates. + * + *

Matches the Python SDK's merge.py: + *

    + *
  • Company: additive merge for {@code keys} and {@code credit_balances}, + * upsert for {@code metrics}, replace for all other fields
  • + *
  • User: additive merge for {@code keys}, replace for all other fields
  • + *
+ */ +final class EntityMerge { + + private static final ObjectMapper MAPPER = ObjectMappers.JSON_MAPPER; + + private EntityMerge() {} + + /** + * Merges a partial company update into an existing company. + * Only fields present in the partial are applied. + * + * @param existing the current cached company + * @param partial the partial update as raw JSON from the datastream + * @return a new merged company + */ + static RulesengineCompany partialCompany(RulesengineCompany existing, JsonNode partial) { + // Serialize existing to a mutable JSON tree + ObjectNode base = (ObjectNode) MAPPER.valueToTree(existing); + + Iterator> fields = partial.fields(); + while (fields.hasNext()) { + Map.Entry field = fields.next(); + String key = field.getKey(); + JsonNode value = field.getValue(); + + switch (key) { + case "keys": + case "credit_balances": + // Additive merge: overlay partial keys onto existing + mergeObject(base, key, value); + break; + case "metrics": + // Upsert: match by (event_subtype, period, month_reset) + upsertMetrics(base, value); + break; + default: + // Replace + base.set(key, value); + break; + } + } + + return MAPPER.convertValue(base, RulesengineCompany.class); + } + + /** + * Merges a partial user update into an existing user. + * Only fields present in the partial are applied. 
+ * + * @param existing the current cached user + * @param partial the partial update as raw JSON from the datastream + * @return a new merged user + */ + static RulesengineUser partialUser(RulesengineUser existing, JsonNode partial) { + ObjectNode base = (ObjectNode) MAPPER.valueToTree(existing); + + Iterator> fields = partial.fields(); + while (fields.hasNext()) { + Map.Entry field = fields.next(); + String key = field.getKey(); + JsonNode value = field.getValue(); + + if ("keys".equals(key)) { + // Additive merge + mergeObject(base, key, value); + } else { + // Replace + base.set(key, value); + } + } + + return MAPPER.convertValue(base, RulesengineUser.class); + } + + /** + * Additively merges an object field: existing keys are preserved, + * partial keys are added or updated. + */ + private static void mergeObject(ObjectNode base, String fieldName, JsonNode partial) { + if (!partial.isObject()) { + base.set(fieldName, partial); + return; + } + JsonNode existing = base.get(fieldName); + if (existing == null || !existing.isObject()) { + base.set(fieldName, partial); + return; + } + + ObjectNode merged = ((ObjectNode) existing).deepCopy(); + Iterator> fields = partial.fields(); + while (fields.hasNext()) { + Map.Entry field = fields.next(); + merged.set(field.getKey(), field.getValue()); + } + base.set(fieldName, merged); + } + + /** + * Upserts metrics by matching on (event_subtype, period, month_reset). + * Existing metrics with matching keys are replaced; new metrics are appended. 
+ */ + private static void upsertMetrics(ObjectNode base, JsonNode partialMetrics) { + if (!partialMetrics.isArray()) { + return; + } + JsonNode existingMetrics = base.get("metrics"); + if (existingMetrics == null || !existingMetrics.isArray()) { + base.set("metrics", partialMetrics); + return; + } + + // Build mutable list from existing + com.fasterxml.jackson.databind.node.ArrayNode result = MAPPER.createArrayNode(); + // Copy existing metrics, replacing any that match a partial metric + for (JsonNode existing : existingMetrics) { + boolean replaced = false; + for (JsonNode partial : partialMetrics) { + if (metricsMatch(existing, partial)) { + result.add(partial); + replaced = true; + break; + } + } + if (!replaced) { + result.add(existing); + } + } + // Append any partial metrics that didn't match an existing one + for (JsonNode partial : partialMetrics) { + boolean found = false; + for (JsonNode existing : existingMetrics) { + if (metricsMatch(existing, partial)) { + found = true; + break; + } + } + if (!found) { + result.add(partial); + } + } + + base.set("metrics", result); + } + + /** + * Matches metrics by (event_subtype, period, month_reset) key, + * matching the Python SDK's _metric_key() function. + */ + private static boolean metricsMatch(JsonNode a, JsonNode b) { + return textEquals(a, b, "event_subtype") && textEquals(a, b, "period") && textEquals(a, b, "month_reset"); + } + + private static boolean textEquals(JsonNode a, JsonNode b, String field) { + String aVal = a.has(field) ? a.get(field).asText("") : ""; + String bVal = b.has(field) ? 
b.get(field).asText("") : ""; + return aVal.equals(bVal); + } +} diff --git a/src/main/java/com/schematic/api/datastream/RulesEngine.java b/src/main/java/com/schematic/api/datastream/RulesEngine.java new file mode 100644 index 0000000..7a9186f --- /dev/null +++ b/src/main/java/com/schematic/api/datastream/RulesEngine.java @@ -0,0 +1,32 @@ +package com.schematic.api.datastream; + +import com.schematic.api.types.RulesengineCheckFlagResult; +import com.schematic.api.types.RulesengineCompany; +import com.schematic.api.types.RulesengineFlag; +import com.schematic.api.types.RulesengineUser; + +/** + * Interface for local flag evaluation using a rules engine. + * + *

/**
 * Interface for local flag evaluation using a rules engine.
 *
 * <p>The WASM-based implementation evaluates flag rules locally using cached
 * flag definitions, company data, and user data from the DataStream.
 */
public interface RulesEngine {

    /**
     * Returns whether the rules engine has been initialized and is ready to evaluate flags.
     * Callers should check this before invoking {@link #checkFlag}.
     */
    boolean isInitialized();

    /**
     * Evaluates a flag against the provided company and user context.
     *
     * @param flag the flag definition from the datastream cache
     * @param company the company data (may be null)
     * @param user the user data (may be null)
     * @return the evaluation result
     * @throws Exception if evaluation fails
     */
    RulesengineCheckFlagResult checkFlag(RulesengineFlag flag, RulesengineCompany company, RulesengineUser user)
            throws Exception;
}
package com.schematic.api.datastream;

import com.dylibso.chicory.runtime.ExportFunction;
import com.dylibso.chicory.runtime.HostFunction;
import com.dylibso.chicory.runtime.ImportValues;
import com.dylibso.chicory.runtime.Instance;
import com.dylibso.chicory.runtime.Memory;
import com.dylibso.chicory.wasi.WasiOptions;
import com.dylibso.chicory.wasi.WasiPreview1;
import com.dylibso.chicory.wasi.WasiPreview1_ModuleFactory;
import com.dylibso.chicory.wasm.Parser;
import com.dylibso.chicory.wasm.WasmModule;
import com.fasterxml.jackson.databind.JsonNode;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.fasterxml.jackson.databind.node.ObjectNode;
import com.schematic.api.core.ObjectMappers;
import com.schematic.api.logger.SchematicLogger;
import com.schematic.api.types.RulesengineCheckFlagResult;
import com.schematic.api.types.RulesengineCompany;
import com.schematic.api.types.RulesengineFlag;
import com.schematic.api.types.RulesengineUser;
import java.io.InputStream;
import java.nio.charset.StandardCharsets;
import java.util.Arrays;
import java.util.Iterator;
import java.util.Map;

/**
 * WASM-based rules engine for local flag evaluation.
 *
 * <p>Loads the Schematic rules engine WASM binary (compiled from Rust) and evaluates
 * flag rules locally using cached flag definitions, company data, and user data.
 * Uses the Chicory pure-Java WASM runtime (no native dependencies).
 *
 * <p>Data flow matches the Python/Node SDKs:
 * <ul>
 *   <li>Input: typed objects serialized to snake_case JSON via Jackson {@code @JsonProperty} annotations</li>
 *   <li>Output: camelCase JSON from WASM, converted to snake_case, deserialized into generated types</li>
 * </ul>
 */
public class WasmRulesEngine implements RulesEngine {

    private static final String DEFAULT_WASM_RESOURCE = "/wasm/rulesengine.wasm";

    private final SchematicLogger logger;
    private final String wasmResourcePath;

    // Set last in initialize(); the volatile write publishes the non-volatile
    // fields below to readers that first observe initialized == true.
    private volatile boolean initialized;
    private Instance instance;
    private Memory memory;
    private ExportFunction allocFn;
    private ExportFunction deallocFn;
    private ExportFunction checkFlagFn;
    private ExportFunction getResultJsonFn;
    private ExportFunction getResultJsonLengthFn;

    public WasmRulesEngine(SchematicLogger logger) {
        this(logger, DEFAULT_WASM_RESOURCE);
    }

    public WasmRulesEngine(SchematicLogger logger, String wasmResourcePath) {
        this.logger = logger;
        this.wasmResourcePath = wasmResourcePath;
        this.initialized = false;
    }

    /**
     * Initializes the WASM rules engine by loading the binary and resolving exports.
     * Safe to call multiple times (idempotent).
     *
     * @throws RuntimeException if the WASM binary is missing or cannot be instantiated
     */
    public synchronized void initialize() {
        if (initialized) {
            return;
        }

        try {
            log("info", "Initializing WASM rules engine from " + wasmResourcePath);

            // try-with-resources: the resource stream was previously leaked after parsing.
            WasmModule module;
            try (InputStream wasmStream = getClass().getResourceAsStream(wasmResourcePath)) {
                if (wasmStream == null) {
                    throw new RuntimeException("WASM binary not found at resource path: " + wasmResourcePath);
                }
                module = Parser.parse(wasmStream);
            }

            // Wire up WASI preview1 host functions required by the Rust-compiled binary.
            WasiOptions wasiOptions = WasiOptions.builder().build();
            WasiPreview1 wasi = WasiPreview1.builder().withOptions(wasiOptions).build();
            HostFunction[] hostFunctions = WasiPreview1_ModuleFactory.toHostFunctions(wasi);

            ImportValues importValues = ImportValues.builder()
                    .withFunctions(Arrays.asList(hostFunctions))
                    .build();

            instance = Instance.builder(module).withImportValues(importValues).build();

            memory = instance.memory();
            allocFn = instance.export("alloc");
            deallocFn = instance.export("dealloc");
            checkFlagFn = instance.export("checkFlagCombined");
            getResultJsonFn = instance.export("getResultJson");
            getResultJsonLengthFn = instance.export("getResultJsonLength");

            initialized = true;
            log("info", "WASM rules engine initialized successfully");
        } catch (Exception e) {
            log("error", "Failed to initialize WASM rules engine: " + e.getMessage());
            throw new RuntimeException("Failed to initialize WASM rules engine", e);
        }
    }

    @Override
    public boolean isInitialized() {
        return initialized;
    }

    @Override
    public RulesengineCheckFlagResult checkFlag(RulesengineFlag flag, RulesengineCompany company, RulesengineUser user)
            throws Exception {
        if (!initialized) {
            throw new IllegalStateException("WASM rules engine not initialized");
        }

        // Build envelope using the SDK's ObjectMapper — @JsonProperty annotations
        // produce snake_case JSON which the WASM engine accepts
        ObjectMapper mapper = ObjectMappers.JSON_MAPPER;
        ObjectNode envelope = mapper.createObjectNode();
        envelope.set("flag", mapper.valueToTree(flag));
        if (company != null) {
            envelope.set("company", mapper.valueToTree(company));
        }
        if (user != null) {
            envelope.set("user", mapper.valueToTree(user));
        }

        String inputJson = mapper.writeValueAsString(envelope);
        String resultJson = callWasm(inputJson);

        // WASM returns camelCase JSON; generated types expect snake_case.
        // Convert keys before deserializing, matching the Python SDK's approach.
        JsonNode camelNode = mapper.readTree(resultJson);
        JsonNode snakeNode = camelToSnakeKeys(camelNode);

        return mapper.treeToValue(snakeNode, RulesengineCheckFlagResult.class);
    }

    /**
     * Returns the rules engine version key, used for cache invalidation.
     *
     * @return the version key, or {@code null} if uninitialized or the export fails
     */
    public String getVersionKey() {
        if (!initialized) {
            return null;
        }
        try {
            ExportFunction versionKeyFn = instance.export("get_version_key_wasm");
            long[] result = versionKeyFn.apply();
            int ptr = (int) result[0];
            return memory.readCString(ptr);
        } catch (Exception e) {
            log("warn", "Failed to get WASM version key: " + e.getMessage());
            return null;
        }
    }

    /**
     * Writes the input JSON into WASM linear memory, invokes checkFlagCombined,
     * and reads back the result JSON. Synchronized because the WASM instance's
     * memory and result buffers are shared mutable state.
     */
    private synchronized String callWasm(String inputJson) {
        byte[] data = inputJson.getBytes(StandardCharsets.UTF_8);
        int length = data.length;

        long[] allocResult = allocFn.apply(length);
        int ptr = (int) allocResult[0];

        try {
            memory.write(ptr, data);

            long[] checkResult = checkFlagFn.apply(ptr, length);
            int resultLen = (int) checkResult[0];

            // Negative length is the engine's error signal.
            if (resultLen < 0) {
                throw new RuntimeException("WASM checkFlagCombined returned error code: " + resultLen);
            }

            long[] resultPtrArr = getResultJsonFn.apply();
            int resultPtr = (int) resultPtrArr[0];

            long[] resultLenArr = getResultJsonLengthFn.apply();
            int actualLen = (int) resultLenArr[0];

            byte[] resultBytes = memory.readBytes(resultPtr, actualLen);
            return new String(resultBytes, StandardCharsets.UTF_8);
        } finally {
            // Always release the input buffer, even on error.
            deallocFn.apply(ptr, length);
        }
    }

    /**
     * Recursively converts JSON object keys from camelCase to snake_case.
     * Matches the Python SDK's _deep_camel_to_snake() function.
     */
    static JsonNode camelToSnakeKeys(JsonNode node) {
        if (node.isObject()) {
            ObjectMapper mapper = ObjectMappers.JSON_MAPPER;
            ObjectNode result = mapper.createObjectNode();
            Iterator<Map.Entry<String, JsonNode>> fields = node.fields();
            while (fields.hasNext()) {
                Map.Entry<String, JsonNode> field = fields.next();
                String snakeKey = camelToSnake(field.getKey());
                result.set(snakeKey, camelToSnakeKeys(field.getValue()));
            }
            return result;
        } else if (node.isArray()) {
            ObjectMapper mapper = ObjectMappers.JSON_MAPPER;
            com.fasterxml.jackson.databind.node.ArrayNode result = mapper.createArrayNode();
            for (JsonNode element : node) {
                result.add(camelToSnakeKeys(element));
            }
            return result;
        }
        return node;
    }

    /**
     * Converts a camelCase string to snake_case.
     */
    static String camelToSnake(String name) {
        StringBuilder sb = new StringBuilder();
        for (int i = 0; i < name.length(); i++) {
            char c = name.charAt(i);
            if (Character.isUpperCase(c)) {
                if (i > 0) {
                    sb.append('_');
                }
                sb.append(Character.toLowerCase(c));
            } else {
                sb.append(c);
            }
        }
        return sb.toString();
    }

    // Routes a message to the injected logger at the requested level;
    // silently drops messages when no logger was provided.
    private void log(String level, String message) {
        if (logger == null) {
            return;
        }
        switch (level) {
            case "info":
                logger.info(message);
                break;
            case "warn":
                logger.warn(message);
                break;
            case "error":
                logger.error(message);
                break;
            default:
                logger.debug(message);
                break;
        }
    }
}
@@ void setUp() { .apiKey("test_api_key") .logger(logger) .eventBufferInterval(DEFAULT_BUFFER_PERIOD) - .cacheProviders(Collections.singletonList(new LocalCache())) + .cacheProviders(Collections.singletonList(new LocalCache())) .build(); } @@ -55,7 +56,7 @@ void checkFlag_HandlesNullData() { boolean result = spySchematic.checkFlag("test_flag", null, null); assertFalse(result); - verify(logger).error(contains("Error checking flag")); + verify(logger).error(contains("Error checking flag via API")); } @Test @@ -78,16 +79,24 @@ void checkFlag_CachesResultIfNotCached() { boolean result = spySchematic.checkFlag("test_flag", null, null); assertTrue(result); - for (CacheProvider provider : spySchematic.getFlagCheckCacheProviders()) { - assertEquals(true, provider.get("test_flag")); + for (CacheProvider provider : spySchematic.getFlagCheckCacheProviders()) { + RulesengineCheckFlagResult cached = provider.get("test_flag"); + assertNotNull(cached); + assertTrue(cached.getValue()); + assertEquals("test_reason", cached.getReason()); } } @Test void checkFlag_ReturnsCachedValue() { String flagKey = "test_flag"; - for (CacheProvider provider : schematic.getFlagCheckCacheProviders()) { - provider.set(flagKey, true); + RulesengineCheckFlagResult cachedResult = RulesengineCheckFlagResult.builder() + .flagKey(flagKey) + .reason("test_reason") + .value(true) + .build(); + for (CacheProvider provider : schematic.getFlagCheckCacheProviders()) { + provider.set(flagKey, cachedResult); } boolean result = schematic.checkFlag(flagKey, null, null); @@ -102,8 +111,13 @@ void checkFlag_UsesCorrectCacheKey() { Map company = Collections.singletonMap("name", "test_company"); Map user = Collections.singletonMap("id", "unique_id"); - for (CacheProvider provider : schematic.getFlagCheckCacheProviders()) { - provider.set(expectedCacheKey, true); + RulesengineCheckFlagResult cachedResult = RulesengineCheckFlagResult.builder() + .flagKey(flagKey) + .reason("test_reason") + .value(true) + .build(); + for 
package com.schematic.api.datastream;

import static org.junit.jupiter.api.Assertions.*;
import static org.mockito.Mockito.*;

import com.fasterxml.jackson.databind.JsonNode;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.fasterxml.jackson.databind.node.ArrayNode;
import com.fasterxml.jackson.databind.node.ObjectNode;
import com.schematic.api.datastream.DataStreamMessages.DataStreamResp;
import com.schematic.api.datastream.DataStreamMessages.EntityType;
import com.schematic.api.datastream.DataStreamMessages.MessageType;
import com.schematic.api.logger.SchematicLogger;
import com.schematic.api.types.RulesengineCheckFlagResult;
import com.schematic.api.types.RulesengineCompany;
import com.schematic.api.types.RulesengineFlag;
import com.schematic.api.types.RulesengineUser;
import java.time.Duration;
import java.util.Collections;
import java.util.HashMap;
import java.util.Map;
import org.junit.jupiter.api.AfterEach;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.extension.ExtendWith;
import org.mockito.Mock;
import org.mockito.junit.jupiter.MockitoExtension;

/**
 * Unit tests for {@link DataStreamClient}: flag/company/user caching,
 * partial-update merging, error handling, local checkFlag evaluation,
 * replicator mode, and lifecycle (close/start) behavior.
 */
@ExtendWith(MockitoExtension.class)
class DataStreamClientTest {

    @Mock
    private SchematicLogger logger;

    private ObjectMapper objectMapper;
    private DataStreamClient client;

    @BeforeEach
    void setUp() {
        objectMapper = new ObjectMapper();
        DatastreamOptions options =
                DatastreamOptions.builder().cacheTTL(Duration.ofMinutes(5)).build();
        client = new DataStreamClient(options, "test-api-key", "https://api.schematichq.com", logger);
    }

    @AfterEach
    void tearDown() {
        if (client != null) {
            client.close();
        }
    }

    // --- Flag caching tests ---

    @Test
    void handleMessage_fullFlag_cachesFlag() {
        DataStreamResp resp =
                buildResp(EntityType.FLAG.getValue(), MessageType.FULL.getValue(), null, flagNode("test-flag", true));

        client.handleMessage(resp);

        RulesengineFlag cached = client.getCachedFlag("test-flag");
        assertNotNull(cached);
        assertEquals("test-flag", cached.getKey());
        assertTrue(cached.getDefaultValue());
    }

    @Test
    void handleMessage_fullFlags_cachesMultipleFlags() {
        ObjectNode flag1 = flagNode("flag-1", true);
        ObjectNode flag2 = flagNode("flag-2", false);
        ArrayNode arrayData = objectMapper.createArrayNode();
        arrayData.add(flag1).add(flag2);

        DataStreamResp resp = buildResp(EntityType.FLAGS.getValue(), MessageType.FULL.getValue(), null, arrayData);

        client.handleMessage(resp);

        assertNotNull(client.getCachedFlag("flag-1"));
        assertNotNull(client.getCachedFlag("flag-2"));
    }

    @Test
    void handleMessage_deleteFlag_removesFromCache() {
        // Cache a flag
        client.handleMessage(
                buildResp(EntityType.FLAG.getValue(), MessageType.FULL.getValue(), null, flagNode("delete-me", true)));
        assertNotNull(client.getCachedFlag("delete-me"));

        // Delete it
        ObjectNode deleteData = objectMapper.createObjectNode();
        deleteData.put("key", "delete-me");
        client.handleMessage(buildResp(EntityType.FLAG.getValue(), MessageType.DELETE.getValue(), null, deleteData));

        assertNull(client.getCachedFlag("delete-me"));
    }

    @Test
    void getCachedFlag_returnsNullWhenNotCached() {
        assertNull(client.getCachedFlag("nonexistent"));
    }

    // --- Company caching tests ---

    @Test
    void handleMessage_fullCompany_cachesCompanyAndKeys() {
        ObjectNode company = companyNode("comp-1", "customer_id", "cust-123");

        client.handleMessage(buildResp(EntityType.COMPANY.getValue(), MessageType.FULL.getValue(), "comp-1", company));

        Map<String, String> keys = new HashMap<>();
        keys.put("customer_id", "cust-123");
        RulesengineCompany cached = client.getCachedCompany(keys);
        assertNotNull(cached);
        assertEquals("comp-1", cached.getId());
    }

    @Test
    void handleMessage_fullCompanies_cachesMultiple() {
        ObjectNode comp1 = companyNode("c1", "id", "c1-key");
        ObjectNode comp2 = companyNode("c2", "id", "c2-key");
        ArrayNode arrayData = objectMapper.createArrayNode();
        arrayData.add(comp1).add(comp2);

        client.handleMessage(buildResp(EntityType.COMPANIES.getValue(), MessageType.FULL.getValue(), null, arrayData));

        assertNotNull(client.getCachedCompany(Collections.singletonMap("id", "c1-key")));
        assertNotNull(client.getCachedCompany(Collections.singletonMap("id", "c2-key")));
    }

    @Test
    void getCachedCompany_returnsNullForNullKeys() {
        assertNull(client.getCachedCompany(null));
        assertNull(client.getCachedCompany(new HashMap<>()));
    }

    @Test
    void handleMessage_partialCompany_mergesKeys() {
        // Cache a company
        client.handleMessage(buildResp(
                EntityType.COMPANY.getValue(),
                MessageType.FULL.getValue(),
                "comp-1",
                companyNode("comp-1", "customer_id", "cust-123")));

        // Send partial update adding a new key
        ObjectNode partial = objectMapper.createObjectNode();
        partial.put("id", "comp-1");
        ObjectNode newKeys = objectMapper.createObjectNode();
        newKeys.put("external_id", "ext-456");
        partial.set("keys", newKeys);

        client.handleMessage(
                buildResp(EntityType.COMPANY.getValue(), MessageType.PARTIAL.getValue(), "comp-1", partial));

        // Both old and new keys should work for lookup
        assertNotNull(client.getCachedCompany(Collections.singletonMap("customer_id", "cust-123")));
        assertNotNull(client.getCachedCompany(Collections.singletonMap("external_id", "ext-456")));
    }

    @Test
    void handleMessage_partialUser_mergesKeys() {
        client.handleMessage(buildResp(
                EntityType.USER.getValue(),
                MessageType.FULL.getValue(),
                "user-1",
                userNode("user-1", "email", "test@example.com")));

        ObjectNode partial = objectMapper.createObjectNode();
        partial.put("id", "user-1");
        ObjectNode newKeys = objectMapper.createObjectNode();
        newKeys.put("external_id", "ext-789");
        partial.set("keys", newKeys);

        client.handleMessage(buildResp(EntityType.USER.getValue(), MessageType.PARTIAL.getValue(), "user-1", partial));

        assertNotNull(client.getCachedUser(Collections.singletonMap("email", "test@example.com")));
        assertNotNull(client.getCachedUser(Collections.singletonMap("external_id", "ext-789")));
    }

    // --- User caching tests ---

    @Test
    void handleMessage_fullUser_cachesUserAndKeys() {
        ObjectNode user = userNode("user-1", "email", "test@example.com");

        client.handleMessage(buildResp(EntityType.USER.getValue(), MessageType.FULL.getValue(), "user-1", user));

        Map<String, String> keys = new HashMap<>();
        keys.put("email", "test@example.com");
        RulesengineUser cached = client.getCachedUser(keys);
        assertNotNull(cached);
        assertEquals("user-1", cached.getId());
    }

    @Test
    void getCachedUser_returnsNullForNullKeys() {
        assertNull(client.getCachedUser(null));
        assertNull(client.getCachedUser(new HashMap<>()));
    }

    // --- Error handling tests ---

    @Test
    void handleMessage_errorMessage_logsError() {
        ObjectNode errorData = objectMapper.createObjectNode();
        errorData.put("error", "something went wrong");

        client.handleMessage(buildResp(EntityType.FLAG.getValue(), MessageType.ERROR.getValue(), null, errorData));

        verify(logger).error(contains("DataStream error"));
    }

    @Test
    void handleMessage_unknownEntityType_logsWarning() {
        client.handleMessage(
                buildResp("rulesengine.Unknown", MessageType.FULL.getValue(), null, objectMapper.createObjectNode()));

        verify(logger).warn(contains("unknown entity type"));
    }

    @Test
    void handleMessage_nullData_doesNotThrow() {
        DataStreamResp resp = buildResp(EntityType.FLAG.getValue(), MessageType.FULL.getValue(), null, null);
        assertDoesNotThrow(() -> client.handleMessage(resp));
    }

    // --- checkFlag tests ---

    @Test
    void checkFlag_throwsWhenFlagNotInCache() {
        assertThrows(DataStreamException.class, () -> client.checkFlag("nonexistent", null, null));
    }

    @Test
    void checkFlag_returnsDefaultWhenNoRulesEngine() {
        client.handleMessage(
                buildResp(EntityType.FLAG.getValue(), MessageType.FULL.getValue(), null, flagNode("my-flag", true)));

        RulesengineCheckFlagResult result = client.checkFlag("my-flag", null, null);
        assertEquals("my-flag", result.getFlagKey());
        assertTrue(result.getValue());
        assertEquals("RULES_ENGINE_UNAVAILABLE", result.getReason());
    }

    @Test
    void checkFlag_returnsDefaultFalseWhenNoRulesEngine() {
        client.handleMessage(buildResp(
                EntityType.FLAG.getValue(), MessageType.FULL.getValue(), null, flagNode("false-flag", false)));

        RulesengineCheckFlagResult result = client.checkFlag("false-flag", null, null);
        assertFalse(result.getValue());
        assertEquals("RULES_ENGINE_UNAVAILABLE", result.getReason());
    }

    @Test
    void checkFlag_withRulesEngine_evaluatesLocally() throws Exception {
        RulesEngine mockEngine = mock(RulesEngine.class);
        when(mockEngine.isInitialized()).thenReturn(true);
        when(mockEngine.checkFlag(any(), any(), any()))
                .thenReturn(RulesengineCheckFlagResult.builder()
                        .flagKey("engine-flag")
                        .reason("RULES_ENGINE_EVALUATION")
                        .value(true)
                        .ruleId("rule-123")
                        .build());

        DatastreamOptions opts = DatastreamOptions.builder().build();
        DataStreamClient clientWithEngine =
                new DataStreamClient(opts, "test-key", "https://api.schematichq.com", logger, mockEngine);

        clientWithEngine.handleMessage(buildResp(
                EntityType.FLAG.getValue(), MessageType.FULL.getValue(), null, flagNode("engine-flag", false)));

        RulesengineCheckFlagResult result = clientWithEngine.checkFlag("engine-flag", null, null);
        assertTrue(result.getValue());
        assertEquals("RULES_ENGINE_EVALUATION", result.getReason());
        assertEquals("rule-123", result.getRuleId().orElse(null));

        clientWithEngine.close();
    }

    @Test
    void checkFlag_withRulesEngine_fallsBackOnError() throws Exception {
        RulesEngine mockEngine = mock(RulesEngine.class);
        when(mockEngine.isInitialized()).thenReturn(true);
        when(mockEngine.checkFlag(any(), any(), any())).thenThrow(new RuntimeException("WASM error"));

        DatastreamOptions opts = DatastreamOptions.builder().build();
        DataStreamClient clientWithEngine =
                new DataStreamClient(opts, "test-key", "https://api.schematichq.com", logger, mockEngine);

        clientWithEngine.handleMessage(
                buildResp(EntityType.FLAG.getValue(), MessageType.FULL.getValue(), null, flagNode("error-flag", true)));

        RulesengineCheckFlagResult result = clientWithEngine.checkFlag("error-flag", null, null);
        assertTrue(result.getValue());
        assertEquals("RULES_ENGINE_ERROR", result.getReason());
        assertEquals("WASM error", result.getErr().orElse(null));

        clientWithEngine.close();
    }

    @Test
    void checkFlag_withCachedCompanyAndUser() {
        client.handleMessage(
                buildResp(EntityType.FLAG.getValue(), MessageType.FULL.getValue(), null, flagNode("ctx-flag", true)));
        client.handleMessage(buildResp(
                EntityType.COMPANY.getValue(),
                MessageType.FULL.getValue(),
                "comp-1",
                companyNode("comp-1", "customer_id", "cust-123")));
        client.handleMessage(buildResp(
                EntityType.USER.getValue(),
                MessageType.FULL.getValue(),
                "user-1",
                userNode("user-1", "email", "test@example.com")));

        Map<String, String> companyKeys = Collections.singletonMap("customer_id", "cust-123");
        Map<String, String> userKeys = Collections.singletonMap("email", "test@example.com");

        RulesengineCheckFlagResult result = client.checkFlag("ctx-flag", companyKeys, userKeys);
        assertEquals("ctx-flag", result.getFlagKey());
        assertEquals("RULES_ENGINE_UNAVAILABLE", result.getReason());
        assertEquals("comp-1", result.getCompanyId().orElse(null));
        assertEquals("user-1", result.getUserId().orElse(null));
    }

    // --- evaluateFlag tests ---

    @Test
    void evaluateFlag_extractsFlagId() {
        RulesengineFlag flag = RulesengineFlag.builder()
                .accountId("acc_1")
                .defaultValue(true)
                .environmentId("env_1")
                .id("flag-uuid-123")
                .key("id-flag")
                .rules(Collections.emptyList())
                .build();

        RulesengineCheckFlagResult result = client.evaluateFlag(flag, null, null);
        assertEquals("flag-uuid-123", result.getFlagId().orElse(null));
        assertTrue(result.getValue());
    }

    // --- Replicator mode tests ---

    @Test
    void isReplicatorMode_falseByDefault() {
        assertFalse(client.isReplicatorMode());
    }

    @Test
    void isReplicatorMode_trueWhenConfigured() {
        DatastreamOptions options = DatastreamOptions.builder()
                .withReplicatorMode("http://localhost:8090/ready")
                .build();
        DataStreamClient replicatorClient =
                new DataStreamClient(options, "test-key", "https://api.schematichq.com", logger);

        assertTrue(replicatorClient.isReplicatorMode());
        replicatorClient.close();
    }

    @Test
    void isConnected_falseInitially() {
        assertFalse(client.isConnected());
    }

    // --- Close tests ---

    @Test
    void close_isIdempotent() {
        client.close();
        assertDoesNotThrow(() -> client.close());
    }

    @Test
    void start_throwsAfterClose() {
        client.close();
        assertThrows(IllegalStateException.class, () -> client.start());
    }

    // --- Helper methods: build JSON matching the generated type's @JsonProperty format ---

    private ObjectNode flagNode(String key, boolean defaultValue) {
        ObjectNode node = objectMapper.createObjectNode();
        node.put("key", key);
        node.put("id", "flag_" + key);
        node.put("account_id", "acc_1");
        node.put("environment_id", "env_1");
        node.put("default_value", defaultValue);
        node.set("rules", objectMapper.createArrayNode());
        return node;
    }

    private ObjectNode companyNode(String id, String keyName, String keyValue) {
        ObjectNode node = objectMapper.createObjectNode();
        node.put("id", id);
        node.put("account_id", "acc_1");
        node.put("environment_id", "env_1");
        ObjectNode keys = objectMapper.createObjectNode();
        keys.put(keyName, keyValue);
        node.set("keys", keys);
        node.set("traits", objectMapper.createArrayNode());
        node.set("metrics", objectMapper.createArrayNode());
        node.set("rules", objectMapper.createArrayNode());
        node.set("billing_product_ids", objectMapper.createArrayNode());
        node.set("credit_balances", objectMapper.createObjectNode());
        node.set("plan_ids", objectMapper.createArrayNode());
        node.set("plan_version_ids", objectMapper.createArrayNode());
        return node;
    }

    private ObjectNode userNode(String id, String keyName, String keyValue) {
        ObjectNode node = objectMapper.createObjectNode();
        node.put("id", id);
        node.put("account_id", "acc_1");
        node.put("environment_id", "env_1");
        ObjectNode keys = objectMapper.createObjectNode();
        keys.put(keyName, keyValue);
        node.set("keys", keys);
        node.set("traits", objectMapper.createArrayNode());
        node.set("rules", objectMapper.createArrayNode());
        return node;
    }

    // Builds a DataStreamResp via Jackson so the snake_case wire format is exercised.
    private DataStreamResp buildResp(String entityType, String messageType, String entityId, JsonNode data) {
        ObjectNode respNode = objectMapper.createObjectNode();
        respNode.put("entity_type", entityType);
        respNode.put("message_type", messageType);
        if (entityId != null) {
            respNode.put("entity_id", entityId);
        }
        if (data != null) {
            respNode.set("data", data);
        }
        return objectMapper.convertValue(respNode, DataStreamResp.class);
    }
}
package com.schematic.api.datastream;

import static org.junit.jupiter.api.Assertions.*;

import com.fasterxml.jackson.databind.ObjectMapper;
import com.fasterxml.jackson.databind.node.ArrayNode;
import com.fasterxml.jackson.databind.node.ObjectNode;
import com.schematic.api.core.ObjectMappers;
import com.schematic.api.types.RulesengineCompany;
import com.schematic.api.types.RulesengineUser;
import java.util.Collections;
import java.util.HashMap;
import java.util.Map;
import org.junit.jupiter.api.Test;

/**
 * Unit tests for {@link EntityMerge}: partial-update semantics for cached
 * company and user entities (additive key/balance merges, metric upserts,
 * replace-on-present for scalar fields).
 */
class EntityMergeTest {

    private final ObjectMapper objectMapper = ObjectMappers.JSON_MAPPER;

    // --- Company partial merge tests ---

    @Test
    void partialCompany_mergesKeysAdditively() {
        Map<String, String> existingKeys = new HashMap<>();
        existingKeys.put("customer_id", "cust-1");
        existingKeys.put("org_id", "org-1");

        RulesengineCompany existing = buildCompany("comp-1", existingKeys);

        ObjectNode partial = objectMapper.createObjectNode();
        partial.put("id", "comp-1");
        ObjectNode newKeys = objectMapper.createObjectNode();
        newKeys.put("customer_id", "cust-updated");
        newKeys.put("external_id", "ext-1");
        partial.set("keys", newKeys);

        RulesengineCompany merged = EntityMerge.partialCompany(existing, partial);

        // Original key preserved, updated key changed, new key added
        assertEquals("cust-updated", merged.getKeys().get("customer_id"));
        assertEquals("org-1", merged.getKeys().get("org_id"));
        assertEquals("ext-1", merged.getKeys().get("external_id"));
    }

    @Test
    void partialCompany_mergesCreditBalancesAdditively() {
        Map<String, Double> existingBalances = new HashMap<>();
        existingBalances.put("credits_a", 100.0);
        existingBalances.put("credits_b", 50.0);

        RulesengineCompany existing = RulesengineCompany.builder()
                .accountId("acc_1")
                .environmentId("env_1")
                .id("comp-1")
                .keys(Collections.singletonMap("id", "comp-1"))
                .traits(Collections.emptyList())
                .metrics(Collections.emptyList())
                .rules(Collections.emptyList())
                .billingProductIds(Collections.emptyList())
                .creditBalances(existingBalances)
                .planIds(Collections.emptyList())
                .planVersionIds(Collections.emptyList())
                .build();

        ObjectNode partial = objectMapper.createObjectNode();
        partial.put("id", "comp-1");
        ObjectNode newBalances = objectMapper.createObjectNode();
        newBalances.put("credits_a", 75.0);
        newBalances.put("credits_c", 200.0);
        partial.set("credit_balances", newBalances);

        RulesengineCompany merged = EntityMerge.partialCompany(existing, partial);

        assertEquals(75.0, merged.getCreditBalances().get("credits_a"));
        assertEquals(50.0, merged.getCreditBalances().get("credits_b"));
        assertEquals(200.0, merged.getCreditBalances().get("credits_c"));
    }

    @Test
    void partialCompany_replacesOtherFields() {
        RulesengineCompany existing = buildCompany("comp-1", Collections.singletonMap("id", "comp-1"));

        ObjectNode partial = objectMapper.createObjectNode();
        partial.put("id", "comp-1");
        partial.put("account_id", "acc_new");

        RulesengineCompany merged = EntityMerge.partialCompany(existing, partial);

        assertEquals("acc_new", merged.getAccountId());
        // Untouched fields preserved
        assertEquals("env_1", merged.getEnvironmentId());
    }

    @Test
    void partialCompany_upsertsMetrics() {
        RulesengineCompany existing = buildCompany("comp-1", Collections.singletonMap("id", "comp-1"));

        // Add initial metrics via a full update
        ObjectNode fullData = (ObjectNode) objectMapper.valueToTree(existing);
        ArrayNode existingMetrics = objectMapper.createArrayNode();
        ObjectNode metric1 = objectMapper.createObjectNode();
        metric1.put("event_subtype", "api_calls");
        metric1.put("period", "monthly");
        metric1.put("month_reset", "first");
        metric1.put("value", 10);
        existingMetrics.add(metric1);
        fullData.set("metrics", existingMetrics);
        existing = objectMapper.convertValue(fullData, RulesengineCompany.class);

        // Partial update with matching metric (should replace) and new metric (should append)
        ObjectNode partial = objectMapper.createObjectNode();
        partial.put("id", "comp-1");
        ArrayNode partialMetrics = objectMapper.createArrayNode();
        ObjectNode updatedMetric = objectMapper.createObjectNode();
        updatedMetric.put("event_subtype", "api_calls");
        updatedMetric.put("period", "monthly");
        updatedMetric.put("month_reset", "first");
        updatedMetric.put("value", 25);
        partialMetrics.add(updatedMetric);
        ObjectNode newMetric = objectMapper.createObjectNode();
        newMetric.put("event_subtype", "page_views");
        newMetric.put("period", "daily");
        newMetric.put("month_reset", "");
        newMetric.put("value", 5);
        partialMetrics.add(newMetric);
        partial.set("metrics", partialMetrics);

        RulesengineCompany merged = EntityMerge.partialCompany(existing, partial);

        assertEquals(2, merged.getMetrics().size());
    }

    // --- User partial merge tests ---

    @Test
    void partialUser_mergesKeysAdditively() {
        Map<String, String> existingKeys = new HashMap<>();
        existingKeys.put("email", "old@example.com");
        existingKeys.put("user_id", "u-1");

        RulesengineUser existing = buildUser("user-1", existingKeys);

        ObjectNode partial = objectMapper.createObjectNode();
        partial.put("id", "user-1");
        ObjectNode newKeys = objectMapper.createObjectNode();
        newKeys.put("email", "new@example.com");
        newKeys.put("external_id", "ext-1");
        partial.set("keys", newKeys);

        RulesengineUser merged = EntityMerge.partialUser(existing, partial);

        assertEquals("new@example.com", merged.getKeys().get("email"));
        assertEquals("u-1", merged.getKeys().get("user_id"));
        assertEquals("ext-1", merged.getKeys().get("external_id"));
    }

    @Test
    void partialUser_replacesOtherFields() {
        RulesengineUser existing = buildUser("user-1", Collections.singletonMap("id", "user-1"));

        ObjectNode partial = objectMapper.createObjectNode();
        partial.put("id", "user-1");
        partial.put("account_id", "acc_new");

        RulesengineUser merged = EntityMerge.partialUser(existing, partial);

        assertEquals("acc_new", merged.getAccountId());
        assertEquals("env_1", merged.getEnvironmentId());
    }

    @Test
    void partialUser_onlyAppliesPresentFields() {
        Map<String, String> existingKeys = new HashMap<>();
        existingKeys.put("email", "test@example.com");

        RulesengineUser existing = buildUser("user-1", existingKeys);

        // Partial with only account_id — keys should be untouched
        ObjectNode partial = objectMapper.createObjectNode();
        partial.put("id", "user-1");
        partial.put("account_id", "acc_2");

        RulesengineUser merged = EntityMerge.partialUser(existing, partial);

        assertEquals("acc_2", merged.getAccountId());
        assertEquals("test@example.com", merged.getKeys().get("email"));
    }

    // --- Helpers ---

    private RulesengineCompany buildCompany(String id, Map<String, String> keys) {
        return RulesengineCompany.builder()
                .accountId("acc_1")
                .environmentId("env_1")
                .id(id)
                .keys(keys)
                .traits(Collections.emptyList())
                .metrics(Collections.emptyList())
                .rules(Collections.emptyList())
                .billingProductIds(Collections.emptyList())
                .creditBalances(Collections.emptyMap())
                .planIds(Collections.emptyList())
                .planVersionIds(Collections.emptyList())
                .build();
    }

    private RulesengineUser buildUser(String id, Map<String, String> keys) {
        return RulesengineUser.builder()
                .accountId("acc_1")
                .environmentId("env_1")
                .id(id)
                .keys(keys)
                .traits(Collections.emptyList())
                .rules(Collections.emptyList())
                .build();
    }
}
b/src/test/java/com/schematic/api/datastream/TestDataStreamWebSocketClient.java @@ -32,8 +32,7 @@ void convertApiUrlToWebSocketUrl_apiSubdomainWithHttps() { @Test void convertApiUrlToWebSocketUrl_apiSubdomainWithStaging() { - String result = - DataStreamWebSocketClient.convertApiUrlToWebSocketUrl("https://api.staging.schematichq.com"); + String result = DataStreamWebSocketClient.convertApiUrlToWebSocketUrl("https://api.staging.schematichq.com"); assertEquals("wss://datastream.staging.schematichq.com/datastream", result); } diff --git a/src/test/java/com/schematic/api/datastream/WasmRulesEngineTest.java b/src/test/java/com/schematic/api/datastream/WasmRulesEngineTest.java new file mode 100644 index 0000000..8d13a08 --- /dev/null +++ b/src/test/java/com/schematic/api/datastream/WasmRulesEngineTest.java @@ -0,0 +1,189 @@ +package com.schematic.api.datastream; + +import static org.junit.jupiter.api.Assertions.*; + +import com.schematic.api.logger.SchematicLogger; +import com.schematic.api.types.RulesengineCheckFlagResult; +import com.schematic.api.types.RulesengineCompany; +import com.schematic.api.types.RulesengineFlag; +import com.schematic.api.types.RulesengineRule; +import com.schematic.api.types.RulesengineRuleRuleType; +import com.schematic.api.types.RulesengineUser; +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.extension.ExtendWith; +import org.mockito.Mock; +import org.mockito.junit.jupiter.MockitoExtension; + +@ExtendWith(MockitoExtension.class) +class WasmRulesEngineTest { + + @Mock + private static SchematicLogger logger; + + private static WasmRulesEngine engine; + + @BeforeAll + static void setUp() { + engine = new WasmRulesEngine(logger); + engine.initialize(); + } + + @Test + void initialize_isIdempotent() { + engine.initialize(); + assertTrue(engine.isInitialized()); + } + + @Test + void 
getVersionKey_returnsNonNull() { + String versionKey = engine.getVersionKey(); + assertNotNull(versionKey); + assertFalse(versionKey.isEmpty()); + } + + @Test + void checkFlag_globalOverrideTrue() throws Exception { + List rules = new ArrayList<>(); + rules.add(buildRule("rule1", "global_override", true, 1)); + + RulesengineFlag flag = buildFlag("test-flag", false, rules); + + RulesengineCheckFlagResult result = engine.checkFlag(flag, null, null); + + assertNotNull(result); + assertEquals("test-flag", result.getFlagKey()); + assertTrue(result.getValue()); + } + + @Test + void checkFlag_defaultValueFalse_noRules() throws Exception { + RulesengineFlag flag = buildFlag("default-flag", false, Collections.emptyList()); + + RulesengineCheckFlagResult result = engine.checkFlag(flag, null, null); + + assertNotNull(result); + assertEquals("default-flag", result.getFlagKey()); + assertFalse(result.getValue()); + } + + @Test + void checkFlag_defaultValueTrue_noRules() throws Exception { + RulesengineFlag flag = buildFlag("true-flag", true, Collections.emptyList()); + + RulesengineCheckFlagResult result = engine.checkFlag(flag, null, null); + + assertNotNull(result); + assertEquals("true-flag", result.getFlagKey()); + assertTrue(result.getValue()); + } + + @Test + void checkFlag_withCompanyContext() throws Exception { + List rules = new ArrayList<>(); + rules.add(buildRule("rule1", "global_override", true, 1)); + RulesengineFlag flag = buildFlag("company-flag", false, rules); + + RulesengineCompany company = buildCompany("comp-1"); + + RulesengineCheckFlagResult result = engine.checkFlag(flag, company, null); + + assertNotNull(result); + assertTrue(result.getValue()); + } + + @Test + void checkFlag_withUserContext() throws Exception { + List rules = new ArrayList<>(); + rules.add(buildRule("rule1", "global_override", true, 1)); + RulesengineFlag flag = buildFlag("user-flag", false, rules); + + RulesengineUser user = buildUser("user-1"); + + RulesengineCheckFlagResult result = 
engine.checkFlag(flag, null, user); + + assertNotNull(result); + assertTrue(result.getValue()); + } + + @Test + void checkFlag_withCompanyAndUserContext() throws Exception { + List rules = new ArrayList<>(); + rules.add(buildRule("rule1", "global_override", true, 1)); + RulesengineFlag flag = buildFlag("full-flag", false, rules); + + RulesengineCompany company = buildCompany("comp-1"); + RulesengineUser user = buildUser("user-1"); + + RulesengineCheckFlagResult result = engine.checkFlag(flag, company, user); + + assertNotNull(result); + assertTrue(result.getValue()); + } + + @Test + void camelToSnake_convertsCorrectly() { + assertEquals("flag_key", WasmRulesEngine.camelToSnake("flagKey")); + assertEquals("company_id", WasmRulesEngine.camelToSnake("companyId")); + assertEquals("feature_usage_reset_at", WasmRulesEngine.camelToSnake("featureUsageResetAt")); + assertEquals("value", WasmRulesEngine.camelToSnake("value")); + assertEquals("id", WasmRulesEngine.camelToSnake("id")); + } + + // --- Helpers using generated types --- + + private RulesengineFlag buildFlag(String key, boolean defaultValue, List rules) { + return RulesengineFlag.builder() + .accountId("acc_1") + .defaultValue(defaultValue) + .environmentId("env_1") + .id("flag_" + key) + .key(key) + .rules(rules) + .build(); + } + + private RulesengineRule buildRule(String id, String ruleType, boolean value, int priority) { + return RulesengineRule.builder() + .accountId("acc_1") + .environmentId("env_1") + .id(id) + .name(ruleType) + .priority(priority) + .ruleType(RulesengineRuleRuleType.GLOBAL_OVERRIDE) + .value(value) + .conditions(Collections.emptyList()) + .conditionGroups(Collections.emptyList()) + .build(); + } + + private RulesengineCompany buildCompany(String id) { + return RulesengineCompany.builder() + .accountId("acc_1") + .environmentId("env_1") + .id(id) + .keys(Collections.singletonMap("id", id)) + .traits(Collections.emptyList()) + .metrics(Collections.emptyList()) + 
.rules(Collections.emptyList()) + .billingProductIds(Collections.emptyList()) + .creditBalances(Collections.emptyMap()) + .planIds(Collections.emptyList()) + .planVersionIds(Collections.emptyList()) + .build(); + } + + private RulesengineUser buildUser(String id) { + return RulesengineUser.builder() + .accountId("acc_1") + .environmentId("env_1") + .id(id) + .keys(Collections.singletonMap("id", id)) + .traits(Collections.emptyList()) + .rules(Collections.emptyList()) + .build(); + } +} From 8ee0963c8c89da523ebd09866a9959a4c7938d7a Mon Sep 17 00:00:00 2001 From: Christopher Brady Date: Thu, 2 Apr 2026 08:51:38 -0600 Subject: [PATCH 05/27] add pr review skill --- .claude/skills/pr-review/SKILL.md | 129 ++++++++++++++++++++++++++++++ .fernignore | 1 + 2 files changed, 130 insertions(+) create mode 100644 .claude/skills/pr-review/SKILL.md diff --git a/.claude/skills/pr-review/SKILL.md b/.claude/skills/pr-review/SKILL.md new file mode 100644 index 0000000..42a0add --- /dev/null +++ b/.claude/skills/pr-review/SKILL.md @@ -0,0 +1,129 @@ +--- +name: pr-review +description: Review code changes on the current branch for quality, bugs, performance, and security +disable-model-invocation: true +argument-hint: "[optional: LINEAR-TICKET-ID]" +allowed-tools: Read, Grep, Glob, Bash(git diff:*), Bash(git log:*), Bash(git show:*), Bash(git branch:*), Bash(gh pr:*), Bash(gh api:*), Bash(~/.claude/scripts/fetch-github-pr.sh:*), Bash(~/.claude/scripts/fetch-sentry-data.sh:*), Bash(~/.claude/scripts/fetch-slack-thread.sh:*) +--- + +# Code Review + +You are reviewing code changes on the current branch. Your review must be based on the **current state of the code right now**, not on anything you've seen earlier in this conversation. + +## CRITICAL: Always Use Fresh Data + +**IGNORE any file contents, diffs, or line numbers you may have seen earlier in this conversation.** They may be stale. You MUST re-fetch everything from scratch using the commands below. 
+ +## Step 1: Get the Current Diff and PR Context + +Run ALL of these commands to get a fresh view: + +```bash +# The authoritative diff -- only review what's in HERE +git diff main...HEAD + +# Recent commits on this branch +git log --oneline main..HEAD + +# PR description and comments +gh pr view --json number,title,body,comments,reviews,reviewRequests +``` + +Also fetch PR review comments (inline code comments): + +```bash +# Get the PR number +PR_NUMBER=$(gh pr view --json number -q '.number') + +# Fetch all review comments (inline comments on specific lines) +gh api repos/{owner}/{repo}/pulls/$PR_NUMBER/comments --jq '.[] | {path: .path, line: .line, body: .body, user: .user.login, created_at: .created_at}' + +# Fetch review-level comments (general review comments) +gh api repos/{owner}/{repo}/pulls/$PR_NUMBER/reviews --jq '.[] | {state: .state, body: .body, user: .user.login}' +``` + +## Step 2: Understand Context from PR Comments + +Before reviewing, read through the PR comments and review comments. Note **who** said what (by username). + +- **Already-addressed feedback**: If a reviewer pointed out an issue and the author has already fixed it (the fix is visible in the current diff), do NOT re-raise it. +- **Ongoing discussions**: Note any unresolved threads -- your review should take these into account. +- **Previous approvals/requests for changes**: Understand what reviewers have already looked at. + +**IMPORTANT**: Your review is YOUR independent review. Do not take credit for or reference other reviewers' findings as if they were yours. If another reviewer already flagged something, you can note "as [reviewer] pointed out" but do not present their feedback as your own prior review. Your verdict should be based solely on your own analysis of the current code. + +## Step 3: Get Requirements Context + +Check if a Linear ticket ID was provided as an argument ($ARGUMENTS). 
If not, try to extract it from the branch name (pattern: `{username}/{linear-ticket}-{title}`). + +If a Linear ticket is found: +- Use Linear MCP tools (`get_issue`) to get the issue details and comments +- **Check for a parent ticket**: If the issue has a parent issue, fetch the parent too. Our pattern is to have a parent ticket with project-wide requirements and sub-tickets for specific tasks (often one per repo/PR). The parent ticket will contain the full scope of the project, while the sub-ticket scopes what this specific PR should cover. Use both to assess completeness — the PR should fulfill the sub-ticket's scope, and that scope should be a reasonable subset of the parent's backend-related requirements. +- Look for Sentry links in the description/comments; if found, use Sentry MCP tools to get error details +- Assess whether the changes fulfill the ticket requirements + +If no ticket is found, check the PR description for context on what the changes are meant to accomplish. + +## Step 4: Review the Code + +Review ONLY the changed lines (from `git diff main...HEAD`). Do not comment on unchanged code. + +**When referencing code, always use the file path and quote the actual code snippet** rather than citing line numbers, since line numbers shift as the branch evolves. + +### Code Quality +- Is the code well-structured and maintainable? +- Does it follow CLAUDE.md conventions? (import grouping, error handling with lib/errors, naming, alphabetization, etc.) +- Any AI-generated slop? 
(excessive comments, unnecessary abstractions, over-engineering) + +### Performance +- N+1 queries, inefficient loops, missing indexes for new queries +- Unbuffered writes in hot paths (especially ClickHouse) +- Missing LIMIT clauses on potentially large result sets + +### Bugs +- Nil pointer risks (especially on struct pointer params and optional relations) +- Functions returning `nil, nil` (violates convention) +- Missing error handling +- Race conditions in concurrent code paths + +### Security +- Hardcoded secrets or sensitive data exposure +- Missing input validation on service request structs + +### Tests +- Are there tests for the new/changed code? +- Do the tests cover edge cases and error paths? +- Are test assertions specific (not just "no error")? + +## Step 5: Present the Review + +Structure your review as: + +``` +## Summary +[1-2 sentences: what this PR does and overall assessment] + +## Requirements Check +[Does the PR fulfill the Linear ticket / PR description requirements? Any gaps?] + +## Issues +### Critical (must fix before merge) +- [blocking issues] + +### Suggestions (nice to have) +- [non-blocking improvements] + +## Prior Review Activity +[Summarize what other reviewers have flagged, attributed by name. Note which of their concerns have been addressed in the current code and which remain open.] + +## Verdict +[LGTM / Needs changes / Needs discussion -- based on YOUR analysis, not other reviewers' findings] +``` + +## Guidelines + +- Be concise. Don't pad with praise or filler. +- Only raise issues that matter. Don't nitpick formatting (that's what linters are for). +- Quote code snippets rather than referencing line numbers. +- If PR comments show a discussion was already resolved, don't reopen it. +- If you're unsure about something, flag it as a question rather than a definitive issue. 
diff --git a/.fernignore b/.fernignore index 231a7a2..df9fc3d 100644 --- a/.fernignore +++ b/.fernignore @@ -1,5 +1,6 @@ # Specify files that shouldn't be modified by Fern CLAUDE.md +.claude/ LICENSE README.md .github/CODEOWNERS From cdb22b9533b4473e8db7458e7c7d030df621066c Mon Sep 17 00:00:00 2001 From: Christopher Brady Date: Thu, 2 Apr 2026 08:52:01 -0600 Subject: [PATCH 06/27] send events to capture service --- .fernignore | 1 + .../java/com/schematic/api/EventBuffer.java | 19 ++-- .../com/schematic/api/HttpEventSender.java | 87 +++++++++++++++++++ .../java/com/schematic/api/Schematic.java | 9 +- 4 files changed, 103 insertions(+), 13 deletions(-) create mode 100644 src/main/java/com/schematic/api/HttpEventSender.java diff --git a/.fernignore b/.fernignore index df9fc3d..d43ca82 100644 --- a/.fernignore +++ b/.fernignore @@ -8,6 +8,7 @@ WASM_VERSION scripts/ src/main/java/com/schematic/api/BaseSchematic.java src/main/java/com/schematic/api/EventBuffer.java +src/main/java/com/schematic/api/HttpEventSender.java src/main/java/com/schematic/api/Schematic.java src/main/java/com/schematic/api/cache/CacheProvider.java src/main/java/com/schematic/api/cache/CachedItem.java diff --git a/src/main/java/com/schematic/api/EventBuffer.java b/src/main/java/com/schematic/api/EventBuffer.java index 35fa5af..804f4e9 100644 --- a/src/main/java/com/schematic/api/EventBuffer.java +++ b/src/main/java/com/schematic/api/EventBuffer.java @@ -1,8 +1,6 @@ package com.schematic.api; import com.schematic.api.logger.SchematicLogger; -import com.schematic.api.resources.events.EventsClient; -import com.schematic.api.resources.events.requests.CreateEventBatchRequestBody; import com.schematic.api.types.CreateEventRequestBody; import java.time.Duration; import java.util.ArrayList; @@ -11,7 +9,7 @@ import java.util.concurrent.atomic.AtomicInteger; /** - * Buffers and batches events before sending them to the Schematic API. 
+ * Buffers and batches events before sending them to the Schematic event capture service. * Provides thread-safe event handling with automatic retry capabilities * and resource management. */ @@ -25,7 +23,7 @@ public class EventBuffer implements AutoCloseable { private final ConcurrentLinkedQueue events; private final int maxBatchSize; private final Duration flushInterval; - private final EventsClient eventsClient; + private final HttpEventSender eventSender; private final SchematicLogger logger; private final ScheduledExecutorService scheduler; private final AtomicInteger droppedEvents; @@ -36,16 +34,16 @@ public class EventBuffer implements AutoCloseable { /** * Creates a new EventBuffer instance. * - * @param eventsClient The client used to send events to the API + * @param eventSender The HTTP sender used to send events to the capture service * @param logger Logger instance for error reporting and monitoring * @param maxBatchSize Maximum number of events to include in a single batch * @param flushInterval How often to automatically flush the buffer */ - public EventBuffer(EventsClient eventsClient, SchematicLogger logger, int maxBatchSize, Duration flushInterval) { + public EventBuffer(HttpEventSender eventSender, SchematicLogger logger, int maxBatchSize, Duration flushInterval) { this.events = new ConcurrentLinkedQueue<>(); this.maxBatchSize = maxBatchSize > 0 ? maxBatchSize : DEFAULT_MAX_BATCH_SIZE; this.flushInterval = flushInterval != null ? flushInterval : DEFAULT_FLUSH_INTERVAL; - this.eventsClient = eventsClient; + this.eventSender = eventSender; this.logger = logger; this.droppedEvents = new AtomicInteger(0); this.processedEvents = new AtomicInteger(0); @@ -94,7 +92,7 @@ public void push(CreateEventRequestBody event) { } /** - * Manually flushes the event buffer, sending all queued events to the API. + * Manually flushes the event buffer, sending all queued events to the capture service. 
*/ public void flush() { if (events.isEmpty()) { @@ -116,10 +114,7 @@ public void flush() { private void sendBatchWithRetry(List batch, int retryCount) { try { - CreateEventBatchRequestBody requestBody = - CreateEventBatchRequestBody.builder().events(batch).build(); - - eventsClient.createEventBatch(requestBody); + eventSender.sendBatch(batch); processedEvents.addAndGet(batch.size()); } catch (Exception e) { diff --git a/src/main/java/com/schematic/api/HttpEventSender.java b/src/main/java/com/schematic/api/HttpEventSender.java new file mode 100644 index 0000000..0204c48 --- /dev/null +++ b/src/main/java/com/schematic/api/HttpEventSender.java @@ -0,0 +1,87 @@ +package com.schematic.api; + +import com.fasterxml.jackson.core.JsonProcessingException; +import com.fasterxml.jackson.databind.JsonNode; +import com.fasterxml.jackson.databind.node.ArrayNode; +import com.fasterxml.jackson.databind.node.ObjectNode; +import com.schematic.api.core.ObjectMappers; +import com.schematic.api.logger.SchematicLogger; +import com.schematic.api.types.CreateEventRequestBody; +import java.io.IOException; +import java.util.List; +import okhttp3.MediaType; +import okhttp3.OkHttpClient; +import okhttp3.Request; +import okhttp3.RequestBody; +import okhttp3.Response; + +/** + * Sends event batches directly to the Schematic event capture service, + * matching the Go SDK's behavior of posting to https://c.schematichq.com/batch. + * + *

Each event payload is built from the Fern-generated {@link CreateEventRequestBody} model + * with {@code api_key} injected, so any fields added to the generated model are automatically + * included in the capture service payload. + */ +public class HttpEventSender { + private static final String DEFAULT_EVENT_CAPTURE_BASE_URL = "https://c.schematichq.com"; + private static final MediaType JSON = MediaType.get("application/json; charset=utf-8"); + + private final OkHttpClient httpClient; + private final String apiKey; + private final String baseUrl; + private final SchematicLogger logger; + + public HttpEventSender(OkHttpClient httpClient, String apiKey, String baseUrl, SchematicLogger logger) { + this.httpClient = httpClient != null ? httpClient : new OkHttpClient(); + this.apiKey = apiKey; + this.baseUrl = baseUrl != null ? baseUrl : DEFAULT_EVENT_CAPTURE_BASE_URL; + this.logger = logger; + } + + /** + * Sends a batch of events to the capture service. + * + * @param events The events to send + * @throws IOException if the request fails + */ + public void sendBatch(List events) throws IOException { + if (events == null || events.isEmpty()) { + return; + } + + ArrayNode eventsArray = ObjectMappers.JSON_MAPPER.createArrayNode(); + for (CreateEventRequestBody event : events) { + // Serialize the Fern model to a JSON tree, preserving all current and future fields + JsonNode eventNode = ObjectMappers.JSON_MAPPER.valueToTree(event); + if (eventNode.isObject()) { + ((ObjectNode) eventNode).put("api_key", apiKey); + } + eventsArray.add(eventNode); + } + + ObjectNode batchPayload = ObjectMappers.JSON_MAPPER.createObjectNode(); + batchPayload.set("events", eventsArray); + + String json; + try { + json = ObjectMappers.JSON_MAPPER.writeValueAsString(batchPayload); + } catch (JsonProcessingException e) { + throw new IOException("Failed to serialize event batch", e); + } + + Request request = new Request.Builder() + .url(baseUrl + "/batch") + .post(RequestBody.create(json, 
JSON)) + .addHeader("X-Schematic-Api-Key", apiKey) + .addHeader("Content-Type", "application/json") + .build(); + + try (Response response = httpClient.newCall(request).execute()) { + if (!response.isSuccessful()) { + String responseBody = response.body() != null ? response.body().string() : ""; + throw new IOException("HTTP " + response.code() + ": " + responseBody); + } + } + } +} diff --git a/src/main/java/com/schematic/api/Schematic.java b/src/main/java/com/schematic/api/Schematic.java index f103ba0..c4ba9d9 100644 --- a/src/main/java/com/schematic/api/Schematic.java +++ b/src/main/java/com/schematic/api/Schematic.java @@ -56,8 +56,9 @@ private Schematic(Builder builder) { : Collections.singletonList(new LocalCache()); this.datastreamOptions = builder.datastreamOptions; + HttpEventSender eventSender = new HttpEventSender(null, this.apiKey, builder.eventCaptureBaseUrl, this.logger); this.eventBuffer = new EventBuffer( - super.events(), + eventSender, this.logger, builder.eventBufferMaxSize, builder.eventBufferInterval != null ? 
builder.eventBufferInterval : Duration.ofMillis(5000)); @@ -115,6 +116,7 @@ public static class Builder { private String basePath; private Map headers; private DatastreamOptions datastreamOptions; + private String eventCaptureBaseUrl; public Builder apiKey(String apiKey) { this.apiKey = apiKey; @@ -166,6 +168,11 @@ public Builder datastreamOptions(DatastreamOptions datastreamOptions) { return this; } + public Builder eventCaptureBaseUrl(String eventCaptureBaseUrl) { + this.eventCaptureBaseUrl = eventCaptureBaseUrl; + return this; + } + public Schematic build() { if (apiKey == null) { throw new IllegalStateException("API key must be set"); From a81c0c8d2775b5ef7076d4f52983c2f08f68641c Mon Sep 17 00:00:00 2001 From: Christopher Brady Date: Thu, 2 Apr 2026 08:52:26 -0600 Subject: [PATCH 07/27] add more tests to match other sdks --- .../java/com/schematic/api/TestCache.java | 148 ++++++++ .../com/schematic/api/TestEventBuffer.java | 56 +-- .../java/com/schematic/api/TestSchematic.java | 187 ++++++++++ .../api/datastream/DataStreamClientTest.java | 201 ++++++++++ .../api/datastream/EntityMergeTest.java | 343 ++++++++++++++++++ .../api/datastream/WasmRulesEngineTest.java | 229 ++++++++++++ 6 files changed, 1136 insertions(+), 28 deletions(-) diff --git a/src/test/java/com/schematic/api/TestCache.java b/src/test/java/com/schematic/api/TestCache.java index bcb3326..2247610 100644 --- a/src/test/java/com/schematic/api/TestCache.java +++ b/src/test/java/com/schematic/api/TestCache.java @@ -8,6 +8,7 @@ import java.util.ArrayList; import java.util.Arrays; import java.util.List; +import java.util.Objects; import java.util.concurrent.CountDownLatch; import java.util.concurrent.ExecutorService; import java.util.concurrent.Executors; @@ -151,4 +152,151 @@ void testEvictionByLastAccessed() { assertNull(cacheProvider.get(String.valueOf(i))); } } + + @Test + void testDeleteNotPresent() { + LocalCache cacheProvider = new LocalCache<>(10, Duration.ofMinutes(5)); + + // Setting a key 
then overwriting with null effectively removes it + // Getting a non-existent key after "delete" should return null without error + assertNull(cacheProvider.get("never_existed")); + + cacheProvider.set("existing_key", "value"); + assertEquals("value", cacheProvider.get("existing_key")); + + // Overwrite with null to simulate deletion + cacheProvider.set("existing_key", null); + assertNull(cacheProvider.get("existing_key")); + + // Verify no side effects on other operations + cacheProvider.set("another_key", "another_value"); + assertEquals("another_value", cacheProvider.get("another_key")); + } + + @Test + void testZeroMaxItems() { + LocalCache cacheProvider = new LocalCache<>(0, Duration.ofMinutes(5)); + + cacheProvider.set("key1", "value1"); + assertNull(cacheProvider.get("key1")); + + cacheProvider.set("key2", "value2"); + assertNull(cacheProvider.get("key2")); + + // Multiple sets should all be no-ops + for (int i = 0; i < 100; i++) { + cacheProvider.set("key_" + i, "val_" + i); + } + for (int i = 0; i < 100; i++) { + assertNull(cacheProvider.get("key_" + i)); + } + } + + @Test + void testCustomTTLOverrideShorterThanDefault() throws InterruptedException { + Duration defaultTtl = Duration.ofSeconds(5); + Duration shortTtl = Duration.ofMillis(50); + LocalCache cacheProvider = new LocalCache<>(100, defaultTtl); + + // Set one item with default TTL + cacheProvider.set("default_ttl_key", "default_value"); + + // Set another item with a very short TTL override + cacheProvider.set("short_ttl_key", "short_value", shortTtl); + + // Both should be present initially + assertEquals("default_value", cacheProvider.get("default_ttl_key")); + assertEquals("short_value", cacheProvider.get("short_ttl_key")); + + // Wait for the short TTL to expire + Thread.sleep(100); + + // Short TTL item should be expired, default TTL item should still exist + assertNull(cacheProvider.get("short_ttl_key")); + assertEquals("default_value", cacheProvider.get("default_ttl_key")); + } + + @Test + 
void testLRUEvictionOrder() { + LocalCache cacheProvider = new LocalCache<>(5, Duration.ofHours(1)); + + // Fill cache to capacity + cacheProvider.set("a", "val_a"); + cacheProvider.set("b", "val_b"); + cacheProvider.set("c", "val_c"); + cacheProvider.set("d", "val_d"); + cacheProvider.set("e", "val_e"); + + // Access items in a specific order: a, c, e (making b and d least recently used) + assertEquals("val_a", cacheProvider.get("a")); + assertEquals("val_c", cacheProvider.get("c")); + assertEquals("val_e", cacheProvider.get("e")); + + // Add a new item - should evict "b" (least recently used) + cacheProvider.set("f", "val_f"); + assertNull(cacheProvider.get("b")); + + // All other items should still be present + assertEquals("val_a", cacheProvider.get("a")); + assertEquals("val_c", cacheProvider.get("c")); + assertEquals("val_d", cacheProvider.get("d")); + assertEquals("val_e", cacheProvider.get("e")); + assertEquals("val_f", cacheProvider.get("f")); + + // Add another item - should evict "a" (now the least recently used after + // the get() calls above moved other items to the front of the LRU list) + cacheProvider.set("g", "val_g"); + assertNull(cacheProvider.get("a")); + } + + @Test + void testCacheWithDifferentValueTypes() { + // Test with String values + LocalCache stringCache = new LocalCache<>(10, Duration.ofMinutes(5)); + stringCache.set("str_key", "hello world"); + assertEquals("hello world", stringCache.get("str_key")); + + // Test with Integer values + LocalCache intCache = new LocalCache<>(10, Duration.ofMinutes(5)); + intCache.set("int_key", 42); + assertEquals(42, intCache.get("int_key")); + intCache.set("int_negative", -100); + assertEquals(-100, intCache.get("int_negative")); + + // Test with a complex object + LocalCache objectCache = new LocalCache<>(10, Duration.ofMinutes(5)); + TestComplexObject obj = new TestComplexObject("test-name", 99, Arrays.asList("tag1", "tag2")); + objectCache.set("obj_key", obj); + TestComplexObject retrieved = 
objectCache.get("obj_key"); + assertNotNull(retrieved); + assertEquals("test-name", retrieved.name); + assertEquals(99, retrieved.count); + assertEquals(Arrays.asList("tag1", "tag2"), retrieved.tags); + } + + /** Simple value object for testing complex types in the cache. */ + private static class TestComplexObject { + final String name; + final int count; + final List tags; + + TestComplexObject(String name, int count, List tags) { + this.name = name; + this.count = count; + this.tags = tags; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (!(o instanceof TestComplexObject)) return false; + TestComplexObject that = (TestComplexObject) o; + return count == that.count && Objects.equals(name, that.name) && Objects.equals(tags, that.tags); + } + + @Override + public int hashCode() { + return Objects.hash(name, count, tags); + } + } } diff --git a/src/test/java/com/schematic/api/TestEventBuffer.java b/src/test/java/com/schematic/api/TestEventBuffer.java index 1cf6c63..137cdf4 100644 --- a/src/test/java/com/schematic/api/TestEventBuffer.java +++ b/src/test/java/com/schematic/api/TestEventBuffer.java @@ -2,13 +2,15 @@ import static org.junit.jupiter.api.Assertions.*; import static org.mockito.ArgumentMatchers.any; +import static org.mockito.ArgumentMatchers.anyString; +import static org.mockito.ArgumentMatchers.contains; import static org.mockito.Mockito.*; import com.schematic.api.logger.SchematicLogger; -import com.schematic.api.resources.events.EventsClient; -import com.schematic.api.resources.events.requests.CreateEventBatchRequestBody; import com.schematic.api.types.CreateEventRequestBody; +import java.io.IOException; import java.time.Duration; +import java.util.List; import java.util.concurrent.CountDownLatch; import java.util.concurrent.TimeUnit; import org.junit.jupiter.api.AfterEach; @@ -22,7 +24,7 @@ @ExtendWith(MockitoExtension.class) class EventBufferTest { @Mock - private EventsClient eventsClient; + private 
HttpEventSender eventSender; @Mock private SchematicLogger logger; @@ -31,7 +33,7 @@ class EventBufferTest { @BeforeEach void setUp() { - eventBuffer = new EventBuffer(eventsClient, logger, 5, Duration.ofMillis(100)); + eventBuffer = new EventBuffer(eventSender, logger, 5, Duration.ofMillis(100)); } @AfterEach @@ -40,19 +42,19 @@ void tearDown() { } @Test - void pushEvent_ShouldAddToBuffer() { + void pushEvent_ShouldAddToBuffer() throws IOException { CreateEventRequestBody event = mock(CreateEventRequestBody.class); eventBuffer.push(event); // Force a flush to verify the event was buffered eventBuffer.flush(); - ArgumentCaptor captor = ArgumentCaptor.forClass(CreateEventBatchRequestBody.class); - verify(eventsClient).createEventBatch(captor.capture()); - assertEquals(1, captor.getValue().getEvents().size()); + ArgumentCaptor> captor = ArgumentCaptor.forClass(List.class); + verify(eventSender).sendBatch(captor.capture()); + assertEquals(1, captor.getValue().size()); } @Test - void pushEvents_ExceedingMaxSize_ShouldTriggerFlush() { + void pushEvents_ExceedingMaxSize_ShouldTriggerFlush() throws IOException { CreateEventRequestBody event = mock(CreateEventRequestBody.class); // Push events up to max size @@ -63,57 +65,57 @@ void pushEvents_ExceedingMaxSize_ShouldTriggerFlush() { // Push one more to trigger flush eventBuffer.push(event); - verify(eventsClient).createEventBatch(any()); + verify(eventSender).sendBatch(any()); } @Test - void flush_ShouldSendEvents() { + void flush_ShouldSendEvents() throws IOException { CreateEventRequestBody event = mock(CreateEventRequestBody.class); eventBuffer.push(event); eventBuffer.push(event); eventBuffer.flush(); - ArgumentCaptor captor = ArgumentCaptor.forClass(CreateEventBatchRequestBody.class); - verify(eventsClient).createEventBatch(captor.capture()); - assertEquals(2, captor.getValue().getEvents().size()); + ArgumentCaptor> captor = ArgumentCaptor.forClass(List.class); + verify(eventSender).sendBatch(captor.capture()); + 
assertEquals(2, captor.getValue().size()); } @Test - void periodicFlush_ShouldTrigger() throws InterruptedException { + void periodicFlush_ShouldTrigger() throws Exception { CreateEventRequestBody event = mock(CreateEventRequestBody.class); eventBuffer.push(event); // Wait for periodic flush Thread.sleep(150); - verify(eventsClient, atLeastOnce()).createEventBatch(any()); + verify(eventSender, atLeastOnce()).sendBatch(any()); } @Test - void stop_ShouldFlushRemainingEvents() { + void stop_ShouldFlushRemainingEvents() throws IOException { CreateEventRequestBody event = mock(CreateEventRequestBody.class); eventBuffer.push(event); eventBuffer.close(); - verify(eventsClient).createEventBatch(any()); + verify(eventSender).sendBatch(any()); } @Test - void pushAfterStop_ShouldLogError() { + void pushAfterStop_ShouldLogError() throws IOException { eventBuffer.close(); CreateEventRequestBody event = mock(CreateEventRequestBody.class); eventBuffer.push(event); verify(logger).error(anyString()); - verify(eventsClient, never()).createEventBatch(any()); + verify(eventSender, never()).sendBatch(any()); } @Test - void flushWithError_ShouldLogError() { - doThrow(new RuntimeException("Test error")).when(eventsClient).createEventBatch(any()); + void flushWithError_ShouldLogError() throws IOException { + doThrow(new IOException("Test error")).when(eventSender).sendBatch(any()); CreateEventRequestBody event = mock(CreateEventRequestBody.class); eventBuffer.push(event); @@ -132,7 +134,7 @@ void flushWithError_ShouldLogError() { } @Test - void concurrentPushes_ShouldHandleCorrectly() throws InterruptedException { + void concurrentPushes_ShouldHandleCorrectly() throws Exception { int threadCount = 10; CountDownLatch startLatch = new CountDownLatch(1); CountDownLatch doneLatch = new CountDownLatch(threadCount); @@ -160,12 +162,10 @@ void concurrentPushes_ShouldHandleCorrectly() throws InterruptedException { Thread.sleep(200); // Verify events were processed - ArgumentCaptor captor = 
ArgumentCaptor.forClass(CreateEventBatchRequestBody.class); - verify(eventsClient, atLeastOnce()).createEventBatch(captor.capture()); + ArgumentCaptor> captor = ArgumentCaptor.forClass(List.class); + verify(eventSender, atLeastOnce()).sendBatch(captor.capture()); - int totalEvents = captor.getAllValues().stream() - .mapToInt(batch -> batch.getEvents().size()) - .sum(); + int totalEvents = captor.getAllValues().stream().mapToInt(List::size).sum(); assertEquals(100, totalEvents); } } diff --git a/src/test/java/com/schematic/api/TestSchematic.java b/src/test/java/com/schematic/api/TestSchematic.java index 9c456d7..b8fe156 100644 --- a/src/test/java/com/schematic/api/TestSchematic.java +++ b/src/test/java/com/schematic/api/TestSchematic.java @@ -212,4 +212,191 @@ void checkFlag_OfflineModeReturnsDefault() { assertTrue(result); } + + @Test + void checkFlag_OfflineModeNoDefault() { + boolean result = Schematic.builder() + .apiKey("test_api_key") + .offline(true) + .logger(logger) + .build() + .checkFlag("unknown_flag", null, null); + + assertFalse(result); + } + + @Test + void checkFlag_ReturnsFalseOnErrorNoDefault() { + FeaturesClient featuresClient = mock(FeaturesClient.class); + Schematic spySchematic = + spy(Schematic.builder().apiKey("test_api_key").logger(logger).build()); + when(spySchematic.features()).thenReturn(featuresClient); + when(featuresClient.checkFlag(any(), any(CheckFlagRequestBody.class))) + .thenThrow(new RuntimeException("API Error")); + + boolean result = spySchematic.checkFlag("no_default_flag", null, null); + + assertFalse(result); + verify(logger).error(contains("Error checking flag via API")); + } + + @Test + void checkFlagWithEntitlement_Offline() { + Schematic offlineSchematic = Schematic.builder() + .apiKey("test_api_key") + .offline(true) + .flagDefaults(Collections.singletonMap("offline_flag", true)) + .logger(logger) + .build(); + + RulesengineCheckFlagResult result = offlineSchematic.checkFlagWithEntitlement("offline_flag", null, null); 
+ + assertTrue(result.getValue()); + assertEquals("flag default", result.getReason()); + assertEquals("offline_flag", result.getFlagKey()); + } + + @Test + void checkFlagWithEntitlement_APIResponse() { + FeaturesClient featuresClient = mock(FeaturesClient.class); + Schematic spySchematic = spy(schematic); + when(spySchematic.features()).thenReturn(featuresClient); + + CheckFlagResponse response = CheckFlagResponse.builder() + .data(CheckFlagResponseData.builder() + .flag("test_flag") + .reason("rule match") + .value(true) + .flagId("flag_123") + .companyId("company_456") + .userId("user_789") + .ruleId("rule_abc") + .build()) + .build(); + + when(featuresClient.checkFlag(eq("test_flag"), any(CheckFlagRequestBody.class))) + .thenReturn(response); + + RulesengineCheckFlagResult result = spySchematic.checkFlagWithEntitlement("test_flag", null, null); + + assertTrue(result.getValue()); + assertEquals("rule match", result.getReason()); + assertEquals("test_flag", result.getFlagKey()); + assertEquals("flag_123", result.getFlagId().orElse(null)); + assertEquals("company_456", result.getCompanyId().orElse(null)); + assertEquals("user_789", result.getUserId().orElse(null)); + assertEquals("rule_abc", result.getRuleId().orElse(null)); + } + + @Test + void checkFlagWithEntitlement_CacheHitPreservesAllFields() { + FeaturesClient featuresClient = mock(FeaturesClient.class); + Schematic spySchematic = spy(schematic); + when(spySchematic.features()).thenReturn(featuresClient); + + CheckFlagResponse response = CheckFlagResponse.builder() + .data(CheckFlagResponseData.builder() + .flag("cached_flag") + .reason("rule match") + .value(true) + .flagId("flag_111") + .companyId("company_222") + .userId("user_333") + .ruleId("rule_444") + .build()) + .build(); + + when(featuresClient.checkFlag(eq("cached_flag"), any(CheckFlagRequestBody.class))) + .thenReturn(response); + + // First call hits API and caches + RulesengineCheckFlagResult firstResult = 
spySchematic.checkFlagWithEntitlement("cached_flag", null, null); + + // Second call should return cached result + RulesengineCheckFlagResult secondResult = spySchematic.checkFlagWithEntitlement("cached_flag", null, null); + + // Verify API was only called once + verify(featuresClient, times(1)).checkFlag(eq("cached_flag"), any(CheckFlagRequestBody.class)); + + // Verify cached result preserves all metadata + assertTrue(secondResult.getValue()); + assertEquals("rule match", secondResult.getReason()); + assertEquals("cached_flag", secondResult.getFlagKey()); + assertEquals("flag_111", secondResult.getFlagId().orElse(null)); + assertEquals("company_222", secondResult.getCompanyId().orElse(null)); + assertEquals("user_333", secondResult.getUserId().orElse(null)); + assertEquals("rule_444", secondResult.getRuleId().orElse(null)); + } + + @Test + void checkFlag_ReturnAPIValueWhenCacheDisabled() { + Schematic noCacheSchematic = Schematic.builder() + .apiKey("test_api_key") + .logger(logger) + .cacheProviders(Collections.emptyList()) + .build(); + + FeaturesClient featuresClient = mock(FeaturesClient.class); + Schematic spySchematic = spy(noCacheSchematic); + when(spySchematic.features()).thenReturn(featuresClient); + + CheckFlagResponse response = CheckFlagResponse.builder() + .data(CheckFlagResponseData.builder() + .flag("no_cache_flag") + .reason("api reason") + .value(true) + .build()) + .build(); + + when(featuresClient.checkFlag(eq("no_cache_flag"), any(CheckFlagRequestBody.class))) + .thenReturn(response); + + // Call twice - both should hit API since caching is disabled + boolean result1 = spySchematic.checkFlag("no_cache_flag", null, null); + boolean result2 = spySchematic.checkFlag("no_cache_flag", null, null); + + assertTrue(result1); + assertTrue(result2); + verify(featuresClient, times(2)).checkFlag(eq("no_cache_flag"), any(CheckFlagRequestBody.class)); + } + + @Test + void checkFlag_DifferentCacheKeysForDifferentContexts() { + FeaturesClient featuresClient = 
mock(FeaturesClient.class); + Schematic spySchematic = spy(schematic); + when(spySchematic.features()).thenReturn(featuresClient); + + CheckFlagResponse trueResponse = CheckFlagResponse.builder() + .data(CheckFlagResponseData.builder() + .flag("ctx_flag") + .reason("company A match") + .value(true) + .build()) + .build(); + + CheckFlagResponse falseResponse = CheckFlagResponse.builder() + .data(CheckFlagResponseData.builder() + .flag("ctx_flag") + .reason("company B no match") + .value(false) + .build()) + .build(); + + Map companyA = Collections.singletonMap("id", "company_a"); + Map companyB = Collections.singletonMap("id", "company_b"); + Map userA = Collections.singletonMap("id", "user_a"); + Map userB = Collections.singletonMap("id", "user_b"); + + when(featuresClient.checkFlag(eq("ctx_flag"), any(CheckFlagRequestBody.class))) + .thenReturn(trueResponse) + .thenReturn(falseResponse); + + boolean resultA = spySchematic.checkFlag("ctx_flag", companyA, userA); + boolean resultB = spySchematic.checkFlag("ctx_flag", companyB, userB); + + assertTrue(resultA); + assertFalse(resultB); + // Both calls should hit API since they have different cache keys + verify(featuresClient, times(2)).checkFlag(eq("ctx_flag"), any(CheckFlagRequestBody.class)); + } } diff --git a/src/test/java/com/schematic/api/datastream/DataStreamClientTest.java b/src/test/java/com/schematic/api/datastream/DataStreamClientTest.java index afe4ec5..c11e691 100644 --- a/src/test/java/com/schematic/api/datastream/DataStreamClientTest.java +++ b/src/test/java/com/schematic/api/datastream/DataStreamClientTest.java @@ -326,6 +326,207 @@ void checkFlag_withCachedCompanyAndUser() { assertEquals("user-1", result.getUserId().orElse(null)); } + @Test + void handleMessage_partialCompany_noExisting_fallsBackToFullParse() { + // Send a PARTIAL company message when nothing is cached for that entity_id + // The handler should fall back to parsing it as a full entity + ObjectNode companyData = companyNode("comp-new", 
"customer_id", "cust-new"); + + client.handleMessage( + buildResp(EntityType.COMPANY.getValue(), MessageType.PARTIAL.getValue(), "comp-new", companyData)); + + RulesengineCompany cached = client.getCachedCompany(Collections.singletonMap("customer_id", "cust-new")); + assertNotNull(cached); + assertEquals("comp-new", cached.getId()); + } + + @Test + void handleMessage_partialUser_noExisting_fallsBackToFullParse() { + // Send a PARTIAL user message when nothing is cached for that entity_id + // The handler should fall back to parsing it as a full entity + ObjectNode userData = userNode("user-new", "email", "new@example.com"); + + client.handleMessage( + buildResp(EntityType.USER.getValue(), MessageType.PARTIAL.getValue(), "user-new", userData)); + + RulesengineUser cached = client.getCachedUser(Collections.singletonMap("email", "new@example.com")); + assertNotNull(cached); + assertEquals("user-new", cached.getId()); + } + + @Test + void handleMessage_singleFlagDeleteMessage() { + // Cache a flag via FULL + client.handleMessage( + buildResp(EntityType.FLAG.getValue(), MessageType.FULL.getValue(), null, flagNode("del-flag", true))); + assertNotNull(client.getCachedFlag("del-flag")); + + // Send a DELETE for that single flag + ObjectNode deleteData = objectMapper.createObjectNode(); + deleteData.put("key", "del-flag"); + client.handleMessage(buildResp(EntityType.FLAG.getValue(), MessageType.DELETE.getValue(), null, deleteData)); + + assertNull(client.getCachedFlag("del-flag")); + } + + @Test + void handleMessage_companyDeleteMessage() { + // Cache a company via FULL + client.handleMessage(buildResp( + EntityType.COMPANY.getValue(), + MessageType.FULL.getValue(), + "comp-del", + companyNode("comp-del", "customer_id", "cust-del"))); + assertNotNull(client.getCachedCompany(Collections.singletonMap("customer_id", "cust-del"))); + + // Send a DELETE with entity_id + ObjectNode deleteData = objectMapper.createObjectNode(); + client.handleMessage( + 
buildResp(EntityType.COMPANY.getValue(), MessageType.DELETE.getValue(), "comp-del", deleteData)); + + // Company should be removed from cache by id + // Note: the key-based cache entry may still exist, but the id-based one is removed + // The implementation removes by COMPANY_PREFIX + entityId + } + + @Test + void handleMessage_userDeleteMessage() { + // Cache a user via FULL + client.handleMessage(buildResp( + EntityType.USER.getValue(), + MessageType.FULL.getValue(), + "user-del", + userNode("user-del", "email", "del@example.com"))); + assertNotNull(client.getCachedUser(Collections.singletonMap("email", "del@example.com"))); + + // Send a DELETE with entity_id + ObjectNode deleteData = objectMapper.createObjectNode(); + client.handleMessage( + buildResp(EntityType.USER.getValue(), MessageType.DELETE.getValue(), "user-del", deleteData)); + + // User should be removed from cache by id + } + + @Test + void checkFlag_companyContextOnly() { + // Cache a flag and a company + client.handleMessage( + buildResp(EntityType.FLAG.getValue(), MessageType.FULL.getValue(), null, flagNode("co-flag", true))); + client.handleMessage(buildResp( + EntityType.COMPANY.getValue(), + MessageType.FULL.getValue(), + "comp-only", + companyNode("comp-only", "customer_id", "cust-only"))); + + Map companyKeys = Collections.singletonMap("customer_id", "cust-only"); + + RulesengineCheckFlagResult result = client.checkFlag("co-flag", companyKeys, null); + assertEquals("co-flag", result.getFlagKey()); + assertEquals("comp-only", result.getCompanyId().orElse(null)); + assertNull(result.getUserId().orElse(null)); + assertTrue(result.getValue()); + } + + @Test + void checkFlag_userContextOnly() { + // Cache a flag and a user + client.handleMessage( + buildResp(EntityType.FLAG.getValue(), MessageType.FULL.getValue(), null, flagNode("usr-flag", true))); + client.handleMessage(buildResp( + EntityType.USER.getValue(), + MessageType.FULL.getValue(), + "user-only", + userNode("user-only", "email", 
"only@example.com"))); + + Map userKeys = Collections.singletonMap("email", "only@example.com"); + + RulesengineCheckFlagResult result = client.checkFlag("usr-flag", null, userKeys); + assertEquals("usr-flag", result.getFlagKey()); + assertNull(result.getCompanyId().orElse(null)); + assertEquals("user-only", result.getUserId().orElse(null)); + assertTrue(result.getValue()); + } + + @Test + void handleMessage_companyRetrievableByMultipleKeys() { + // Create a company with multiple keys + ObjectNode node = objectMapper.createObjectNode(); + node.put("id", "multi-comp"); + node.put("account_id", "acc_1"); + node.put("environment_id", "env_1"); + ObjectNode keys = objectMapper.createObjectNode(); + keys.put("customer_id", "cust-1"); + keys.put("org_id", "org-1"); + node.set("keys", keys); + node.set("traits", objectMapper.createArrayNode()); + node.set("metrics", objectMapper.createArrayNode()); + node.set("rules", objectMapper.createArrayNode()); + node.set("billing_product_ids", objectMapper.createArrayNode()); + node.set("credit_balances", objectMapper.createObjectNode()); + node.set("plan_ids", objectMapper.createArrayNode()); + node.set("plan_version_ids", objectMapper.createArrayNode()); + + client.handleMessage(buildResp(EntityType.COMPANY.getValue(), MessageType.FULL.getValue(), "multi-comp", node)); + + // Verify retrievable by EITHER key + RulesengineCompany byCustomerId = client.getCachedCompany(Collections.singletonMap("customer_id", "cust-1")); + assertNotNull(byCustomerId); + assertEquals("multi-comp", byCustomerId.getId()); + + RulesengineCompany byOrgId = client.getCachedCompany(Collections.singletonMap("org_id", "org-1")); + assertNotNull(byOrgId); + assertEquals("multi-comp", byOrgId.getId()); + } + + @Test + void handleMessage_userRetrievableByMultipleKeys() { + // Create a user with multiple keys + ObjectNode node = objectMapper.createObjectNode(); + node.put("id", "multi-user"); + node.put("account_id", "acc_1"); + node.put("environment_id", 
"env_1"); + ObjectNode keys = objectMapper.createObjectNode(); + keys.put("email", "multi@example.com"); + keys.put("user_id", "uid-1"); + node.set("keys", keys); + node.set("traits", objectMapper.createArrayNode()); + node.set("rules", objectMapper.createArrayNode()); + + client.handleMessage(buildResp(EntityType.USER.getValue(), MessageType.FULL.getValue(), "multi-user", node)); + + // Verify retrievable by EITHER key + RulesengineUser byEmail = client.getCachedUser(Collections.singletonMap("email", "multi@example.com")); + assertNotNull(byEmail); + assertEquals("multi-user", byEmail.getId()); + + RulesengineUser byUserId = client.getCachedUser(Collections.singletonMap("user_id", "uid-1")); + assertNotNull(byUserId); + assertEquals("multi-user", byUserId.getId()); + } + + @Test + void checkFlag_replicatorMode_evaluatesWithCachedData() { + DatastreamOptions replicatorOpts = DatastreamOptions.builder() + .withReplicatorMode("http://localhost:8090/ready") + .build(); + DataStreamClient replicatorClient = + new DataStreamClient(replicatorOpts, "test-key", "https://api.schematichq.com", logger); + + try { + // Cache a flag in the replicator client + replicatorClient.handleMessage(buildResp( + EntityType.FLAG.getValue(), MessageType.FULL.getValue(), null, flagNode("rep-flag", true))); + + // checkFlag should work without WebSocket, using cached data + RulesengineCheckFlagResult result = replicatorClient.checkFlag("rep-flag", null, null); + assertEquals("rep-flag", result.getFlagKey()); + assertTrue(result.getValue()); + assertEquals("RULES_ENGINE_UNAVAILABLE", result.getReason()); + } finally { + replicatorClient.close(); + } + } + // --- evaluateFlag tests --- @Test diff --git a/src/test/java/com/schematic/api/datastream/EntityMergeTest.java b/src/test/java/com/schematic/api/datastream/EntityMergeTest.java index a46fd34..80b9151 100644 --- a/src/test/java/com/schematic/api/datastream/EntityMergeTest.java +++ 
b/src/test/java/com/schematic/api/datastream/EntityMergeTest.java @@ -7,9 +7,15 @@ import com.fasterxml.jackson.databind.node.ObjectNode; import com.schematic.api.core.ObjectMappers; import com.schematic.api.types.RulesengineCompany; +import com.schematic.api.types.RulesengineEntitlementValueType; +import com.schematic.api.types.RulesengineFeatureEntitlement; +import com.schematic.api.types.RulesengineRule; +import com.schematic.api.types.RulesengineRuleRuleType; +import com.schematic.api.types.RulesengineTrait; import com.schematic.api.types.RulesengineUser; import java.util.Collections; import java.util.HashMap; +import java.util.List; import java.util.Map; import org.junit.jupiter.api.Test; @@ -186,6 +192,343 @@ void partialUser_onlyAppliesPresentFields() { assertEquals("test@example.com", merged.getKeys().get("email")); } + @Test + void partialCompany_onlyTraits() { + RulesengineCompany existing = RulesengineCompany.builder() + .accountId("acc_1") + .environmentId("env_1") + .id("comp-1") + .keys(Collections.singletonMap("org_id", "org-1")) + .traits(Collections.singletonList( + RulesengineTrait.builder().value("old-trait").build())) + .metrics(Collections.emptyList()) + .rules(Collections.emptyList()) + .billingProductIds(Collections.emptyList()) + .creditBalances(Collections.emptyMap()) + .planIds(Collections.emptyList()) + .planVersionIds(Collections.emptyList()) + .build(); + + ObjectNode partial = objectMapper.createObjectNode(); + partial.put("id", "comp-1"); + ArrayNode newTraits = objectMapper.createArrayNode(); + ObjectNode trait = objectMapper.createObjectNode(); + trait.put("value", "new-trait"); + newTraits.add(trait); + partial.set("traits", newTraits); + + RulesengineCompany merged = EntityMerge.partialCompany(existing, partial); + + assertEquals(1, merged.getTraits().size()); + assertEquals("new-trait", merged.getTraits().get(0).getValue()); + assertEquals("org-1", merged.getKeys().get("org_id")); + assertEquals("acc_1", merged.getAccountId()); + 
} + + @Test + void partialCompany_overwritesCreditBalance() { + Map existingBalances = new HashMap<>(); + existingBalances.put("credit-1", 100.0); + + RulesengineCompany existing = RulesengineCompany.builder() + .accountId("acc_1") + .environmentId("env_1") + .id("comp-1") + .keys(Collections.singletonMap("id", "comp-1")) + .traits(Collections.emptyList()) + .metrics(Collections.emptyList()) + .rules(Collections.emptyList()) + .billingProductIds(Collections.emptyList()) + .creditBalances(existingBalances) + .planIds(Collections.emptyList()) + .planVersionIds(Collections.emptyList()) + .build(); + + ObjectNode partial = objectMapper.createObjectNode(); + partial.put("id", "comp-1"); + ObjectNode newBalances = objectMapper.createObjectNode(); + newBalances.put("credit-1", 50.0); + partial.set("credit_balances", newBalances); + + RulesengineCompany merged = EntityMerge.partialCompany(existing, partial); + + assertEquals(50.0, merged.getCreditBalances().get("credit-1")); + } + + @Test + void partialCompany_emptyEntitlementsClearsExisting() { + List existingEntitlements = + Collections.singletonList(RulesengineFeatureEntitlement.builder() + .featureId("feat-1") + .featureKey("feat-key-1") + .valueType(RulesengineEntitlementValueType.BOOLEAN) + .build()); + + RulesengineCompany existing = RulesengineCompany.builder() + .accountId("acc_1") + .environmentId("env_1") + .id("comp-1") + .keys(Collections.singletonMap("id", "comp-1")) + .traits(Collections.emptyList()) + .metrics(Collections.emptyList()) + .rules(Collections.emptyList()) + .billingProductIds(Collections.emptyList()) + .creditBalances(Collections.emptyMap()) + .planIds(Collections.emptyList()) + .planVersionIds(Collections.emptyList()) + .entitlements(existingEntitlements) + .build(); + + ObjectNode partial = objectMapper.createObjectNode(); + partial.put("id", "comp-1"); + partial.set("entitlements", objectMapper.createArrayNode()); + + RulesengineCompany merged = EntityMerge.partialCompany(existing, partial); 
+ + assertTrue(merged.getEntitlements().isPresent(), "entitlements should be present"); + assertTrue(merged.getEntitlements().get().isEmpty(), "entitlements should be empty after clearing"); + } + + @Test + void partialCompany_missingIdThrowsError() { + RulesengineCompany existing = buildCompany("comp-1", Collections.singletonMap("id", "comp-1")); + + ObjectNode partial = objectMapper.createObjectNode(); + partial.putNull("id"); + partial.put("account_id", "acc_new"); + + assertThrows(Exception.class, () -> EntityMerge.partialCompany(existing, partial)); + } + + @Test + void partialCompany_doesNotMutateOriginal() { + Map existingKeys = new HashMap<>(); + existingKeys.put("org_id", "org-1"); + + RulesengineCompany existing = buildCompany("comp-1", existingKeys); + String originalAccountId = existing.getAccountId(); + + ObjectNode partial = objectMapper.createObjectNode(); + partial.put("id", "comp-1"); + partial.put("account_id", "acc_new"); + ObjectNode newKeys = objectMapper.createObjectNode(); + newKeys.put("org_id", "org-updated"); + newKeys.put("new_key", "new-val"); + partial.set("keys", newKeys); + + EntityMerge.partialCompany(existing, partial); + + assertEquals(originalAccountId, existing.getAccountId()); + assertEquals("org-1", existing.getKeys().get("org_id")); + assertNull(existing.getKeys().get("new_key")); + } + + @Test + void partialCompany_replacesRules() { + RulesengineRule originalRule = RulesengineRule.builder() + .accountId("acc_1") + .environmentId("env_1") + .id("rule-1") + .name("Original Rule") + .priority(1) + .ruleType(RulesengineRuleRuleType.GLOBAL_OVERRIDE) + .value(true) + .build(); + + RulesengineCompany existing = RulesengineCompany.builder() + .accountId("acc_1") + .environmentId("env_1") + .id("comp-1") + .keys(Collections.singletonMap("id", "comp-1")) + .traits(Collections.emptyList()) + .metrics(Collections.emptyList()) + .rules(Collections.singletonList(originalRule)) + .billingProductIds(Collections.emptyList()) + 
.creditBalances(Collections.emptyMap()) + .planIds(Collections.emptyList()) + .planVersionIds(Collections.emptyList()) + .build(); + + ObjectNode partial = objectMapper.createObjectNode(); + partial.put("id", "comp-1"); + ArrayNode newRules = objectMapper.createArrayNode(); + ObjectNode rule = objectMapper.createObjectNode(); + rule.put("account_id", "acc_1"); + rule.put("environment_id", "env_1"); + rule.put("id", "rule-2"); + rule.put("name", "Replacement Rule"); + rule.put("priority", 2); + rule.put("rule_type", "global_override"); + rule.put("value", false); + newRules.add(rule); + partial.set("rules", newRules); + + RulesengineCompany merged = EntityMerge.partialCompany(existing, partial); + + assertEquals(1, merged.getRules().size()); + assertEquals("rule-2", merged.getRules().get(0).getId()); + assertEquals("Replacement Rule", merged.getRules().get(0).getName()); + assertEquals(1, existing.getRules().size()); + assertEquals("rule-1", existing.getRules().get(0).getId()); + } + + @Test + void partialCompany_fullEntityPartialMessage() { + RulesengineCompany existing = buildCompany("comp-1", Collections.singletonMap("id", "comp-1")); + + ObjectNode partial = objectMapper.createObjectNode(); + partial.put("id", "comp-1"); + partial.put("account_id", "acc_new"); + partial.put("environment_id", "env_new"); + + ObjectNode newKeys = objectMapper.createObjectNode(); + newKeys.put("org_id", "org-new"); + partial.set("keys", newKeys); + + ArrayNode newTraits = objectMapper.createArrayNode(); + ObjectNode trait = objectMapper.createObjectNode(); + trait.put("value", "full-trait"); + newTraits.add(trait); + partial.set("traits", newTraits); + + ObjectNode newBalances = objectMapper.createObjectNode(); + newBalances.put("credit-x", 999.0); + partial.set("credit_balances", newBalances); + + ArrayNode newRules = objectMapper.createArrayNode(); + ObjectNode rule = objectMapper.createObjectNode(); + rule.put("account_id", "acc_new"); + rule.put("environment_id", "env_new"); + 
rule.put("id", "rule-full"); + rule.put("name", "Full Rule"); + rule.put("priority", 1); + rule.put("rule_type", "global_override"); + rule.put("value", true); + newRules.add(rule); + partial.set("rules", newRules); + + RulesengineCompany merged = EntityMerge.partialCompany(existing, partial); + + assertEquals("acc_new", merged.getAccountId()); + assertEquals("env_new", merged.getEnvironmentId()); + assertEquals("org-new", merged.getKeys().get("org_id")); + assertEquals(1, merged.getTraits().size()); + assertEquals("full-trait", merged.getTraits().get(0).getValue()); + assertEquals(999.0, merged.getCreditBalances().get("credit-x")); + assertEquals(1, merged.getRules().size()); + assertEquals("rule-full", merged.getRules().get(0).getId()); + } + + // --- User partial merge tests (additional) --- + + @Test + void partialUser_onlyTraits() { + Map existingKeys = new HashMap<>(); + existingKeys.put("email", "test@example.com"); + + RulesengineUser existing = RulesengineUser.builder() + .accountId("acc_1") + .environmentId("env_1") + .id("user-1") + .keys(existingKeys) + .traits(Collections.singletonList( + RulesengineTrait.builder().value("old-trait").build())) + .rules(Collections.emptyList()) + .build(); + + ObjectNode partial = objectMapper.createObjectNode(); + partial.put("id", "user-1"); + ArrayNode newTraits = objectMapper.createArrayNode(); + ObjectNode trait = objectMapper.createObjectNode(); + trait.put("value", "new-trait"); + newTraits.add(trait); + partial.set("traits", newTraits); + + RulesengineUser merged = EntityMerge.partialUser(existing, partial); + + assertEquals(1, merged.getTraits().size()); + assertEquals("new-trait", merged.getTraits().get(0).getValue()); + assertEquals("test@example.com", merged.getKeys().get("email")); + } + + @Test + void partialUser_missingIdThrowsError() { + RulesengineUser existing = buildUser("user-1", Collections.singletonMap("id", "user-1")); + + ObjectNode partial = objectMapper.createObjectNode(); + 
partial.putNull("id"); + partial.put("account_id", "acc_new"); + + assertThrows(Exception.class, () -> EntityMerge.partialUser(existing, partial)); + } + + @Test + void partialUser_doesNotMutateOriginal() { + Map existingKeys = new HashMap<>(); + existingKeys.put("email", "test@example.com"); + + RulesengineUser existing = buildUser("user-1", existingKeys); + String originalAccountId = existing.getAccountId(); + + ObjectNode partial = objectMapper.createObjectNode(); + partial.put("id", "user-1"); + partial.put("account_id", "acc_new"); + ObjectNode newKeys = objectMapper.createObjectNode(); + newKeys.put("email", "updated@example.com"); + newKeys.put("new_key", "new-val"); + partial.set("keys", newKeys); + + EntityMerge.partialUser(existing, partial); + + assertEquals(originalAccountId, existing.getAccountId()); + assertEquals("test@example.com", existing.getKeys().get("email")); + assertNull(existing.getKeys().get("new_key")); + } + + @Test + void partialUser_fullEntityPartialMessage() { + RulesengineUser existing = buildUser("user-1", Collections.singletonMap("email", "old@example.com")); + + ObjectNode partial = objectMapper.createObjectNode(); + partial.put("id", "user-1"); + partial.put("account_id", "acc_new"); + partial.put("environment_id", "env_new"); + + ObjectNode newKeys = objectMapper.createObjectNode(); + newKeys.put("email", "new@example.com"); + newKeys.put("user_id", "u-new"); + partial.set("keys", newKeys); + + ArrayNode newTraits = objectMapper.createArrayNode(); + ObjectNode trait = objectMapper.createObjectNode(); + trait.put("value", "full-trait"); + newTraits.add(trait); + partial.set("traits", newTraits); + + ArrayNode newRules = objectMapper.createArrayNode(); + ObjectNode rule = objectMapper.createObjectNode(); + rule.put("account_id", "acc_new"); + rule.put("environment_id", "env_new"); + rule.put("id", "rule-u1"); + rule.put("name", "User Rule"); + rule.put("priority", 1); + rule.put("rule_type", "global_override"); + rule.put("value", 
true); + newRules.add(rule); + partial.set("rules", newRules); + + RulesengineUser merged = EntityMerge.partialUser(existing, partial); + + assertEquals("acc_new", merged.getAccountId()); + assertEquals("env_new", merged.getEnvironmentId()); + assertEquals("new@example.com", merged.getKeys().get("email")); + assertEquals("u-new", merged.getKeys().get("user_id")); + assertEquals(1, merged.getTraits().size()); + assertEquals("full-trait", merged.getTraits().get(0).getValue()); + assertEquals(1, merged.getRules().size()); + assertEquals("rule-u1", merged.getRules().get(0).getId()); + } + // --- Helpers --- private RulesengineCompany buildCompany(String id, Map keys) { diff --git a/src/test/java/com/schematic/api/datastream/WasmRulesEngineTest.java b/src/test/java/com/schematic/api/datastream/WasmRulesEngineTest.java index 8d13a08..def7f71 100644 --- a/src/test/java/com/schematic/api/datastream/WasmRulesEngineTest.java +++ b/src/test/java/com/schematic/api/datastream/WasmRulesEngineTest.java @@ -8,10 +8,18 @@ import com.schematic.api.types.RulesengineFlag; import com.schematic.api.types.RulesengineRule; import com.schematic.api.types.RulesengineRuleRuleType; +import com.schematic.api.types.RulesengineTrait; import com.schematic.api.types.RulesengineUser; import java.util.ArrayList; +import java.util.Arrays; import java.util.Collections; +import java.util.HashMap; import java.util.List; +import java.util.Map; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; +import java.util.concurrent.atomic.AtomicInteger; import org.junit.jupiter.api.BeforeAll; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.extension.ExtendWith; @@ -133,6 +141,227 @@ void camelToSnake_convertsCorrectly() { assertEquals("id", WasmRulesEngine.camelToSnake("id")); } + @Test + void checkFlag_standardRule() throws Exception { + List rules = new ArrayList<>(); + rules.add(RulesengineRule.builder() + 
.accountId("acc_1") + .environmentId("env_1") + .id("rule_standard") + .name("standard") + .priority(1) + .ruleType(RulesengineRuleRuleType.STANDARD) + .value(true) + .conditions(Collections.emptyList()) + .conditionGroups(Collections.emptyList()) + .build()); + + RulesengineFlag flag = buildFlag("standard-flag", false, rules); + + RulesengineCheckFlagResult result = engine.checkFlag(flag, null, null); + + assertNotNull(result); + assertEquals("standard-flag", result.getFlagKey()); + } + + @Test + void checkFlag_companyWithPlanIds() throws Exception { + List rules = new ArrayList<>(); + rules.add(buildRule("rule1", "global_override", true, 1)); + RulesengineFlag flag = buildFlag("plan-flag", false, rules); + + RulesengineCompany company = RulesengineCompany.builder() + .accountId("acc_1") + .environmentId("env_1") + .id("comp-plan") + .keys(Collections.singletonMap("id", "comp-plan")) + .planIds(Arrays.asList("plan_1", "plan_2")) + .traits(Collections.emptyList()) + .metrics(Collections.emptyList()) + .rules(Collections.emptyList()) + .billingProductIds(Collections.emptyList()) + .creditBalances(Collections.emptyMap()) + .planVersionIds(Collections.emptyList()) + .build(); + + RulesengineCheckFlagResult result = engine.checkFlag(flag, company, null); + + assertNotNull(result); + assertTrue(result.getValue()); + } + + @Test + void checkFlag_companyWithTraits() throws Exception { + List rules = new ArrayList<>(); + rules.add(buildRule("rule1", "global_override", true, 1)); + RulesengineFlag flag = buildFlag("trait-flag", false, rules); + + List traits = new ArrayList<>(); + traits.add(RulesengineTrait.builder().value("premium").build()); + + RulesengineCompany company = RulesengineCompany.builder() + .accountId("acc_1") + .environmentId("env_1") + .id("comp-traits") + .keys(Collections.singletonMap("id", "comp-traits")) + .traits(traits) + .metrics(Collections.emptyList()) + .rules(Collections.emptyList()) + .billingProductIds(Collections.emptyList()) + 
.creditBalances(Collections.emptyMap()) + .planIds(Collections.emptyList()) + .planVersionIds(Collections.emptyList()) + .build(); + + RulesengineCheckFlagResult result = engine.checkFlag(flag, company, null); + + assertNotNull(result); + assertTrue(result.getValue()); + } + + @Test + void checkFlag_companyWithCreditBalances() throws Exception { + List rules = new ArrayList<>(); + rules.add(buildRule("rule1", "global_override", true, 1)); + RulesengineFlag flag = buildFlag("credit-flag", false, rules); + + Map creditBalances = new HashMap<>(); + creditBalances.put("credits", 100.0); + creditBalances.put("bonus_credits", 50.5); + + RulesengineCompany company = RulesengineCompany.builder() + .accountId("acc_1") + .environmentId("env_1") + .id("comp-credits") + .keys(Collections.singletonMap("id", "comp-credits")) + .creditBalances(creditBalances) + .traits(Collections.emptyList()) + .metrics(Collections.emptyList()) + .rules(Collections.emptyList()) + .billingProductIds(Collections.emptyList()) + .planIds(Collections.emptyList()) + .planVersionIds(Collections.emptyList()) + .build(); + + RulesengineCheckFlagResult result = engine.checkFlag(flag, company, null); + + assertNotNull(result); + assertTrue(result.getValue()); + } + + @Test + void checkFlag_companyWithMultipleKeys() throws Exception { + List rules = new ArrayList<>(); + rules.add(buildRule("rule1", "global_override", true, 1)); + RulesengineFlag flag = buildFlag("multikey-flag", false, rules); + + Map keys = new HashMap<>(); + keys.put("id", "comp-multi"); + keys.put("slug", "comp-multi-slug"); + keys.put("external_id", "ext-123"); + + RulesengineCompany company = RulesengineCompany.builder() + .accountId("acc_1") + .environmentId("env_1") + .id("comp-multi") + .keys(keys) + .traits(Collections.emptyList()) + .metrics(Collections.emptyList()) + .rules(Collections.emptyList()) + .billingProductIds(Collections.emptyList()) + .creditBalances(Collections.emptyMap()) + .planIds(Collections.emptyList()) + 
.planVersionIds(Collections.emptyList()) + .build(); + + RulesengineCheckFlagResult result = engine.checkFlag(flag, company, null); + + assertNotNull(result); + assertTrue(result.getValue()); + } + + @Test + void checkFlag_userWithTraits() throws Exception { + List rules = new ArrayList<>(); + rules.add(buildRule("rule1", "global_override", true, 1)); + RulesengineFlag flag = buildFlag("user-trait-flag", false, rules); + + List traits = new ArrayList<>(); + traits.add(RulesengineTrait.builder().value("admin").build()); + traits.add(RulesengineTrait.builder().value("active").build()); + + RulesengineUser user = RulesengineUser.builder() + .accountId("acc_1") + .environmentId("env_1") + .id("user-traits") + .keys(Collections.singletonMap("id", "user-traits")) + .traits(traits) + .rules(Collections.emptyList()) + .build(); + + RulesengineCheckFlagResult result = engine.checkFlag(flag, null, user); + + assertNotNull(result); + assertTrue(result.getValue()); + } + + @Test + void checkFlag_consistentResultsForIdenticalInputs() throws Exception { + List rules = new ArrayList<>(); + rules.add(buildRule("rule1", "global_override", true, 1)); + RulesengineFlag flag = buildFlag("determinism-flag", false, rules); + RulesengineCompany company = buildCompany("comp-det"); + RulesengineUser user = buildUser("user-det"); + + RulesengineCheckFlagResult result1 = engine.checkFlag(flag, company, user); + RulesengineCheckFlagResult result2 = engine.checkFlag(flag, company, user); + RulesengineCheckFlagResult result3 = engine.checkFlag(flag, company, user); + + assertNotNull(result1); + assertNotNull(result2); + assertNotNull(result3); + assertEquals(result1.getValue(), result2.getValue()); + assertEquals(result2.getValue(), result3.getValue()); + assertEquals(result1.getFlagKey(), result2.getFlagKey()); + assertEquals(result2.getFlagKey(), result3.getFlagKey()); + } + + @Test + void checkFlag_concurrentOperations() throws Exception { + int threadCount = 10; + ExecutorService executor = 
Executors.newFixedThreadPool(threadCount); + CountDownLatch startLatch = new CountDownLatch(1); + CountDownLatch doneLatch = new CountDownLatch(threadCount); + AtomicInteger errorCount = new AtomicInteger(0); + + List rules = new ArrayList<>(); + rules.add(buildRule("rule1", "global_override", true, 1)); + RulesengineFlag flag = buildFlag("concurrent-flag", false, rules); + RulesengineCompany company = buildCompany("comp-conc"); + RulesengineUser user = buildUser("user-conc"); + + for (int i = 0; i < threadCount; i++) { + executor.execute(() -> { + try { + startLatch.await(); + RulesengineCheckFlagResult result = engine.checkFlag(flag, company, user); + assertNotNull(result); + assertTrue(result.getValue()); + } catch (Exception e) { + errorCount.incrementAndGet(); + } finally { + doneLatch.countDown(); + } + }); + } + + startLatch.countDown(); + doneLatch.await(); + executor.shutdown(); + + assertEquals(0, errorCount.get(), "Concurrent checkFlag calls should complete without errors"); + } + // --- Helpers using generated types --- private RulesengineFlag buildFlag(String key, boolean defaultValue, List rules) { From e0c7a36fbb22ad26abd132122e0e9bcfa0863043 Mon Sep 17 00:00:00 2001 From: Christopher Brady Date: Thu, 2 Apr 2026 09:26:20 -0600 Subject: [PATCH 08/27] add http event sender close method --- .../java/com/schematic/api/HttpEventSender.java | 13 ++++++++++--- src/main/java/com/schematic/api/Schematic.java | 5 ++++- 2 files changed, 14 insertions(+), 4 deletions(-) diff --git a/src/main/java/com/schematic/api/HttpEventSender.java b/src/main/java/com/schematic/api/HttpEventSender.java index 0204c48..6605668 100644 --- a/src/main/java/com/schematic/api/HttpEventSender.java +++ b/src/main/java/com/schematic/api/HttpEventSender.java @@ -7,6 +7,7 @@ import com.schematic.api.core.ObjectMappers; import com.schematic.api.logger.SchematicLogger; import com.schematic.api.types.CreateEventRequestBody; +import java.io.Closeable; import java.io.IOException; import 
java.util.List; import okhttp3.MediaType; @@ -16,14 +17,14 @@ import okhttp3.Response; /** - * Sends event batches directly to the Schematic event capture service, - * matching the Go SDK's behavior of posting to https://c.schematichq.com/batch. + * Sends event batches directly to the Schematic event capture service + * by posting to https://c.schematichq.com/batch. * *

Each event payload is built from the Fern-generated {@link CreateEventRequestBody} model * with {@code api_key} injected, so any fields added to the generated model are automatically * included in the capture service payload. */ -public class HttpEventSender { +public class HttpEventSender implements Closeable { private static final String DEFAULT_EVENT_CAPTURE_BASE_URL = "https://c.schematichq.com"; private static final MediaType JSON = MediaType.get("application/json; charset=utf-8"); @@ -84,4 +85,10 @@ public void sendBatch(List events) throws IOException { } } } + + @Override + public void close() { + httpClient.dispatcher().executorService().shutdownNow(); + httpClient.connectionPool().evictAll(); + } } diff --git a/src/main/java/com/schematic/api/Schematic.java b/src/main/java/com/schematic/api/Schematic.java index c4ba9d9..20cc801 100644 --- a/src/main/java/com/schematic/api/Schematic.java +++ b/src/main/java/com/schematic/api/Schematic.java @@ -39,6 +39,7 @@ public final class Schematic extends BaseSchematic implements AutoCloseable { private final String apiKey; private final Thread shutdownHook; private final boolean offline; + private final HttpEventSender eventSender; private final DataStreamClient dataStreamClient; private final DatastreamOptions datastreamOptions; @@ -56,7 +57,7 @@ private Schematic(Builder builder) { : Collections.singletonList(new LocalCache()); this.datastreamOptions = builder.datastreamOptions; - HttpEventSender eventSender = new HttpEventSender(null, this.apiKey, builder.eventCaptureBaseUrl, this.logger); + this.eventSender = new HttpEventSender(null, this.apiKey, builder.eventCaptureBaseUrl, this.logger); this.eventBuffer = new EventBuffer( eventSender, this.logger, @@ -92,6 +93,7 @@ private Schematic(Builder builder) { this.dataStreamClient.close(); } this.eventBuffer.close(); + this.eventSender.close(); } catch (Exception e) { logger.error("Error during Schematic shutdown: " + e.getMessage()); } @@ -400,6 +402,7 @@ public void 
close() { dataStreamClient.close(); } eventBuffer.close(); + eventSender.close(); } catch (Exception e) { logger.error("Error closing Schematic client: " + e.getMessage()); } From ae4a5e0b9ad05656c899f55e5f2976280be3aba3 Mon Sep 17 00:00:00 2001 From: Christopher Brady Date: Thu, 2 Apr 2026 09:26:32 -0600 Subject: [PATCH 09/27] document java 11+ support --- CLAUDE.md | 2 +- README.md | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/CLAUDE.md b/CLAUDE.md index 0e85abf..c575422 100644 --- a/CLAUDE.md +++ b/CLAUDE.md @@ -48,7 +48,7 @@ This file provides guidance to Claude Code (claude.ai/code) when working with co ## Architecture Overview -This repository contains the official Schematic Java SDK, supporting Java 8+. Schematic is a feature flag and product analytics service. +This repository contains the official Schematic Java SDK, supporting Java 11+. Schematic is a feature flag and product analytics service. ### Key Components diff --git a/README.md b/README.md index cca1513..ac18c66 100644 --- a/README.md +++ b/README.md @@ -1,6 +1,6 @@ # Schematic Java Library -The official Schematic Java library, supporting Java 8+. +The official Schematic Java library, supporting Java 11+. 
## Installation and Setup From 761be04fa3b99cf54ec94e05864d0c99a54341ec Mon Sep 17 00:00:00 2001 From: Christopher Brady Date: Thu, 2 Apr 2026 09:26:55 -0600 Subject: [PATCH 10/27] impement pending request functionality --- .../api/datastream/DataStreamClient.java | 235 +++++++++++++++++- 1 file changed, 224 insertions(+), 11 deletions(-) diff --git a/src/main/java/com/schematic/api/datastream/DataStreamClient.java b/src/main/java/com/schematic/api/datastream/DataStreamClient.java index da5bec2..7800efd 100644 --- a/src/main/java/com/schematic/api/datastream/DataStreamClient.java +++ b/src/main/java/com/schematic/api/datastream/DataStreamClient.java @@ -17,11 +17,16 @@ import com.schematic.api.types.RulesengineUser; import java.io.Closeable; import java.io.IOException; +import java.util.ArrayList; +import java.util.List; import java.util.Map; +import java.util.concurrent.CompletableFuture; +import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.Executors; import java.util.concurrent.ScheduledExecutorService; import java.util.concurrent.ScheduledFuture; import java.util.concurrent.TimeUnit; +import java.util.concurrent.TimeoutException; import java.util.concurrent.atomic.AtomicBoolean; import okhttp3.OkHttpClient; import okhttp3.Request; @@ -32,7 +37,7 @@ * caches entities (flags, companies, users), and provides flag checking. * *

Entities are cached as typed objects ({@link RulesengineFlag}, {@link RulesengineCompany}, - * {@link RulesengineUser}) matching the approach used by the Python and Go SDKs. + * {@link RulesengineUser}). */ public class DataStreamClient implements Closeable { @@ -43,6 +48,9 @@ public class DataStreamClient implements Closeable { static final String USER_PREFIX = "user:"; static final String USER_KEY_PREFIX = "user_key:"; + // Timeout for waiting on entity responses from WebSocket + private static final long RESOURCE_TIMEOUT_MS = 2_000; + private final DatastreamOptions options; private final String apiKey; private final String apiUrl; @@ -55,6 +63,12 @@ public class DataStreamClient implements Closeable { private final CacheProvider companyCache; private final CacheProvider userCache; + // Pending entity requests: cache key -> list of futures waiting for that entity. + private final ConcurrentHashMap>> pendingCompanyRequests = + new ConcurrentHashMap<>(); + private final ConcurrentHashMap>> pendingUserRequests = + new ConcurrentHashMap<>(); + // WebSocket client (direct mode only) private volatile DataStreamWebSocketClient wsClient; @@ -157,21 +171,196 @@ public RulesengineCheckFlagResult checkFlag(String flagKey, Map return evaluateFlag(flag, cachedCompany, cachedUser); } - // Step 5: Direct mode - fetch missing entities via datastream + // Step 5: Direct mode - fetch missing entities via datastream and wait for response if (!isConnected()) { throw new DataStreamException("Datastream not connected and required entities not in cache"); } if (needsCompany && cachedCompany == null) { - requestEntity(EntityType.COMPANY, company); + cachedCompany = getCompany(company); } if (needsUser && cachedUser == null) { - requestEntity(EntityType.USER, user); + cachedUser = getUser(user); } return evaluateFlag(flag, cachedCompany, cachedUser); } + /** + * Fetches a company via the datastream WebSocket, waiting for the response with a timeout. 
+ * Deduplicates concurrent requests for the same entity. + * Uses futures with timeout to wait for the response, deduplicating concurrent requests. + */ + private RulesengineCompany getCompany(Map keys) { + // Check cache first + RulesengineCompany cached = getCachedCompany(keys); + if (cached != null) { + return cached; + } + + CompletableFuture future = new CompletableFuture<>(); + boolean shouldSendRequest = registerPendingCompanyRequest(keys, future); + + if (shouldSendRequest) { + requestEntity(EntityType.COMPANY, keys); + } + + try { + return future.get(RESOURCE_TIMEOUT_MS, TimeUnit.MILLISECONDS); + } catch (TimeoutException e) { + log("warn", "Timeout waiting for company data"); + } catch (Exception e) { + log("warn", "Error waiting for company data: " + e.getMessage()); + } finally { + cleanupPendingCompanyRequests(keys, future); + } + + return null; + } + + /** + * Fetches a user via the datastream WebSocket, waiting for the response with a timeout. + * Deduplicates concurrent requests for the same entity. + */ + private RulesengineUser getUser(Map keys) { + // Check cache first + RulesengineUser cached = getCachedUser(keys); + if (cached != null) { + return cached; + } + + CompletableFuture future = new CompletableFuture<>(); + boolean shouldSendRequest = registerPendingUserRequest(keys, future); + + if (shouldSendRequest) { + requestEntity(EntityType.USER, keys); + } + + try { + return future.get(RESOURCE_TIMEOUT_MS, TimeUnit.MILLISECONDS); + } catch (TimeoutException e) { + log("warn", "Timeout waiting for user data"); + } catch (Exception e) { + log("warn", "Error waiting for user data: " + e.getMessage()); + } finally { + cleanupPendingUserRequests(keys, future); + } + + return null; + } + + /** + * Registers a future for a pending company request. Returns true if this is the + * first request for this entity (meaning the caller should send the WebSocket message). 
+ */ + private boolean registerPendingCompanyRequest( + Map keys, CompletableFuture future) { + boolean shouldSend = true; + for (Map.Entry entry : keys.entrySet()) { + String cacheKey = companyCacheKey(entry.getKey(), entry.getValue()); + synchronized (pendingCompanyRequests) { + List> existing = pendingCompanyRequests.get(cacheKey); + if (existing != null) { + // Another thread already requested this entity + existing.add(future); + shouldSend = false; + } else { + List> futures = new ArrayList<>(); + futures.add(future); + pendingCompanyRequests.put(cacheKey, futures); + } + } + } + return shouldSend; + } + + /** + * Registers a future for a pending user request. Returns true if this is the + * first request for this entity (meaning the caller should send the WebSocket message). + */ + private boolean registerPendingUserRequest(Map keys, CompletableFuture future) { + boolean shouldSend = true; + for (Map.Entry entry : keys.entrySet()) { + String cacheKey = userCacheKey(entry.getKey(), entry.getValue()); + synchronized (pendingUserRequests) { + List> existing = pendingUserRequests.get(cacheKey); + if (existing != null) { + existing.add(future); + shouldSend = false; + } else { + List> futures = new ArrayList<>(); + futures.add(future); + pendingUserRequests.put(cacheKey, futures); + } + } + } + return shouldSend; + } + + /** + * Notifies all pending futures waiting for a company with the given keys. + */ + private void notifyPendingCompanyRequests(Map keys, RulesengineCompany company) { + synchronized (pendingCompanyRequests) { + for (Map.Entry entry : keys.entrySet()) { + String cacheKey = companyCacheKey(entry.getKey(), entry.getValue()); + List> futures = pendingCompanyRequests.remove(cacheKey); + if (futures != null) { + for (CompletableFuture future : futures) { + future.complete(company); + } + } + } + } + } + + /** + * Notifies all pending futures waiting for a user with the given keys. 
+ */ + private void notifyPendingUserRequests(Map keys, RulesengineUser user) { + synchronized (pendingUserRequests) { + for (Map.Entry entry : keys.entrySet()) { + String cacheKey = userCacheKey(entry.getKey(), entry.getValue()); + List> futures = pendingUserRequests.remove(cacheKey); + if (futures != null) { + for (CompletableFuture future : futures) { + future.complete(user); + } + } + } + } + } + + private void cleanupPendingCompanyRequests(Map keys, CompletableFuture future) { + synchronized (pendingCompanyRequests) { + for (Map.Entry entry : keys.entrySet()) { + String cacheKey = companyCacheKey(entry.getKey(), entry.getValue()); + List> futures = pendingCompanyRequests.get(cacheKey); + if (futures != null) { + futures.remove(future); + if (futures.isEmpty()) { + pendingCompanyRequests.remove(cacheKey); + } + } + } + } + } + + private void cleanupPendingUserRequests(Map keys, CompletableFuture future) { + synchronized (pendingUserRequests) { + for (Map.Entry entry : keys.entrySet()) { + String cacheKey = userCacheKey(entry.getKey(), entry.getValue()); + List> futures = pendingUserRequests.get(cacheKey); + if (futures != null) { + futures.remove(future); + if (futures.isEmpty()) { + pendingUserRequests.remove(cacheKey); + } + } + } + } + } + /** * Evaluates a flag using the rules engine. Falls back to the flag's default value * if the rules engine is not available. 
@@ -232,10 +421,9 @@ public RulesengineCompany getCachedCompany(Map keys) { if (keys == null || keys.isEmpty()) { return null; } - // Look up by first key pair, matching the Node/Python SDK approach + // Look up by first key pair Map.Entry first = keys.entrySet().iterator().next(); - String cacheKey = COMPANY_KEY_PREFIX + first.getKey() + ":" + first.getValue(); - return companyCache.get(cacheKey); + return companyCache.get(companyCacheKey(first.getKey(), first.getValue())); } /** @@ -246,8 +434,7 @@ public RulesengineUser getCachedUser(Map keys) { return null; } Map.Entry first = keys.entrySet().iterator().next(); - String cacheKey = USER_KEY_PREFIX + first.getKey() + ":" + first.getValue(); - return userCache.get(cacheKey); + return userCache.get(userCacheKey(first.getKey(), first.getValue())); } /** @@ -462,6 +649,13 @@ private void handleCompanyMessage(DataStreamResp message, MessageType messageTyp } else if (messageType == MessageType.DELETE) { String entityId = message.getEntityId(); if (entityId != null) { + // Clean up key-based cache entries before removing by ID + RulesengineCompany existing = companyCache.get(COMPANY_PREFIX + entityId); + if (existing != null) { + for (Map.Entry entry : existing.getKeys().entrySet()) { + companyCache.set(companyCacheKey(entry.getKey(), entry.getValue()), null); + } + } companyCache.set(COMPANY_PREFIX + entityId, null); } } @@ -499,6 +693,13 @@ private void handleUserMessage(DataStreamResp message, MessageType messageType) } else if (messageType == MessageType.DELETE) { String entityId = message.getEntityId(); if (entityId != null) { + // Clean up key-based cache entries before removing by ID + RulesengineUser existing = userCache.get(USER_PREFIX + entityId); + if (existing != null) { + for (Map.Entry entry : existing.getKeys().entrySet()) { + userCache.set(userCacheKey(entry.getKey(), entry.getValue()), null); + } + } userCache.set(USER_PREFIX + entityId, null); } } @@ -537,9 +738,11 @@ private void cacheCompany(JsonNode 
data) { private void cacheCompanyObject(RulesengineCompany company) { companyCache.set(COMPANY_PREFIX + company.getId(), company); for (Map.Entry entry : company.getKeys().entrySet()) { - String cacheKey = COMPANY_KEY_PREFIX + entry.getKey() + ":" + entry.getValue(); + String cacheKey = companyCacheKey(entry.getKey(), entry.getValue()); companyCache.set(cacheKey, company); } + // Notify any pending requests waiting for this company + notifyPendingCompanyRequests(company.getKeys(), company); } private void cacheUser(JsonNode data) { @@ -554,9 +757,19 @@ private void cacheUser(JsonNode data) { private void cacheUserObject(RulesengineUser user) { userCache.set(USER_PREFIX + user.getId(), user); for (Map.Entry entry : user.getKeys().entrySet()) { - String cacheKey = USER_KEY_PREFIX + entry.getKey() + ":" + entry.getValue(); + String cacheKey = userCacheKey(entry.getKey(), entry.getValue()); userCache.set(cacheKey, user); } + // Notify any pending requests waiting for this user + notifyPendingUserRequests(user.getKeys(), user); + } + + private static String companyCacheKey(String key, String value) { + return COMPANY_KEY_PREFIX + key + ":" + value; + } + + private static String userCacheKey(String key, String value) { + return USER_KEY_PREFIX + key + ":" + value; } private void log(String level, String message) { From fa2e9f190b23e440f6440c35f5e95b7c5b805514 Mon Sep 17 00:00:00 2001 From: Christopher Brady Date: Thu, 2 Apr 2026 09:27:05 -0600 Subject: [PATCH 11/27] update comments --- .../schematic/api/datastream/DataStreamMessages.java | 2 +- .../api/datastream/DataStreamWebSocketClient.java | 5 ++--- .../java/com/schematic/api/datastream/EntityMerge.java | 5 ++--- .../com/schematic/api/datastream/WasmRulesEngine.java | 10 +++++++--- 4 files changed, 12 insertions(+), 10 deletions(-) diff --git a/src/main/java/com/schematic/api/datastream/DataStreamMessages.java b/src/main/java/com/schematic/api/datastream/DataStreamMessages.java index 0f777c3..73c0205 100644 --- 
a/src/main/java/com/schematic/api/datastream/DataStreamMessages.java +++ b/src/main/java/com/schematic/api/datastream/DataStreamMessages.java @@ -117,7 +117,7 @@ public Map getKeys() { } } - /** Wrapper for request messages (matches Go's DataStreamBaseReq). */ + /** Wrapper for request messages. */ public static class DataStreamBaseReq { @JsonProperty("data") private final DataStreamReq data; diff --git a/src/main/java/com/schematic/api/datastream/DataStreamWebSocketClient.java b/src/main/java/com/schematic/api/datastream/DataStreamWebSocketClient.java index 32504ee..4aed0b4 100644 --- a/src/main/java/com/schematic/api/datastream/DataStreamWebSocketClient.java +++ b/src/main/java/com/schematic/api/datastream/DataStreamWebSocketClient.java @@ -27,8 +27,7 @@ * WebSocket client for the Schematic DataStream protocol. * *

Provides automatic reconnection with exponential backoff, ping/pong keep-alive, - * and a message worker pool for processing incoming messages. Mirrors the Go - * {@code schematic-datastream-ws} package. + * and a message worker pool for processing incoming messages. */ public class DataStreamWebSocketClient implements Closeable { @@ -42,7 +41,7 @@ public interface ConnectionReadyHandler { void handle() throws Exception; } - // Default constants (matching Go SDK) + // Default constants private static final long DEFAULT_PONG_WAIT_MS = 40_000; private static final long DEFAULT_PING_PERIOD_MS = 30_000; private static final long WRITE_WAIT_MS = 10_000; diff --git a/src/main/java/com/schematic/api/datastream/EntityMerge.java b/src/main/java/com/schematic/api/datastream/EntityMerge.java index 236941c..76d8c7e 100644 --- a/src/main/java/com/schematic/api/datastream/EntityMerge.java +++ b/src/main/java/com/schematic/api/datastream/EntityMerge.java @@ -12,7 +12,7 @@ /** * Merge helpers for partial datastream updates. * - *

Matches the Python SDK's merge.py: + *

Merge strategy: *

    *
  • Company: additive merge for {@code keys} and {@code credit_balances}, * upsert for {@code metrics}, replace for all other fields
  • @@ -164,8 +164,7 @@ private static void upsertMetrics(ObjectNode base, JsonNode partialMetrics) { } /** - * Matches metrics by (event_subtype, period, month_reset) key, - * matching the Python SDK's _metric_key() function. + * Matches metrics by the composite key (event_subtype, period, month_reset). */ private static boolean metricsMatch(JsonNode a, JsonNode b) { return textEquals(a, b, "event_subtype") && textEquals(a, b, "period") && textEquals(a, b, "month_reset"); diff --git a/src/main/java/com/schematic/api/datastream/WasmRulesEngine.java b/src/main/java/com/schematic/api/datastream/WasmRulesEngine.java index ad50531..7fe3719 100644 --- a/src/main/java/com/schematic/api/datastream/WasmRulesEngine.java +++ b/src/main/java/com/schematic/api/datastream/WasmRulesEngine.java @@ -32,7 +32,7 @@ * flag rules locally using cached flag definitions, company data, and user data. * Uses the Chicory pure-Java WASM runtime (no native dependencies). * - *

    Data flow matches the Python/Node SDKs: + *

    Data flow: *

      *
    • Input: typed objects serialized to snake_case JSON via Jackson @JsonProperty annotations
    • *
    • Output: camelCase JSON from WASM, converted to snake_case, deserialized into generated types
    • @@ -136,7 +136,7 @@ public RulesengineCheckFlagResult checkFlag(RulesengineFlag flag, RulesengineCom String resultJson = callWasm(inputJson); // WASM returns camelCase JSON; generated types expect snake_case. - // Convert keys before deserializing, matching the Python SDK's approach. + // Convert keys before deserializing. JsonNode camelNode = mapper.readTree(resultJson); JsonNode snakeNode = camelToSnakeKeys(camelNode); @@ -161,6 +161,11 @@ public String getVersionKey() { } } + /** + * Calls into the WASM runtime. This method is synchronized because the WASM instance + * uses shared linear memory — concurrent calls would corrupt each other's data. + * Under high concurrency, flag evaluations are serialized through this lock. + */ private synchronized String callWasm(String inputJson) { byte[] data = inputJson.getBytes(StandardCharsets.UTF_8); int length = data.length; @@ -193,7 +198,6 @@ private synchronized String callWasm(String inputJson) { /** * Recursively converts JSON object keys from camelCase to snake_case. - * Matches the Python SDK's _deep_camel_to_snake() function. 
*/ static JsonNode camelToSnakeKeys(JsonNode node) { if (node.isObject()) { From 875e393376bfcbe2737133af1da631848b466e0e Mon Sep 17 00:00:00 2001 From: Christopher Brady Date: Thu, 2 Apr 2026 09:41:13 -0600 Subject: [PATCH 12/27] download wasm engine in ci actions --- .github/workflows/ci.yml | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 1f32646..c13c064 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -2,6 +2,9 @@ name: ci on: [push] +env: + GH_TOKEN: ${{ secrets.GH_TOKEN }} + concurrency: group: ${{ github.workflow }}-${{ github.ref }} cancel-in-progress: false @@ -38,8 +41,12 @@ jobs: java-version: "11" architecture: x64 + - name: Download WASM binary + run: ./scripts/download-wasm.sh + - name: Test run: ./gradlew test + publish: needs: [ compile, test ] if: github.event_name == 'push' && contains(github.ref, 'refs/tags/') @@ -56,6 +63,9 @@ jobs: java-version: "11" architecture: x64 + - name: Download WASM binary + run: ./scripts/download-wasm.sh + - name: Publish to maven run: | ./gradlew sonatypeCentralUpload From a1118d690475ee9912834d4bb5499c00796f6dd4 Mon Sep 17 00:00:00 2001 From: Christopher Brady Date: Thu, 2 Apr 2026 14:15:52 -0600 Subject: [PATCH 13/27] normalize parmeters --- src/main/java/com/schematic/api/EventBuffer.java | 2 +- .../schematic/api/datastream/DataStreamWebSocketClient.java | 5 ++--- .../com/schematic/api/datastream/DatastreamOptions.java | 6 +++--- src/test/java/com/schematic/api/TestEventBuffer.java | 4 ++-- 4 files changed, 8 insertions(+), 9 deletions(-) diff --git a/src/main/java/com/schematic/api/EventBuffer.java b/src/main/java/com/schematic/api/EventBuffer.java index 804f4e9..b723fb4 100644 --- a/src/main/java/com/schematic/api/EventBuffer.java +++ b/src/main/java/com/schematic/api/EventBuffer.java @@ -18,7 +18,7 @@ public class EventBuffer implements AutoCloseable { private static final int DEFAULT_MAX_BATCH_SIZE = 100; private 
static final int DEFAULT_MAX_QUEUE_SIZE = 10_000; private static final int MAX_RETRY_ATTEMPTS = 3; - private static final Duration RETRY_INITIAL_DELAY = Duration.ofMillis(100); + private static final Duration RETRY_INITIAL_DELAY = Duration.ofMillis(1000); private final ConcurrentLinkedQueue events; private final int maxBatchSize; diff --git a/src/main/java/com/schematic/api/datastream/DataStreamWebSocketClient.java b/src/main/java/com/schematic/api/datastream/DataStreamWebSocketClient.java index 4aed0b4..411626e 100644 --- a/src/main/java/com/schematic/api/datastream/DataStreamWebSocketClient.java +++ b/src/main/java/com/schematic/api/datastream/DataStreamWebSocketClient.java @@ -42,9 +42,8 @@ public interface ConnectionReadyHandler { } // Default constants - private static final long DEFAULT_PONG_WAIT_MS = 40_000; - private static final long DEFAULT_PING_PERIOD_MS = 30_000; - private static final long WRITE_WAIT_MS = 10_000; + private static final long DEFAULT_PONG_WAIT_MS = 60_000; + private static final long DEFAULT_PING_PERIOD_MS = 54_000; private static final int DEFAULT_MAX_RECONNECT_ATTEMPTS = 10; private static final long DEFAULT_MIN_RECONNECT_DELAY_MS = 1_000; private static final long DEFAULT_MAX_RECONNECT_DELAY_MS = 30_000; diff --git a/src/main/java/com/schematic/api/datastream/DatastreamOptions.java b/src/main/java/com/schematic/api/datastream/DatastreamOptions.java index 9f5041a..f693480 100644 --- a/src/main/java/com/schematic/api/datastream/DatastreamOptions.java +++ b/src/main/java/com/schematic/api/datastream/DatastreamOptions.java @@ -36,12 +36,12 @@ private DatastreamOptions(Builder builder) { ? builder.replicatorHealthCheckInterval : DEFAULT_REPLICATOR_HEALTH_CHECK_INTERVAL; this.flagCacheProvider = - builder.flagCacheProvider != null ? builder.flagCacheProvider : new LocalCache<>(10_000, this.cacheTTL); + builder.flagCacheProvider != null ? 
builder.flagCacheProvider : new LocalCache<>(1_000, this.cacheTTL); this.companyCacheProvider = builder.companyCacheProvider != null ? builder.companyCacheProvider - : new LocalCache<>(10_000, this.cacheTTL); + : new LocalCache<>(1_000, this.cacheTTL); this.userCacheProvider = - builder.userCacheProvider != null ? builder.userCacheProvider : new LocalCache<>(10_000, this.cacheTTL); + builder.userCacheProvider != null ? builder.userCacheProvider : new LocalCache<>(1_000, this.cacheTTL); } public static Builder builder() { diff --git a/src/test/java/com/schematic/api/TestEventBuffer.java b/src/test/java/com/schematic/api/TestEventBuffer.java index 137cdf4..978330b 100644 --- a/src/test/java/com/schematic/api/TestEventBuffer.java +++ b/src/test/java/com/schematic/api/TestEventBuffer.java @@ -123,8 +123,8 @@ void flushWithError_ShouldLogError() throws IOException { try { // Wait for all retries to complete - // Initial delay + 2nd retry + 3rd retry = 100ms + 200ms + 400ms = 700ms - Thread.sleep(800); + // Initial delay + 2nd retry + 3rd retry = 1000ms + 2000ms + 4000ms = 7000ms + Thread.sleep(8000); } catch (InterruptedException e) { Thread.currentThread().interrupt(); // Restore the interrupt flag fail("Test was interrupted while waiting for retries"); From 2b299edd1340b07ab4ca8183c62a28e8558d0ef8 Mon Sep 17 00:00:00 2001 From: Christopher Brady Date: Thu, 2 Apr 2026 14:18:41 -0600 Subject: [PATCH 14/27] enqueue event when doing clientside evaluation --- .../java/com/schematic/api/Schematic.java | 31 ++++++++++++++++++- .../api/datastream/DataStreamClient.java | 1 + 2 files changed, 31 insertions(+), 1 deletion(-) diff --git a/src/main/java/com/schematic/api/Schematic.java b/src/main/java/com/schematic/api/Schematic.java index 20cc801..8f00c7a 100644 --- a/src/main/java/com/schematic/api/Schematic.java +++ b/src/main/java/com/schematic/api/Schematic.java @@ -17,6 +17,7 @@ import com.schematic.api.types.CheckFlagResponseData; import 
com.schematic.api.types.CreateEventRequestBody; import com.schematic.api.types.EventBody; +import com.schematic.api.types.EventBodyFlagCheck; import com.schematic.api.types.EventBodyIdentify; import com.schematic.api.types.EventBodyIdentifyCompany; import com.schematic.api.types.EventBodyTrack; @@ -269,7 +270,35 @@ public RulesengineCheckFlagResult checkFlagWithEntitlement( // Try datastream first if available if (dataStreamClient != null && dataStreamClient.isConnected()) { try { - return dataStreamClient.checkFlag(flagKey, company, user); + RulesengineCheckFlagResult result = dataStreamClient.checkFlag(flagKey, company, user); + + // Enqueue flag_check event for analytics + try { + EventBodyFlagCheck flagCheckBody = EventBodyFlagCheck.builder() + .flagKey(flagKey) + .reason(result.getReason()) + .value(result.getValue()) + .companyId(result.getCompanyId().orElse(null)) + .userId(result.getUserId().orElse(null)) + .flagId(result.getFlagId().orElse(null)) + .ruleId(result.getRuleId().orElse(null)) + .reqCompany(company) + .reqUser(user) + .error(result.getErr().orElse(null)) + .build(); + + CreateEventRequestBody event = CreateEventRequestBody.builder() + .eventType(EventType.FLAG_CHECK) + .body(EventBody.of(flagCheckBody)) + .sentAt(OffsetDateTime.now()) + .build(); + + eventBuffer.push(event); + } catch (Exception e) { + logger.error("Failed to enqueue flag_check event: " + e.getMessage()); + } + + return result; } catch (Exception e) { logger.debug( "Datastream flag check failed for " + flagKey + ", falling back to API: " + e.getMessage()); diff --git a/src/main/java/com/schematic/api/datastream/DataStreamClient.java b/src/main/java/com/schematic/api/datastream/DataStreamClient.java index 7800efd..3012ce0 100644 --- a/src/main/java/com/schematic/api/datastream/DataStreamClient.java +++ b/src/main/java/com/schematic/api/datastream/DataStreamClient.java @@ -11,6 +11,7 @@ import com.schematic.api.datastream.DataStreamMessages.EntityType; import 
com.schematic.api.datastream.DataStreamMessages.MessageType; import com.schematic.api.logger.SchematicLogger; +import com.schematic.api.types.EventBodyTrack; import com.schematic.api.types.RulesengineCheckFlagResult; import com.schematic.api.types.RulesengineCompany; import com.schematic.api.types.RulesengineFlag; From 3222c5f71b204559c843f2187be31dba0a85d2b0 Mon Sep 17 00:00:00 2001 From: Christopher Brady Date: Thu, 2 Apr 2026 14:21:40 -0600 Subject: [PATCH 15/27] add update company metrics --- .../java/com/schematic/api/Schematic.java | 9 ++ .../api/datastream/DataStreamClient.java | 49 ++++++++++ .../api/datastream/DataStreamClientTest.java | 91 +++++++++++++++++++ 3 files changed, 149 insertions(+) diff --git a/src/main/java/com/schematic/api/Schematic.java b/src/main/java/com/schematic/api/Schematic.java index 8f00c7a..fd59a3d 100644 --- a/src/main/java/com/schematic/api/Schematic.java +++ b/src/main/java/com/schematic/api/Schematic.java @@ -412,6 +412,15 @@ public void track( .build(); eventBuffer.push(event); + + // Update cached company metrics if datastream is active + if (company != null && !company.isEmpty() && dataStreamClient != null && dataStreamClient.isConnected()) { + try { + dataStreamClient.updateCompanyMetrics(body); + } catch (Exception e2) { + logger.error("Failed to update company metrics: " + e2.getMessage()); + } + } } catch (Exception e) { logger.error("Error sending track event: " + e.getMessage()); } diff --git a/src/main/java/com/schematic/api/datastream/DataStreamClient.java b/src/main/java/com/schematic/api/datastream/DataStreamClient.java index 3012ce0..2ce696b 100644 --- a/src/main/java/com/schematic/api/datastream/DataStreamClient.java +++ b/src/main/java/com/schematic/api/datastream/DataStreamClient.java @@ -14,6 +14,7 @@ import com.schematic.api.types.EventBodyTrack; import com.schematic.api.types.RulesengineCheckFlagResult; import com.schematic.api.types.RulesengineCompany; +import 
com.schematic.api.types.RulesengineCompanyMetric; import com.schematic.api.types.RulesengineFlag; import com.schematic.api.types.RulesengineUser; import java.io.Closeable; @@ -408,6 +409,48 @@ RulesengineCheckFlagResult evaluateFlag(RulesengineFlag flag, RulesengineCompany .build(); } + /** + * Updates cached company metrics locally when a track event is received. + * Increments metric values matching the event name by the event quantity. + */ + public void updateCompanyMetrics(EventBodyTrack event) { + if (event == null) { + return; + } + + Map keys = event.getCompany().orElse(null); + if (keys == null || keys.isEmpty()) { + return; + } + + RulesengineCompany company = getCachedCompany(keys); + if (company == null) { + return; + } + + String eventName = event.getEvent(); + int quantity = event.getQuantity().orElse(1); + + List updatedMetrics = new ArrayList<>(); + for (RulesengineCompanyMetric metric : company.getMetrics()) { + if (eventName.equals(metric.getEventSubtype())) { + updatedMetrics.add(RulesengineCompanyMetric.builder() + .from(metric) + .value(metric.getValue() + quantity) + .build()); + } else { + updatedMetrics.add(metric); + } + } + + RulesengineCompany updated = RulesengineCompany.builder() + .from(company) + .metrics(updatedMetrics) + .build(); + + cacheCompanyObject(updated); + } + /** * Retrieves a cached flag definition by key. */ @@ -603,9 +646,15 @@ private void handleFlagMessage(DataStreamResp message, MessageType messageType) if (messageType == MessageType.FULL) { if (data.isArray()) { + List cacheKeys = new ArrayList<>(); for (JsonNode flagData : data) { cacheFlag(flagData); + String key = flagData.has("key") ? 
flagData.get("key").asText() : null; + if (key != null) { + cacheKeys.add(FLAG_PREFIX + key); + } } + flagCache.deleteMissing(cacheKeys); } else { cacheFlag(data); } diff --git a/src/test/java/com/schematic/api/datastream/DataStreamClientTest.java b/src/test/java/com/schematic/api/datastream/DataStreamClientTest.java index c11e691..73ffbde 100644 --- a/src/test/java/com/schematic/api/datastream/DataStreamClientTest.java +++ b/src/test/java/com/schematic/api/datastream/DataStreamClientTest.java @@ -11,6 +11,7 @@ import com.schematic.api.datastream.DataStreamMessages.EntityType; import com.schematic.api.datastream.DataStreamMessages.MessageType; import com.schematic.api.logger.SchematicLogger; +import com.schematic.api.types.EventBodyTrack; import com.schematic.api.types.RulesengineCheckFlagResult; import com.schematic.api.types.RulesengineCompany; import com.schematic.api.types.RulesengineFlag; @@ -583,6 +584,96 @@ void start_throwsAfterClose() { assertThrows(IllegalStateException.class, () -> client.start()); } + // --- deleteMissing tests --- + + @Test + void handleMessage_fullFlags_removesStaleFlags() { + // Cache three flags individually + client.handleMessage( + buildResp(EntityType.FLAG.getValue(), MessageType.FULL.getValue(), null, flagNode("flag-a", true))); + client.handleMessage( + buildResp(EntityType.FLAG.getValue(), MessageType.FULL.getValue(), null, flagNode("flag-b", true))); + client.handleMessage( + buildResp(EntityType.FLAG.getValue(), MessageType.FULL.getValue(), null, flagNode("flag-c", true))); + + assertNotNull(client.getCachedFlag("flag-a")); + assertNotNull(client.getCachedFlag("flag-b")); + assertNotNull(client.getCachedFlag("flag-c")); + + // Send a bulk FLAGS message with only flag-a and flag-b + ArrayNode bulkFlags = objectMapper.createArrayNode(); + bulkFlags.add(flagNode("flag-a", true)); + bulkFlags.add(flagNode("flag-b", false)); + + client.handleMessage(buildResp(EntityType.FLAGS.getValue(), MessageType.FULL.getValue(), null, 
bulkFlags)); + + // flag-a and flag-b should remain, flag-c should be removed + assertNotNull(client.getCachedFlag("flag-a")); + assertNotNull(client.getCachedFlag("flag-b")); + assertNull(client.getCachedFlag("flag-c")); + } + + // --- updateCompanyMetrics tests --- + + @Test + void updateCompanyMetrics_incrementsMatchingMetric() { + // Cache a company with a metric + ObjectNode companyData = companyNode("comp-metrics", "customer_id", "cust-1"); + ArrayNode metrics = objectMapper.createArrayNode(); + ObjectNode metric = objectMapper.createObjectNode(); + metric.put("account_id", "acc_1"); + metric.put("company_id", "comp-metrics"); + metric.put("environment_id", "env_1"); + metric.put("event_subtype", "api_calls"); + metric.put("period", "current_month"); + metric.put("month_reset", "first"); + metric.put("value", 10); + metric.put("created_at", "2026-01-01T00:00:00Z"); + metrics.add(metric); + companyData.set("metrics", metrics); + + client.handleMessage( + buildResp(EntityType.COMPANY.getValue(), MessageType.FULL.getValue(), "comp-metrics", companyData)); + + // Build a track event matching the metric + EventBodyTrack trackEvent = EventBodyTrack.builder() + .event("api_calls") + .company(Collections.singletonMap("customer_id", "cust-1")) + .quantity(5) + .build(); + + client.updateCompanyMetrics(trackEvent); + + // Verify the metric was incremented + RulesengineCompany updated = client.getCachedCompany(Collections.singletonMap("customer_id", "cust-1")); + assertNotNull(updated); + assertEquals(15, updated.getMetrics().get(0).getValue()); + } + + @Test + void updateCompanyMetrics_nullEvent_noOp() { + assertDoesNotThrow(() -> client.updateCompanyMetrics(null)); + } + + @Test + void updateCompanyMetrics_companyNotInCache_noOp() { + EventBodyTrack trackEvent = EventBodyTrack.builder() + .event("api_calls") + .company(Collections.singletonMap("customer_id", "nonexistent")) + .quantity(1) + .build(); + + assertDoesNotThrow(() -> 
client.updateCompanyMetrics(trackEvent)); + } + + @Test + void updateCompanyMetrics_noCompanyKeys_noOp() { + EventBodyTrack trackEvent = + EventBodyTrack.builder().event("api_calls").quantity(1).build(); + + assertDoesNotThrow(() -> client.updateCompanyMetrics(trackEvent)); + } + // --- Helper methods: build JSON matching the generated type's @JsonProperty format --- private ObjectNode flagNode(String key, boolean defaultValue) { From 32eb43900b34871d9a9db9def4135b2806cb01f7 Mon Sep 17 00:00:00 2001 From: Christopher Brady Date: Thu, 2 Apr 2026 15:03:17 -0600 Subject: [PATCH 16/27] add retry jitter --- src/main/java/com/schematic/api/EventBuffer.java | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/src/main/java/com/schematic/api/EventBuffer.java b/src/main/java/com/schematic/api/EventBuffer.java index b723fb4..61af8d7 100644 --- a/src/main/java/com/schematic/api/EventBuffer.java +++ b/src/main/java/com/schematic/api/EventBuffer.java @@ -119,7 +119,10 @@ private void sendBatchWithRetry(List batch, int retryCou } catch (Exception e) { if (retryCount < MAX_RETRY_ATTEMPTS) { - long delayMillis = RETRY_INITIAL_DELAY.toMillis() * (1L << retryCount); + long baseDelay = RETRY_INITIAL_DELAY.toMillis() * (1L << retryCount); + // Add ±25% jitter + double jitter = (Math.random() - 0.5) * 0.5 * baseDelay; + long delayMillis = Math.max(0, baseDelay + (long) jitter); logger.warn( "Failed to send event batch, attempting retry %d of %d in %d ms", retryCount + 1, MAX_RETRY_ATTEMPTS, delayMillis); From 054fc785042fd2b0ae31e446d0b410b26ee302f7 Mon Sep 17 00:00:00 2001 From: Christopher Brady Date: Thu, 2 Apr 2026 15:04:45 -0600 Subject: [PATCH 17/27] add redis option and more cache functionality --- .fernignore | 2 + build.gradle | 1 + .../schematic/api/cache/CacheProvider.java | 16 ++ .../com/schematic/api/cache/LocalCache.java | 74 ++++++++- .../schematic/api/cache/RedisCacheConfig.java | 150 +++++++++++++++++ .../api/cache/RedisCacheProvider.java | 153 
++++++++++++++++++ .../datastream/DataStreamCacheFactory.java | 92 +++++++++++ .../api/datastream/DataStreamClient.java | 24 ++- .../api/datastream/DatastreamOptions.java | 28 +++- .../java/com/schematic/api/TestCache.java | 46 ++++++ .../api/cache/RedisCacheConfigTest.java | 78 +++++++++ .../DataStreamCacheFactoryTest.java | 82 ++++++++++ 12 files changed, 729 insertions(+), 17 deletions(-) create mode 100644 src/main/java/com/schematic/api/cache/RedisCacheConfig.java create mode 100644 src/main/java/com/schematic/api/cache/RedisCacheProvider.java create mode 100644 src/main/java/com/schematic/api/datastream/DataStreamCacheFactory.java create mode 100644 src/test/java/com/schematic/api/cache/RedisCacheConfigTest.java create mode 100644 src/test/java/com/schematic/api/datastream/DataStreamCacheFactoryTest.java diff --git a/.fernignore b/.fernignore index d43ca82..8d2c2df 100644 --- a/.fernignore +++ b/.fernignore @@ -13,6 +13,8 @@ src/main/java/com/schematic/api/Schematic.java src/main/java/com/schematic/api/cache/CacheProvider.java src/main/java/com/schematic/api/cache/CachedItem.java src/main/java/com/schematic/api/cache/LocalCache.java +src/main/java/com/schematic/api/cache/RedisCacheConfig.java +src/main/java/com/schematic/api/cache/RedisCacheProvider.java src/main/java/com/schematic/api/core/NoOpHttpClient.java src/main/java/com/schematic/api/logger/ConsoleLogger.java src/main/java/com/schematic/api/datastream/ diff --git a/build.gradle b/build.gradle index 7136389..8600572 100644 --- a/build.gradle +++ b/build.gradle @@ -20,6 +20,7 @@ dependencies { api 'com.fasterxml.jackson.datatype:jackson-datatype-jsr310:2.18.6' implementation 'com.dylibso.chicory:runtime:1.4.0' implementation 'com.dylibso.chicory:wasi:1.4.0' + implementation 'redis.clients:jedis:5.2.0' testImplementation 'org.junit.jupiter:junit-jupiter-api:5.8.2' testImplementation 'org.junit.jupiter:junit-jupiter-engine:5.8.2' testImplementation 'org.junit.jupiter:junit-jupiter-params:5.8.2' diff --git 
a/src/main/java/com/schematic/api/cache/CacheProvider.java b/src/main/java/com/schematic/api/cache/CacheProvider.java index 93a8dcf..7c7a837 100644 --- a/src/main/java/com/schematic/api/cache/CacheProvider.java +++ b/src/main/java/com/schematic/api/cache/CacheProvider.java @@ -1,6 +1,7 @@ package com.schematic.api.cache; import java.time.Duration; +import java.util.List; public interface CacheProvider { T get(String key); @@ -8,4 +9,19 @@ public interface CacheProvider { void set(String key, T val, Duration ttlOverride); void set(String key, T val); + + /** + * Removes a cached entry by key. + */ + default void delete(String key) { + // no-op by default for backwards compatibility + } + + /** + * Deletes all cached entries whose keys are not in the provided list. + * Used to synchronize cache contents after receiving a full dataset from the server. + */ + default void deleteMissing(List keysToKeep) { + // no-op by default for backwards compatibility + } } diff --git a/src/main/java/com/schematic/api/cache/LocalCache.java b/src/main/java/com/schematic/api/cache/LocalCache.java index ac80ee5..b3be23a 100644 --- a/src/main/java/com/schematic/api/cache/LocalCache.java +++ b/src/main/java/com/schematic/api/cache/LocalCache.java @@ -2,19 +2,28 @@ import java.time.Duration; import java.time.Instant; +import java.util.ArrayList; +import java.util.HashSet; import java.util.LinkedList; +import java.util.List; +import java.util.Set; import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.Executors; +import java.util.concurrent.ScheduledExecutorService; +import java.util.concurrent.TimeUnit; import java.util.concurrent.locks.ReentrantLock; -public class LocalCache implements CacheProvider { +public class LocalCache implements CacheProvider, AutoCloseable { public static final int DEFAULT_CACHE_CAPACITY = 1000; public static final Duration DEFAULT_CACHE_TTL = Duration.ofMillis(5000); // 5000 milliseconds + private static final Duration CLEANUP_INTERVAL = 
Duration.ofSeconds(30); private final ConcurrentHashMap> cache; private final LinkedList lruList; private final ReentrantLock lock; private final int maxItems; private final Duration ttl; + private final ScheduledExecutorService cleanupScheduler; public LocalCache() { this(DEFAULT_CACHE_CAPACITY, DEFAULT_CACHE_TTL); @@ -26,6 +35,18 @@ public LocalCache(int maxItems, Duration ttl) { this.lock = new ReentrantLock(); this.maxItems = maxItems; this.ttl = ttl; + + // Start background cleanup thread for expired items + this.cleanupScheduler = Executors.newSingleThreadScheduledExecutor(r -> { + Thread t = new Thread(r, "schematic-cache-cleanup"); + t.setDaemon(true); + return t; + }); + this.cleanupScheduler.scheduleAtFixedRate( + this::removeExpiredItems, + CLEANUP_INTERVAL.getSeconds(), + CLEANUP_INTERVAL.getSeconds(), + TimeUnit.SECONDS); } @Override @@ -92,6 +113,57 @@ public void set(String key, T val) { set(key, val, null); } + @Override + public void deleteMissing(List keysToKeep) { + if (maxItems == 0) { + return; + } + + Set keepSet = keysToKeep != null ? 
/**
 * Configuration for connecting to Redis. Pass this to
 * {@code DatastreamOptions.Builder#redisCache(RedisCacheConfig)} and the SDK
 * will create and manage the Redis connection internally.
 *
 * <p>Instances are immutable. The endpoint list is defensively copied and
 * exposed as an unmodifiable view, so mutating the list handed to the
 * builder after {@link Builder#build()} cannot affect an existing config,
 * and callers cannot mutate the config's list.
 */
public class RedisCacheConfig {

    private final List<String> endpoints;
    private final String username;
    private final String password;
    private final int database;
    private final boolean ssl;
    private final String keyPrefix;
    private final Duration connectTimeout;
    private final Duration readTimeout;
    private final int maxPoolSize;

    private RedisCacheConfig(Builder builder) {
        // Defensive copy; a null OR empty list falls back to the default endpoint
        // so downstream code (e.g. getEndpoints().get(0)) can always rely on at
        // least one entry being present.
        this.endpoints = (builder.endpoints == null || builder.endpoints.isEmpty())
                ? Collections.singletonList("localhost:6379")
                : Collections.unmodifiableList(new ArrayList<>(builder.endpoints));
        this.username = builder.username;
        this.password = builder.password;
        this.database = builder.database;
        this.ssl = builder.ssl;
        this.keyPrefix = builder.keyPrefix != null ? builder.keyPrefix : "schematic:";
        this.connectTimeout = builder.connectTimeout != null ? builder.connectTimeout : Duration.ofSeconds(5);
        this.readTimeout = builder.readTimeout != null ? builder.readTimeout : Duration.ofSeconds(3);
        this.maxPoolSize = builder.maxPoolSize > 0 ? builder.maxPoolSize : 8;
    }

    public static Builder builder() {
        return new Builder();
    }

    /** Redis endpoints in "host:port" form; never null or empty, unmodifiable. */
    public List<String> getEndpoints() {
        return endpoints;
    }

    public String getUsername() {
        return username;
    }

    public String getPassword() {
        return password;
    }

    public int getDatabase() {
        return database;
    }

    public boolean isSsl() {
        return ssl;
    }

    /** Key prefix for all Redis cache entries; defaults to "schematic:". */
    public String getKeyPrefix() {
        return keyPrefix;
    }

    /** Connection timeout; defaults to 5 seconds. */
    public Duration getConnectTimeout() {
        return connectTimeout;
    }

    /** Socket read timeout; defaults to 3 seconds. */
    public Duration getReadTimeout() {
        return readTimeout;
    }

    /** Maximum pooled connections; defaults to 8 when unset or non-positive. */
    public int getMaxPoolSize() {
        return maxPoolSize;
    }

    /** Fluent builder; every setting is optional and falls back to the documented default. */
    public static class Builder {
        private List<String> endpoints;
        private String username;
        private String password;
        private int database;
        private boolean ssl;
        private String keyPrefix;
        private Duration connectTimeout;
        private Duration readTimeout;
        private int maxPoolSize;

        /**
         * Redis server endpoints in "host:port" format. Defaults to ["localhost:6379"].
         */
        public Builder endpoints(List<String> endpoints) {
            this.endpoints = endpoints;
            return this;
        }

        /**
         * Single Redis server endpoint. Convenience for single-node setups.
         */
        public Builder endpoint(String endpoint) {
            this.endpoints = Collections.singletonList(endpoint);
            return this;
        }

        public Builder username(String username) {
            this.username = username;
            return this;
        }

        public Builder password(String password) {
            this.password = password;
            return this;
        }

        public Builder database(int database) {
            this.database = database;
            return this;
        }

        public Builder ssl(boolean ssl) {
            this.ssl = ssl;
            return this;
        }

        /**
         * Key prefix for all Redis cache entries. Defaults to "schematic:".
         */
        public Builder keyPrefix(String keyPrefix) {
            this.keyPrefix = keyPrefix;
            return this;
        }

        public Builder connectTimeout(Duration connectTimeout) {
            this.connectTimeout = connectTimeout;
            return this;
        }

        public Builder readTimeout(Duration readTimeout) {
            this.readTimeout = readTimeout;
            return this;
        }

        public Builder maxPoolSize(int maxPoolSize) {
            this.maxPoolSize = maxPoolSize;
            return this;
        }

        public RedisCacheConfig build() {
            return new RedisCacheConfig(this);
        }
    }
}

      Values are serialized as JSON strings. Requires a pre-configured {@link JedisPooled} + * instance, which handles connection pooling internally. + */ +public class RedisCacheProvider implements CacheProvider { + + private static final int SCAN_BATCH_SIZE = 100; + private static final int DELETE_BATCH_SIZE = 100; + + private final JedisPooled jedis; + private final long defaultTTLSeconds; + private final String keyPrefix; + private final ObjectMapper objectMapper; + private final JavaType valueType; + + /** + * Creates a Redis cache provider. + * + * @param jedis a pre-configured JedisPooled instance + * @param ttl default time-to-live for cached entries + * @param keyPrefix prefix for all Redis keys (e.g. "schematic:") + * @param valueClass the class of cached values (needed for JSON deserialization) + */ + public RedisCacheProvider(JedisPooled jedis, Duration ttl, String keyPrefix, Class valueClass) { + this.jedis = jedis; + this.defaultTTLSeconds = ttl != null ? ttl.getSeconds() : 0; + this.keyPrefix = keyPrefix != null ? keyPrefix : "schematic:"; + this.objectMapper = ObjectMappers.JSON_MAPPER; + this.valueType = objectMapper.constructType(valueClass); + } + + @Override + public T get(String key) { + String fullKey = getFullKey(key); + String value = jedis.get(fullKey); + if (value == null) { + return null; + } + try { + return objectMapper.readValue(value, valueType); + } catch (JsonProcessingException e) { + return null; + } + } + + @Override + public void set(String key, T val, Duration ttlOverride) { + String fullKey = getFullKey(key); + if (val == null) { + jedis.del(fullKey); + return; + } + + String json; + try { + json = objectMapper.writeValueAsString(val); + } catch (JsonProcessingException e) { + return; + } + + long ttl = ttlOverride != null ? 
ttlOverride.getSeconds() : defaultTTLSeconds; + if (ttl > 0) { + jedis.setex(fullKey, ttl, json); + } else { + jedis.set(fullKey, json); + } + } + + @Override + public void set(String key, T val) { + set(key, val, null); + } + + @Override + public void delete(String key) { + jedis.del(getFullKey(key)); + } + + @Override + public void deleteMissing(List keysToKeep) { + if (keysToKeep == null || keysToKeep.isEmpty()) { + return; + } + + // Build set of full keys to keep for O(1) lookup + Set keepSet = new HashSet<>(); + String scanPrefix = null; + for (String key : keysToKeep) { + String fullKey = getFullKey(key); + keepSet.add(fullKey); + // Extract prefix from first key for scanning + if (scanPrefix == null) { + int colonIdx = fullKey.indexOf(':'); + if (colonIdx >= 0) { + // Use the key prefix + the cache key prefix (e.g. "schematic:flags:") + int secondColon = fullKey.indexOf(':', colonIdx + 1); + if (secondColon >= 0) { + scanPrefix = fullKey.substring(0, secondColon + 1); + } else { + scanPrefix = fullKey.substring(0, colonIdx + 1); + } + } + } + } + + if (scanPrefix == null) { + return; + } + + // SCAN for all keys matching the prefix + ScanParams scanParams = new ScanParams().match(scanPrefix + "*").count(SCAN_BATCH_SIZE); + String cursor = ScanParams.SCAN_POINTER_START; + List keysToDelete = new ArrayList<>(); + + do { + ScanResult result = jedis.scan(cursor, scanParams); + for (String existingKey : result.getResult()) { + if (!keepSet.contains(existingKey)) { + keysToDelete.add(existingKey); + } + } + cursor = result.getCursor(); + } while (!cursor.equals(ScanParams.SCAN_POINTER_START)); + + // Delete in batches + for (int i = 0; i < keysToDelete.size(); i += DELETE_BATCH_SIZE) { + int end = Math.min(i + DELETE_BATCH_SIZE, keysToDelete.size()); + List batch = keysToDelete.subList(i, end); + jedis.del(batch.toArray(new String[0])); + } + } + + private String getFullKey(String key) { + return keyPrefix + key; + } +} diff --git 
a/src/main/java/com/schematic/api/datastream/DataStreamCacheFactory.java b/src/main/java/com/schematic/api/datastream/DataStreamCacheFactory.java new file mode 100644 index 0000000..dff24d8 --- /dev/null +++ b/src/main/java/com/schematic/api/datastream/DataStreamCacheFactory.java @@ -0,0 +1,92 @@ +package com.schematic.api.datastream; + +import com.schematic.api.cache.CacheProvider; +import com.schematic.api.cache.LocalCache; +import com.schematic.api.cache.RedisCacheConfig; +import com.schematic.api.cache.RedisCacheProvider; +import com.schematic.api.types.RulesengineCompany; +import com.schematic.api.types.RulesengineFlag; +import com.schematic.api.types.RulesengineUser; +import java.time.Duration; +import org.apache.commons.pool2.impl.GenericObjectPoolConfig; +import redis.clients.jedis.Connection; +import redis.clients.jedis.DefaultJedisClientConfig; +import redis.clients.jedis.HostAndPort; +import redis.clients.jedis.JedisPooled; + +/** + * Factory for creating cache providers used by the DataStream client. + * Handles Redis client creation and cache provider selection logic, + * keeping DatastreamOptions as a pure configuration holder. + */ +final class DataStreamCacheFactory { + + private static final int DEFAULT_CACHE_SIZE = 1_000; + private static final Duration MAX_CACHE_TTL = Duration.ofDays(30); + + private DataStreamCacheFactory() {} + + static CacheProvider buildFlagCache( + DatastreamOptions options, JedisPooled redisClient, String keyPrefix) { + if (options.getFlagCacheProvider() != null) { + return options.getFlagCacheProvider(); + } + // Flag cache uses the greater of maxCacheTTL or configured TTL + Duration flagTTL = options.getCacheTTL().compareTo(MAX_CACHE_TTL) > 0 ? 
options.getCacheTTL() : MAX_CACHE_TTL; + if (redisClient != null) { + return new RedisCacheProvider<>(redisClient, flagTTL, keyPrefix, RulesengineFlag.class); + } + return new LocalCache<>(DEFAULT_CACHE_SIZE, flagTTL); + } + + static CacheProvider buildCompanyCache( + DatastreamOptions options, JedisPooled redisClient, String keyPrefix) { + if (options.getCompanyCacheProvider() != null) { + return options.getCompanyCacheProvider(); + } + if (redisClient != null) { + return new RedisCacheProvider<>(redisClient, options.getCacheTTL(), keyPrefix, RulesengineCompany.class); + } + return new LocalCache<>(DEFAULT_CACHE_SIZE, options.getCacheTTL()); + } + + static CacheProvider buildUserCache( + DatastreamOptions options, JedisPooled redisClient, String keyPrefix) { + if (options.getUserCacheProvider() != null) { + return options.getUserCacheProvider(); + } + if (redisClient != null) { + return new RedisCacheProvider<>(redisClient, options.getCacheTTL(), keyPrefix, RulesengineUser.class); + } + return new LocalCache<>(DEFAULT_CACHE_SIZE, options.getCacheTTL()); + } + + static JedisPooled buildRedisClient(RedisCacheConfig config) { + if (config == null) { + return null; + } + + String endpoint = config.getEndpoints().get(0); + String[] parts = endpoint.split(":"); + String host = parts[0]; + int port = parts.length > 1 ? 
Integer.parseInt(parts[1]) : 6379; + + DefaultJedisClientConfig.Builder clientConfig = DefaultJedisClientConfig.builder() + .connectionTimeoutMillis((int) config.getConnectTimeout().toMillis()) + .socketTimeoutMillis((int) config.getReadTimeout().toMillis()) + .database(config.getDatabase()) + .ssl(config.isSsl()); + + if (config.getUsername() != null) { + clientConfig.user(config.getUsername()); + } + if (config.getPassword() != null) { + clientConfig.password(config.getPassword()); + } + + GenericObjectPoolConfig poolConfig = new GenericObjectPoolConfig<>(); + poolConfig.setMaxTotal(config.getMaxPoolSize()); + + return new JedisPooled(poolConfig, new HostAndPort(host, port), clientConfig.build()); + } +} diff --git a/src/main/java/com/schematic/api/datastream/DataStreamClient.java b/src/main/java/com/schematic/api/datastream/DataStreamClient.java index 2ce696b..3d3244a 100644 --- a/src/main/java/com/schematic/api/datastream/DataStreamClient.java +++ b/src/main/java/com/schematic/api/datastream/DataStreamClient.java @@ -94,9 +94,17 @@ public DataStreamClient( this.logger = logger; this.objectMapper = ObjectMappers.JSON_MAPPER; this.rulesEngine = rulesEngine; - this.flagCache = options.getFlagCacheProvider(); - this.companyCache = options.getCompanyCacheProvider(); - this.userCache = options.getUserCacheProvider(); + + // Build cache providers via factory: custom > Redis > local + redis.clients.jedis.JedisPooled redisClient = + DataStreamCacheFactory.buildRedisClient(options.getRedisCacheConfig()); + String keyPrefix = options.getRedisCacheConfig() != null + ? 
options.getRedisCacheConfig().getKeyPrefix() + : "schematic:"; + this.flagCache = DataStreamCacheFactory.buildFlagCache(options, redisClient, keyPrefix); + this.companyCache = DataStreamCacheFactory.buildCompanyCache(options, redisClient, keyPrefix); + this.userCache = DataStreamCacheFactory.buildUserCache(options, redisClient, keyPrefix); + this.httpClient = new OkHttpClient.Builder() .connectTimeout(5, TimeUnit.SECONDS) .readTimeout(5, TimeUnit.SECONDS) @@ -661,7 +669,7 @@ private void handleFlagMessage(DataStreamResp message, MessageType messageType) } else if (messageType == MessageType.DELETE) { String flagKey = data.has("key") ? data.get("key").asText() : null; if (flagKey != null) { - flagCache.set(FLAG_PREFIX + flagKey, null); + flagCache.delete(FLAG_PREFIX + flagKey); } } } @@ -703,10 +711,10 @@ private void handleCompanyMessage(DataStreamResp message, MessageType messageTyp RulesengineCompany existing = companyCache.get(COMPANY_PREFIX + entityId); if (existing != null) { for (Map.Entry entry : existing.getKeys().entrySet()) { - companyCache.set(companyCacheKey(entry.getKey(), entry.getValue()), null); + companyCache.delete(companyCacheKey(entry.getKey(), entry.getValue())); } } - companyCache.set(COMPANY_PREFIX + entityId, null); + companyCache.delete(COMPANY_PREFIX + entityId); } } } @@ -747,10 +755,10 @@ private void handleUserMessage(DataStreamResp message, MessageType messageType) RulesengineUser existing = userCache.get(USER_PREFIX + entityId); if (existing != null) { for (Map.Entry entry : existing.getKeys().entrySet()) { - userCache.set(userCacheKey(entry.getKey(), entry.getValue()), null); + userCache.delete(userCacheKey(entry.getKey(), entry.getValue())); } } - userCache.set(USER_PREFIX + entityId, null); + userCache.delete(USER_PREFIX + entityId); } } } diff --git a/src/main/java/com/schematic/api/datastream/DatastreamOptions.java b/src/main/java/com/schematic/api/datastream/DatastreamOptions.java index f693480..9ecbee2 100644 --- 
a/src/main/java/com/schematic/api/datastream/DatastreamOptions.java +++ b/src/main/java/com/schematic/api/datastream/DatastreamOptions.java @@ -1,7 +1,7 @@ package com.schematic.api.datastream; import com.schematic.api.cache.CacheProvider; -import com.schematic.api.cache.LocalCache; +import com.schematic.api.cache.RedisCacheConfig; import com.schematic.api.types.RulesengineCompany; import com.schematic.api.types.RulesengineFlag; import com.schematic.api.types.RulesengineUser; @@ -23,6 +23,7 @@ public class DatastreamOptions { private final boolean replicatorMode; private final String replicatorHealthUrl; private final Duration replicatorHealthCheckInterval; + private final RedisCacheConfig redisCacheConfig; private final CacheProvider flagCacheProvider; private final CacheProvider companyCacheProvider; private final CacheProvider userCacheProvider; @@ -35,13 +36,10 @@ private DatastreamOptions(Builder builder) { this.replicatorHealthCheckInterval = builder.replicatorHealthCheckInterval != null ? builder.replicatorHealthCheckInterval : DEFAULT_REPLICATOR_HEALTH_CHECK_INTERVAL; - this.flagCacheProvider = - builder.flagCacheProvider != null ? builder.flagCacheProvider : new LocalCache<>(1_000, this.cacheTTL); - this.companyCacheProvider = builder.companyCacheProvider != null - ? builder.companyCacheProvider - : new LocalCache<>(1_000, this.cacheTTL); - this.userCacheProvider = - builder.userCacheProvider != null ? 
builder.userCacheProvider : new LocalCache<>(1_000, this.cacheTTL); + this.redisCacheConfig = builder.redisCacheConfig; + this.flagCacheProvider = builder.flagCacheProvider; + this.companyCacheProvider = builder.companyCacheProvider; + this.userCacheProvider = builder.userCacheProvider; } public static Builder builder() { @@ -64,6 +62,10 @@ public Duration getReplicatorHealthCheckInterval() { return replicatorHealthCheckInterval; } + public RedisCacheConfig getRedisCacheConfig() { + return redisCacheConfig; + } + public CacheProvider getFlagCacheProvider() { return flagCacheProvider; } @@ -84,6 +86,7 @@ public static class Builder { private CacheProvider flagCacheProvider; private CacheProvider companyCacheProvider; private CacheProvider userCacheProvider; + private RedisCacheConfig redisCacheConfig; public Builder cacheTTL(Duration cacheTTL) { this.cacheTTL = cacheTTL; @@ -120,6 +123,15 @@ public Builder userCacheProvider(CacheProvider userCacheProvide return this; } + /** + * Configures Redis-backed caching. The SDK creates and manages the Redis connection + * internally. Required for replicator mode. + */ + public Builder redisCache(RedisCacheConfig config) { + this.redisCacheConfig = config; + return this; + } + /** * Convenience method to enable replicator mode with a health check URL. 
*/ diff --git a/src/test/java/com/schematic/api/TestCache.java b/src/test/java/com/schematic/api/TestCache.java index 2247610..5869735 100644 --- a/src/test/java/com/schematic/api/TestCache.java +++ b/src/test/java/com/schematic/api/TestCache.java @@ -7,6 +7,7 @@ import java.time.Duration; import java.util.ArrayList; import java.util.Arrays; +import java.util.Collections; import java.util.List; import java.util.Objects; import java.util.concurrent.CountDownLatch; @@ -274,6 +275,51 @@ void testCacheWithDifferentValueTypes() { assertEquals(Arrays.asList("tag1", "tag2"), retrieved.tags); } + @Test + void testDeleteMissing_removesUnlistedKeys() { + LocalCache cacheProvider = new LocalCache<>(10, Duration.ofMinutes(5)); + + cacheProvider.set("key1", "val1"); + cacheProvider.set("key2", "val2"); + cacheProvider.set("key3", "val3"); + + cacheProvider.deleteMissing(Arrays.asList("key1")); + + assertEquals("val1", cacheProvider.get("key1")); + assertNull(cacheProvider.get("key2")); + assertNull(cacheProvider.get("key3")); + } + + @Test + void testDeleteMissing_withEmptyList() { + LocalCache cacheProvider = new LocalCache<>(10, Duration.ofMinutes(5)); + + cacheProvider.set("key1", "val1"); + cacheProvider.set("key2", "val2"); + + cacheProvider.deleteMissing(Collections.emptyList()); + + assertNull(cacheProvider.get("key1")); + assertNull(cacheProvider.get("key2")); + } + + @Test + void testDeleteMissing_withNullList() { + LocalCache cacheProvider = new LocalCache<>(10, Duration.ofMinutes(5)); + + cacheProvider.set("key1", "val1"); + + assertDoesNotThrow(() -> cacheProvider.deleteMissing(null)); + assertNull(cacheProvider.get("key1")); + } + + @Test + void testDeleteMissing_zeroCapacity() { + LocalCache cacheProvider = new LocalCache<>(0, Duration.ofMinutes(5)); + + assertDoesNotThrow(() -> cacheProvider.deleteMissing(Arrays.asList("key1"))); + } + /** Simple value object for testing complex types in the cache. 
*/ private static class TestComplexObject { final String name; diff --git a/src/test/java/com/schematic/api/cache/RedisCacheConfigTest.java b/src/test/java/com/schematic/api/cache/RedisCacheConfigTest.java new file mode 100644 index 0000000..630d09d --- /dev/null +++ b/src/test/java/com/schematic/api/cache/RedisCacheConfigTest.java @@ -0,0 +1,78 @@ +package com.schematic.api.cache; + +import static org.junit.jupiter.api.Assertions.*; + +import java.time.Duration; +import java.util.Arrays; +import org.junit.jupiter.api.Test; + +class RedisCacheConfigTest { + + @Test + void defaults() { + RedisCacheConfig config = RedisCacheConfig.builder().build(); + + assertEquals("localhost:6379", config.getEndpoints().get(0)); + assertEquals("schematic:", config.getKeyPrefix()); + assertEquals(0, config.getDatabase()); + assertFalse(config.isSsl()); + assertNull(config.getUsername()); + assertNull(config.getPassword()); + assertEquals(Duration.ofSeconds(5), config.getConnectTimeout()); + assertEquals(Duration.ofSeconds(3), config.getReadTimeout()); + assertEquals(8, config.getMaxPoolSize()); + } + + @Test + void singleEndpoint() { + RedisCacheConfig config = + RedisCacheConfig.builder().endpoint("redis.example.com:6380").build(); + + assertEquals(1, config.getEndpoints().size()); + assertEquals("redis.example.com:6380", config.getEndpoints().get(0)); + } + + @Test + void multipleEndpoints() { + RedisCacheConfig config = RedisCacheConfig.builder() + .endpoints(Arrays.asList("node1:6379", "node2:6379")) + .build(); + + assertEquals(2, config.getEndpoints().size()); + } + + @Test + void authConfig() { + RedisCacheConfig config = RedisCacheConfig.builder() + .username("myuser") + .password("mypass") + .database(2) + .ssl(true) + .build(); + + assertEquals("myuser", config.getUsername()); + assertEquals("mypass", config.getPassword()); + assertEquals(2, config.getDatabase()); + assertTrue(config.isSsl()); + } + + @Test + void customKeyPrefix() { + RedisCacheConfig config = 
RedisCacheConfig.builder().keyPrefix("myapp:").build(); + + assertEquals("myapp:", config.getKeyPrefix()); + } + + @Test + void customTimeouts() { + RedisCacheConfig config = RedisCacheConfig.builder() + .connectTimeout(Duration.ofSeconds(10)) + .readTimeout(Duration.ofSeconds(8)) + .maxPoolSize(16) + .build(); + + assertEquals(Duration.ofSeconds(10), config.getConnectTimeout()); + assertEquals(Duration.ofSeconds(8), config.getReadTimeout()); + assertEquals(16, config.getMaxPoolSize()); + } +} diff --git a/src/test/java/com/schematic/api/datastream/DataStreamCacheFactoryTest.java b/src/test/java/com/schematic/api/datastream/DataStreamCacheFactoryTest.java new file mode 100644 index 0000000..548df5b --- /dev/null +++ b/src/test/java/com/schematic/api/datastream/DataStreamCacheFactoryTest.java @@ -0,0 +1,82 @@ +package com.schematic.api.datastream; + +import static org.junit.jupiter.api.Assertions.*; + +import com.schematic.api.cache.CacheProvider; +import com.schematic.api.cache.LocalCache; +import com.schematic.api.cache.RedisCacheConfig; +import com.schematic.api.types.RulesengineCompany; +import com.schematic.api.types.RulesengineFlag; +import com.schematic.api.types.RulesengineUser; +import java.time.Duration; +import org.junit.jupiter.api.Test; + +class DataStreamCacheFactoryTest { + + @Test + void buildsLocalCacheWhenNoRedisConfig() { + DatastreamOptions options = DatastreamOptions.builder().build(); + + CacheProvider flagCache = DataStreamCacheFactory.buildFlagCache(options, null, "schematic:"); + CacheProvider companyCache = + DataStreamCacheFactory.buildCompanyCache(options, null, "schematic:"); + CacheProvider userCache = DataStreamCacheFactory.buildUserCache(options, null, "schematic:"); + + assertNotNull(flagCache); + assertNotNull(companyCache); + assertNotNull(userCache); + assertTrue(flagCache instanceof LocalCache); + assertTrue(companyCache instanceof LocalCache); + assertTrue(userCache instanceof LocalCache); + } + + @Test + void 
respectsCustomFlagCacheProvider() { + LocalCache customCache = new LocalCache<>(500, Duration.ofMinutes(1)); + DatastreamOptions options = + DatastreamOptions.builder().flagCacheProvider(customCache).build(); + + CacheProvider flagCache = DataStreamCacheFactory.buildFlagCache(options, null, "schematic:"); + + assertSame(customCache, flagCache); + } + + @Test + void respectsCustomCompanyCacheProvider() { + LocalCache customCache = new LocalCache<>(500, Duration.ofMinutes(1)); + DatastreamOptions options = + DatastreamOptions.builder().companyCacheProvider(customCache).build(); + + CacheProvider companyCache = + DataStreamCacheFactory.buildCompanyCache(options, null, "schematic:"); + + assertSame(customCache, companyCache); + } + + @Test + void respectsCustomUserCacheProvider() { + LocalCache customCache = new LocalCache<>(500, Duration.ofMinutes(1)); + DatastreamOptions options = + DatastreamOptions.builder().userCacheProvider(customCache).build(); + + CacheProvider userCache = DataStreamCacheFactory.buildUserCache(options, null, "schematic:"); + + assertSame(customCache, userCache); + } + + @Test + void buildRedisClient_returnsNullForNullConfig() { + assertNull(DataStreamCacheFactory.buildRedisClient(null)); + } + + @Test + void redisCacheConfig_storedInOptions() { + RedisCacheConfig redisConfig = + RedisCacheConfig.builder().endpoint("redis:6379").build(); + DatastreamOptions options = + DatastreamOptions.builder().redisCache(redisConfig).build(); + + assertNotNull(options.getRedisCacheConfig()); + assertEquals("redis:6379", options.getRedisCacheConfig().getEndpoints().get(0)); + } +} From 51a726eaf0d05b8e6b7cfa854141ea8d6b4b73e8 Mon Sep 17 00:00:00 2001 From: Christopher Brady Date: Thu, 2 Apr 2026 15:04:58 -0600 Subject: [PATCH 18/27] more parity updates --- src/main/java/com/schematic/api/Schematic.java | 14 ++++++++++++++ .../api/datastream/DataStreamWebSocketClient.java | 4 ++-- 2 files changed, 16 insertions(+), 2 deletions(-) diff --git 
a/src/main/java/com/schematic/api/Schematic.java b/src/main/java/com/schematic/api/Schematic.java index fd59a3d..d3b2bd6 100644 --- a/src/main/java/com/schematic/api/Schematic.java +++ b/src/main/java/com/schematic/api/Schematic.java @@ -214,6 +214,20 @@ public boolean isOffline() { return this.offline; } + /** + * Sets a default value for a specific flag, used when the API is unavailable or in offline mode. + */ + public void setFlagDefault(String flagKey, boolean value) { + flagDefaults.put(flagKey, value); + } + + /** + * Sets default values for multiple flags at once. + */ + public void setFlagDefaults(Map defaults) { + flagDefaults.putAll(defaults); + } + /** * Returns the DataStream client, or null if datastream is not configured. */ diff --git a/src/main/java/com/schematic/api/datastream/DataStreamWebSocketClient.java b/src/main/java/com/schematic/api/datastream/DataStreamWebSocketClient.java index 411626e..27e2d5f 100644 --- a/src/main/java/com/schematic/api/datastream/DataStreamWebSocketClient.java +++ b/src/main/java/com/schematic/api/datastream/DataStreamWebSocketClient.java @@ -42,8 +42,8 @@ public interface ConnectionReadyHandler { } // Default constants - private static final long DEFAULT_PONG_WAIT_MS = 60_000; - private static final long DEFAULT_PING_PERIOD_MS = 54_000; + private static final long DEFAULT_PONG_WAIT_MS = 30_000; + private static final long DEFAULT_PING_PERIOD_MS = 27_000; private static final int DEFAULT_MAX_RECONNECT_ATTEMPTS = 10; private static final long DEFAULT_MIN_RECONNECT_DELAY_MS = 1_000; private static final long DEFAULT_MAX_RECONNECT_DELAY_MS = 30_000; From e505055cd41799cd6716ed586a921400d3106784 Mon Sep 17 00:00:00 2001 From: Christopher Brady Date: Thu, 2 Apr 2026 15:05:07 -0600 Subject: [PATCH 19/27] add docs --- README.md | 129 ++++++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 129 insertions(+) diff --git a/README.md b/README.md index ac18c66..578350e 100644 --- a/README.md +++ b/README.md @@ 
-159,6 +159,135 @@ user.put("user_id", "your-user-id"); boolean flagValue = schematic.checkFlag("some-flag-key", company, user); ``` +## DataStream + +DataStream enables local flag evaluation by maintaining a WebSocket connection to Schematic and caching flag rules, company, and user data locally. This reduces latency and network calls for flag checks. + +### Key Features +- **Real-Time Updates**: Automatically updates cached data when changes occur on the backend. +- **Configurable Caching**: Supports both in-memory (local) caching and Redis-based caching. +- **Efficient Flag Checks**: Flag evaluation happens locally using a WASM rules engine. + +### Setup + +```java +import com.schematic.api.Schematic; +import com.schematic.api.datastream.DatastreamOptions; + +Schematic schematic = Schematic.builder() + .apiKey("YOUR_API_KEY") + .datastreamOptions(DatastreamOptions.builder() + .build()) + .build(); + +// Flag checks are now evaluated locally +boolean flagValue = schematic.checkFlag("some-flag-key", company, user); + +// When done, close the client to release resources +schematic.close(); +``` + +### Configuration Options + +| Option | Type | Default | Description | +|---|---|---|---| +| `cacheTTL` | `Duration` | 24 hours | Cache TTL for flag/company/user data | +| `redisCache` | `RedisCacheConfig` | — | Redis connection config (uses in-memory cache if not provided) | + +### Configuring Redis Cache + +DataStream supports Redis for caching, which is required for [Replicator Mode](#replicator-mode). 
Pass a `RedisCacheConfig` and the SDK will create and manage the Redis connection internally: + +```java +import com.schematic.api.Schematic; +import com.schematic.api.cache.RedisCacheConfig; +import com.schematic.api.datastream.DatastreamOptions; +import java.time.Duration; + +Schematic schematic = Schematic.builder() + .apiKey("YOUR_API_KEY") + .datastreamOptions(DatastreamOptions.builder() + .redisCache(RedisCacheConfig.builder() + .endpoint("localhost:6379") + .keyPrefix("schematic:") + .build()) + .cacheTTL(Duration.ofMinutes(5)) + .build()) + .build(); +``` + +#### Redis Configuration Options + +| Option | Type | Default | Description | +|---|---|---|---| +| `endpoint` | `String` | `localhost:6379` | Redis server address in `host:port` format | +| `endpoints` | `List` | `["localhost:6379"]` | Multiple endpoints (for future cluster support) | +| `username` | `String` | — | Redis 6.0+ ACL username | +| `password` | `String` | — | Redis password | +| `database` | `int` | `0` | Redis database index | +| `ssl` | `boolean` | `false` | Enable SSL/TLS | +| `keyPrefix` | `String` | `schematic:` | Prefix for all Redis cache keys | +| `connectTimeout` | `Duration` | 5 seconds | Connection timeout | +| `readTimeout` | `Duration` | 3 seconds | Read timeout | +| `maxPoolSize` | `int` | 8 | Maximum connection pool size | + +## Replicator Mode + +Replicator mode is designed for environments where a separate process (the [schematic-datastream-replicator](https://github.com/SchematicHQ/schematic-datastream-replicator)) manages the WebSocket connection and populates a shared Redis cache. The SDK reads from that cache and evaluates flags locally without establishing its own WebSocket connection. + +### Requirements + +Replicator mode **requires Redis** as a shared cache so the SDK can read data written by the external replicator process. An in-memory cache will not work since the replicator and SDK run in separate processes. 
+ +### Setup + +```java +import com.schematic.api.Schematic; +import com.schematic.api.cache.RedisCacheConfig; +import com.schematic.api.datastream.DatastreamOptions; + +Schematic schematic = Schematic.builder() + .apiKey("YOUR_API_KEY") + .datastreamOptions(DatastreamOptions.builder() + .redisCache(RedisCacheConfig.builder() + .endpoint("localhost:6379") + .build()) + .withReplicatorMode("http://localhost:8090/ready") + .build()) + .build(); +``` + +### Configuration Options + +| Option | Type | Default | Description | +|---|---|---|---| +| `withReplicatorMode` | `String` | — | Enables replicator mode with the given health check URL | +| `redisCache` | `RedisCacheConfig` | — | **Required.** Redis connection config for the shared cache | +| `replicatorHealthCheckInterval` | `Duration` | 30 seconds | Health check polling interval | +| `cacheTTL` | `Duration` | 24 hours | Cache TTL (should match the replicator's TTL) | + +### Cache TTL Configuration + +**Important:** When using Replicator Mode, you should set the SDK's cache TTL to match the replicator's cache TTL. The replicator defaults to an unlimited cache TTL. If the SDK uses a shorter TTL (the default is 24 hours), locally updated cache entries (e.g. after track events) will be written back with the shorter TTL and eventually evicted from the shared Redis cache, even though the replicator originally set them with no expiration. 
+ +To match the replicator's default unlimited TTL: + +```java +DatastreamOptions.builder() + .redisCache(RedisCacheConfig.builder() + .endpoint("localhost:6379") + .build()) + .withReplicatorMode("http://localhost:8090/ready") + .cacheTTL(Duration.ZERO) // Unlimited, matching the replicator default + .build() +``` + +When running in Replicator Mode, the client will: +- Skip establishing WebSocket connections +- Periodically check if the replicator service is ready +- Use cached data populated by the external replicator service +- Fall back to direct API calls if the replicator is not available + ## Webhook Verification Schematic can send webhooks to notify your application of events. To ensure the security of these webhooks, Schematic signs each request using HMAC-SHA256. The Java SDK provides utility functions to verify these signatures. From 8d9b74f46b3988ef23f0196487322e3d118dccb1 Mon Sep 17 00:00:00 2001 From: Christopher Brady Date: Thu, 2 Apr 2026 16:56:21 -0600 Subject: [PATCH 20/27] remove unused functions --- src/main/java/com/schematic/api/Schematic.java | 14 -------------- 1 file changed, 14 deletions(-) diff --git a/src/main/java/com/schematic/api/Schematic.java b/src/main/java/com/schematic/api/Schematic.java index d3b2bd6..fd59a3d 100644 --- a/src/main/java/com/schematic/api/Schematic.java +++ b/src/main/java/com/schematic/api/Schematic.java @@ -214,20 +214,6 @@ public boolean isOffline() { return this.offline; } - /** - * Sets a default value for a specific flag, used when the API is unavailable or in offline mode. - */ - public void setFlagDefault(String flagKey, boolean value) { - flagDefaults.put(flagKey, value); - } - - /** - * Sets default values for multiple flags at once. - */ - public void setFlagDefaults(Map defaults) { - flagDefaults.putAll(defaults); - } - /** * Returns the DataStream client, or null if datastream is not configured. 
*/ From c00b4bbd9d7ec80fa52a2f4a50330a2e4afc79b7 Mon Sep 17 00:00:00 2001 From: Christopher Brady Date: Thu, 2 Apr 2026 17:01:04 -0600 Subject: [PATCH 21/27] add shared cleanup scheduler for local caches --- .../com/schematic/api/cache/LocalCache.java | 26 ++++++++++++------- .../api/datastream/DataStreamClient.java | 1 - 2 files changed, 17 insertions(+), 10 deletions(-) diff --git a/src/main/java/com/schematic/api/cache/LocalCache.java b/src/main/java/com/schematic/api/cache/LocalCache.java index b3be23a..d7b7968 100644 --- a/src/main/java/com/schematic/api/cache/LocalCache.java +++ b/src/main/java/com/schematic/api/cache/LocalCache.java @@ -18,12 +18,19 @@ public class LocalCache implements CacheProvider, AutoCloseable { public static final Duration DEFAULT_CACHE_TTL = Duration.ofMillis(5000); // 5000 milliseconds private static final Duration CLEANUP_INTERVAL = Duration.ofSeconds(30); + // Shared scheduler for background cleanup across all LocalCache instances + private static final ScheduledExecutorService SHARED_CLEANUP_SCHEDULER = + Executors.newSingleThreadScheduledExecutor(r -> { + Thread t = new Thread(r, "schematic-cache-cleanup"); + t.setDaemon(true); + return t; + }); + private final ConcurrentHashMap> cache; private final LinkedList lruList; private final ReentrantLock lock; private final int maxItems; private final Duration ttl; - private final ScheduledExecutorService cleanupScheduler; public LocalCache() { this(DEFAULT_CACHE_CAPACITY, DEFAULT_CACHE_TTL); @@ -36,13 +43,8 @@ public LocalCache(int maxItems, Duration ttl) { this.maxItems = maxItems; this.ttl = ttl; - // Start background cleanup thread for expired items - this.cleanupScheduler = Executors.newSingleThreadScheduledExecutor(r -> { - Thread t = new Thread(r, "schematic-cache-cleanup"); - t.setDaemon(true); - return t; - }); - this.cleanupScheduler.scheduleAtFixedRate( + // Schedule background cleanup on the shared thread + SHARED_CLEANUP_SCHEDULER.scheduleAtFixedRate( 
this::removeExpiredItems, CLEANUP_INTERVAL.getSeconds(), CLEANUP_INTERVAL.getSeconds(), @@ -146,7 +148,13 @@ public void delete(String key) { @Override public void close() { - cleanupScheduler.shutdownNow(); + cache.clear(); + lock.lock(); + try { + lruList.clear(); + } finally { + lock.unlock(); + } } private void removeExpiredItems() { diff --git a/src/main/java/com/schematic/api/datastream/DataStreamClient.java b/src/main/java/com/schematic/api/datastream/DataStreamClient.java index 3d3244a..0aeaec9 100644 --- a/src/main/java/com/schematic/api/datastream/DataStreamClient.java +++ b/src/main/java/com/schematic/api/datastream/DataStreamClient.java @@ -199,7 +199,6 @@ public RulesengineCheckFlagResult checkFlag(String flagKey, Map /** * Fetches a company via the datastream WebSocket, waiting for the response with a timeout. * Deduplicates concurrent requests for the same entity. - * Uses futures with timeout to wait for the response, deduplicating concurrent requests. */ private RulesengineCompany getCompany(Map keys) { // Check cache first From c17d21fc68738a5e4994fac6500df62b8489e9fb Mon Sep 17 00:00:00 2001 From: Christopher Brady Date: Thu, 2 Apr 2026 17:05:15 -0600 Subject: [PATCH 22/27] udpate sample app --- sample-app/src/main/java/sample/App.java | 24 ++++++++++++++++++++---- 1 file changed, 20 insertions(+), 4 deletions(-) diff --git a/sample-app/src/main/java/sample/App.java b/sample-app/src/main/java/sample/App.java index bb9aa0c..f9d03f8 100644 --- a/sample-app/src/main/java/sample/App.java +++ b/sample-app/src/main/java/sample/App.java @@ -9,6 +9,8 @@ * - SCHEMATIC_API_URL: Schematic API base URL (default: https://api.schematichq.com) * - SERVER_PORT: Port to listen on (default: 8080) * - CACHE_TTL_MS: Cache TTL in milliseconds (default: 5000) + * - REDIS_URL: Redis server endpoint in host:port format (default: localhost:6379) + * - REDIS_PASSWORD: Redis password (optional) * - REPLICATOR_HEALTH_URL: Replicator health check URL (default: 
http://localhost:8090/ready) * - USE_REPLICATOR: Set to "true" to enable replicator mode (default: false) * @@ -30,6 +32,7 @@ * Replicator mode example: * export SCHEMATIC_API_KEY="your-key" * export USE_REPLICATOR=true + * export REDIS_URL="localhost:6379" * export REPLICATOR_HEALTH_URL="http://localhost:8090/ready" * ./gradlew :sample-app:run */ @@ -37,8 +40,9 @@ import com.fasterxml.jackson.databind.ObjectMapper; import com.schematic.api.Schematic; -import com.schematic.api.types.RulesengineCheckFlagResult; +import com.schematic.api.cache.RedisCacheConfig; import com.schematic.api.datastream.DatastreamOptions; +import com.schematic.api.types.RulesengineCheckFlagResult; import com.sun.net.httpserver.HttpExchange; import com.sun.net.httpserver.HttpServer; import java.io.IOException; @@ -60,6 +64,7 @@ public final class App { private static int serverPort; private static boolean useReplicator; private static String replicatorHealthUrl; + private static String redisUrl; public static void main(String[] args) throws Exception { String apiKey = System.getenv("SCHEMATIC_API_KEY"); @@ -71,12 +76,21 @@ public static void main(String[] args) throws Exception { schematicApiUrl = envOrDefault("SCHEMATIC_API_URL", "https://api.schematichq.com"); serverPort = Integer.parseInt(envOrDefault("SERVER_PORT", "8080")); cacheTtlMs = Integer.parseInt(envOrDefault("CACHE_TTL_MS", "5000")); + redisUrl = envOrDefault("REDIS_URL", "localhost:6379"); useReplicator = "true".equalsIgnoreCase(envOrDefault("USE_REPLICATOR", "false")); replicatorHealthUrl = envOrDefault("REPLICATOR_HEALTH_URL", "http://localhost:8090/ready"); - // Configure DataStream options - DatastreamOptions.Builder datastreamBuilder = - DatastreamOptions.builder().cacheTTL(Duration.ofMillis(cacheTtlMs)); + // Configure Redis cache + RedisCacheConfig.Builder redisBuilder = RedisCacheConfig.builder().endpoint(redisUrl); + String redisPassword = System.getenv("REDIS_PASSWORD"); + if (redisPassword != null && 
!redisPassword.isEmpty()) { + redisBuilder.password(redisPassword); + } + + // Configure DataStream options with Redis + DatastreamOptions.Builder datastreamBuilder = DatastreamOptions.builder() + .cacheTTL(Duration.ofMillis(cacheTtlMs)) + .redisCache(redisBuilder.build()); if (useReplicator) { datastreamBuilder.withReplicatorMode(replicatorHealthUrl); @@ -101,6 +115,7 @@ public static void main(String[] args) throws Exception { System.out.println("Datastream Test Server started on port " + serverPort); System.out.println("Mode: " + (useReplicator ? "replicator" : "direct datastream")); + System.out.println("Redis: " + redisUrl); System.out.println("Endpoints:"); System.out.println(" GET / - Welcome message"); System.out.println(" GET /config - Show configuration"); @@ -127,6 +142,7 @@ private static void handleConfig(HttpExchange exchange) throws IOException { config.put("schematicApiUrl", schematicApiUrl); config.put("cacheTtlMs", cacheTtlMs); config.put("hasApiKey", true); + config.put("redisUrl", redisUrl); config.put("replicatorMode", schematic.isReplicatorMode()); if (useReplicator) { config.put("replicatorHealthUrl", replicatorHealthUrl); From fa599da80fc6b26c0da96c277d5309d81609f463 Mon Sep 17 00:00:00 2001 From: Christopher Brady Date: Fri, 3 Apr 2026 09:57:56 -0600 Subject: [PATCH 23/27] setup sample app to work as testapp --- sample-app/src/main/java/sample/App.java | 452 ++++++++++-------- .../java/com/schematic/api/EventBuffer.java | 9 +- .../com/schematic/api/HttpEventSender.java | 31 +- .../java/com/schematic/api/Schematic.java | 10 + 4 files changed, 296 insertions(+), 206 deletions(-) diff --git a/sample-app/src/main/java/sample/App.java b/sample-app/src/main/java/sample/App.java index f9d03f8..6aa2b47 100644 --- a/sample-app/src/main/java/sample/App.java +++ b/sample-app/src/main/java/sample/App.java @@ -1,254 +1,308 @@ /* - * Schematic Java Client - Datastream Test Server + * Schematic Java SDK - E2E Test App * - * This example demonstrates how 
to use the Schematic Java client to check feature flags - * with DataStream support, including replicator mode. - * - * Environment Variables: - * - SCHEMATIC_API_KEY: Your Schematic API key (required) - * - SCHEMATIC_API_URL: Schematic API base URL (default: https://api.schematichq.com) - * - SERVER_PORT: Port to listen on (default: 8080) - * - CACHE_TTL_MS: Cache TTL in milliseconds (default: 5000) - * - REDIS_URL: Redis server endpoint in host:port format (default: localhost:6379) - * - REDIS_PASSWORD: Redis password (optional) - * - REPLICATOR_HEALTH_URL: Replicator health check URL (default: http://localhost:8090/ready) - * - USE_REPLICATOR: Set to "true" to enable replicator mode (default: false) + * HTTP server implementing the shared E2E test app contract. + * The SDK client is initialized lazily via POST /configure. * * Usage: - * 1. Set environment variables (only SCHEMATIC_API_KEY is required) - * 2. Run: ./gradlew :sample-app:run - * 3. Test endpoints: - * - GET / - Welcome message - * - GET /config - Show current configuration - * - GET /health - Health check with datastream status - * - GET /datastream-status - DataStream/replicator connection status - * - POST /checkflag - Check a feature flag - * - * Example checkflag request: - * curl -X POST http://localhost:8080/checkflag \ - * -H "Content-Type: application/json" \ - * -d '{"flag-key":"my-flag","company":{"id":"comp-123"},"user":{"id":"user-456"}}' - * - * Replicator mode example: - * export SCHEMATIC_API_KEY="your-key" - * export USE_REPLICATOR=true - * export REDIS_URL="localhost:6379" - * export REPLICATOR_HEALTH_URL="http://localhost:8090/ready" * ./gradlew :sample-app:run + * + * Endpoints: + * GET /health - Returns {"status":"waiting"} or {"status":"configured"} + * POST /configure - Initialize SDK client with config + * POST /check-flag - Check a feature flag + * POST /identify - Submit identify event + * POST /track - Submit track event + * POST /set-flag-default - Set a flag default 
value */ package sample; import com.fasterxml.jackson.databind.ObjectMapper; import com.schematic.api.Schematic; +import com.schematic.api.cache.LocalCache; import com.schematic.api.cache.RedisCacheConfig; import com.schematic.api.datastream.DatastreamOptions; +import com.schematic.api.types.CheckFlagRequestBody; +import com.schematic.api.types.EventBodyIdentify; +import com.schematic.api.types.EventBodyIdentifyCompany; +import com.schematic.api.types.EventBodyTrack; import com.schematic.api.types.RulesengineCheckFlagResult; import com.sun.net.httpserver.HttpExchange; import com.sun.net.httpserver.HttpServer; import java.io.IOException; -import java.io.InputStream; import java.io.OutputStream; +import java.io.PrintWriter; +import java.io.StringWriter; import java.net.InetSocketAddress; import java.time.Duration; -import java.time.Instant; +import java.util.Collections; +import java.util.HashMap; import java.util.LinkedHashMap; import java.util.Map; public final class App { private static final ObjectMapper MAPPER = new ObjectMapper(); + private static final int CACHE_TTL_MS = 2000; - private static Schematic schematic; - private static String schematicApiUrl; - private static int cacheTtlMs; - private static int serverPort; - private static boolean useReplicator; - private static String replicatorHealthUrl; - private static String redisUrl; + private static volatile Schematic client; + private static volatile Map currentConfig; public static void main(String[] args) throws Exception { - String apiKey = System.getenv("SCHEMATIC_API_KEY"); - if (apiKey == null || apiKey.isEmpty()) { - System.err.println("ERROR: SCHEMATIC_API_KEY environment variable is not set"); - System.exit(1); - } - - schematicApiUrl = envOrDefault("SCHEMATIC_API_URL", "https://api.schematichq.com"); - serverPort = Integer.parseInt(envOrDefault("SERVER_PORT", "8080")); - cacheTtlMs = Integer.parseInt(envOrDefault("CACHE_TTL_MS", "5000")); - redisUrl = envOrDefault("REDIS_URL", "localhost:6379"); 
- useReplicator = "true".equalsIgnoreCase(envOrDefault("USE_REPLICATOR", "false")); - replicatorHealthUrl = envOrDefault("REPLICATOR_HEALTH_URL", "http://localhost:8090/ready"); - - // Configure Redis cache - RedisCacheConfig.Builder redisBuilder = RedisCacheConfig.builder().endpoint(redisUrl); - String redisPassword = System.getenv("REDIS_PASSWORD"); - if (redisPassword != null && !redisPassword.isEmpty()) { - redisBuilder.password(redisPassword); - } - - // Configure DataStream options with Redis - DatastreamOptions.Builder datastreamBuilder = DatastreamOptions.builder() - .cacheTTL(Duration.ofMillis(cacheTtlMs)) - .redisCache(redisBuilder.build()); - - if (useReplicator) { - datastreamBuilder.withReplicatorMode(replicatorHealthUrl); - } - - DatastreamOptions datastreamOptions = datastreamBuilder.build(); - - schematic = Schematic.builder() - .apiKey(apiKey) - .basePath(schematicApiUrl) - .datastreamOptions(datastreamOptions) - .build(); + int port = Integer.parseInt(System.getenv().getOrDefault("PORT", "8080")); - HttpServer server = HttpServer.create(new InetSocketAddress(serverPort), 0); - server.createContext("/", App::handleRoot); - server.createContext("/config", App::handleConfig); + HttpServer server = HttpServer.create(new InetSocketAddress(port), 0); server.createContext("/health", App::handleHealth); - server.createContext("/datastream-status", App::handleDatastreamStatus); - server.createContext("/checkflag", App::handleCheckFlag); + server.createContext("/configure", App::handleConfigure); + server.createContext("/check-flag", App::handleCheckFlag); + server.createContext("/identify", App::handleIdentify); + server.createContext("/track", App::handleTrack); + server.createContext("/set-flag-default", App::handleSetFlagDefault); server.setExecutor(null); server.start(); - System.out.println("Datastream Test Server started on port " + serverPort); - System.out.println("Mode: " + (useReplicator ? 
"replicator" : "direct datastream")); - System.out.println("Redis: " + redisUrl); - System.out.println("Endpoints:"); - System.out.println(" GET / - Welcome message"); - System.out.println(" GET /config - Show configuration"); - System.out.println(" GET /health - Health check"); - System.out.println(" GET /datastream-status - DataStream connection status"); - System.out.println(" POST /checkflag - Check a feature flag"); + System.out.println("E2E test app listening on port " + port); } - private static void handleRoot(HttpExchange exchange) throws IOException { + private static void handleHealth(HttpExchange exchange) throws IOException { if (!"GET".equals(exchange.getRequestMethod())) { - sendMethodNotAllowed(exchange); + sendJson(exchange, 405, Map.of("error", "Method Not Allowed")); return; } - sendText(exchange, 200, "Welcome to the Schematic Datastream Test Server!"); + + Map response = new LinkedHashMap<>(); + if (client == null) { + response.put("status", "waiting"); + } else { + response.put("status", "configured"); + response.put("config", currentConfig); + response.put("cacheTtlMs", CACHE_TTL_MS); + } + sendJson(exchange, 200, response); } - private static void handleConfig(HttpExchange exchange) throws IOException { - if (!"GET".equals(exchange.getRequestMethod())) { - sendMethodNotAllowed(exchange); + @SuppressWarnings("unchecked") + private static void handleConfigure(HttpExchange exchange) throws IOException { + if (!"POST".equals(exchange.getRequestMethod())) { + sendJson(exchange, 405, Map.of("error", "Method Not Allowed")); return; } - Map config = new LinkedHashMap<>(); - config.put("schematicApiUrl", schematicApiUrl); - config.put("cacheTtlMs", cacheTtlMs); - config.put("hasApiKey", true); - config.put("redisUrl", redisUrl); - config.put("replicatorMode", schematic.isReplicatorMode()); - if (useReplicator) { - config.put("replicatorHealthUrl", replicatorHealthUrl); - } + try { + Map config = MAPPER.readValue(exchange.getRequestBody(), Map.class); 
+ logRequest("/configure", config); + currentConfig = config; + + String apiKey = (String) config.get("apiKey"); + String baseUrl = (String) config.get("baseUrl"); + String eventCaptureBaseUrl = (String) config.get("eventCaptureBaseUrl"); + boolean offline = Boolean.TRUE.equals(config.get("offline")); + boolean noCache = Boolean.TRUE.equals(config.get("noCache")); + boolean useDataStream = Boolean.TRUE.equals(config.get("useDataStream")); + String redisUrl = (String) config.get("redisUrl"); + String replicatorUrl = (String) config.get("replicatorUrl"); + + // Parse flag defaults + Map flagDefaults = new HashMap<>(); + Object flagDefaultsRaw = config.get("flagDefaults"); + if (flagDefaultsRaw instanceof Map) { + for (Map.Entry entry : ((Map) flagDefaultsRaw).entrySet()) { + flagDefaults.put(entry.getKey(), Boolean.TRUE.equals(entry.getValue())); + } + } + + // Close existing client if reconfiguring + if (client != null) { + try { + client.close(); + } catch (Exception e) { + // ignore + } + } - Map endpoints = new LinkedHashMap<>(); - endpoints.put("health", "/health"); - endpoints.put("config", "/config"); - endpoints.put("datastreamStatus", "/datastream-status"); - endpoints.put("checkFlag", "/checkflag (POST)"); + Schematic.Builder builder = Schematic.builder().apiKey(apiKey); - Map response = new LinkedHashMap<>(); - response.put("configuration", config); - response.put("endpoints", endpoints); - response.put("timestamp", Instant.now().toString()); + if (baseUrl != null) { + builder.basePath(baseUrl); + } + if (eventCaptureBaseUrl != null) { + builder.eventCaptureBaseUrl(eventCaptureBaseUrl); + } + if (offline) { + builder.offline(true); + } + if (!flagDefaults.isEmpty()) { + builder.flagDefaults(flagDefaults); + } - sendJson(exchange, 200, response); + // Cache configuration + if (noCache) { + builder.cacheProviders(Collections.emptyList()); + } else if (redisUrl != null && !useDataStream) { + // Redis for flag check cache only (no datastream) + // Note: flag 
check cache uses RedisCacheConfig through DatastreamOptions + // For non-datastream Redis, we use LocalCache with short TTL + builder.cacheProviders( + Collections.singletonList(new LocalCache<>(1000, Duration.ofMillis(CACHE_TTL_MS)))); + } else { + builder.cacheProviders( + Collections.singletonList(new LocalCache<>(1000, Duration.ofMillis(CACHE_TTL_MS)))); + } + + // DataStream configuration + if (useDataStream) { + DatastreamOptions.Builder dsBuilder = + DatastreamOptions.builder().cacheTTL(Duration.ofMillis(CACHE_TTL_MS)); + + if (redisUrl != null) { + dsBuilder.redisCache( + RedisCacheConfig.builder().endpoint(redisUrl).build()); + } + + if (replicatorUrl != null) { + dsBuilder.withReplicatorMode(replicatorUrl); + } + + builder.datastreamOptions(dsBuilder.build()); + } + + client = builder.build(); + + sendJson(exchange, 200, Map.of("success", true)); + } catch (Exception e) { + sendJson(exchange, 500, errorDetail(e)); + } } - private static void handleHealth(HttpExchange exchange) throws IOException { - if (!"GET".equals(exchange.getRequestMethod())) { - sendMethodNotAllowed(exchange); + @SuppressWarnings("unchecked") + private static void handleCheckFlag(HttpExchange exchange) throws IOException { + if (!"POST".equals(exchange.getRequestMethod())) { + sendJson(exchange, 405, Map.of("error", "Method Not Allowed")); + return; + } + if (client == null) { + sendJson(exchange, 503, Map.of("error", "Not configured")); return; } - Map response = new LinkedHashMap<>(); - response.put("status", "healthy"); - response.put("replicatorMode", schematic.isReplicatorMode()); - response.put("datastreamConnected", schematic.isDatastreamConnected()); + try { + Map body = MAPPER.readValue(exchange.getRequestBody(), Map.class); + logRequest("/check-flag", body); + String flagKey = (String) body.get("flagKey"); + Map company = toStringMap(body.get("company")); + Map user = toStringMap(body.get("user")); - Map config = new LinkedHashMap<>(); - config.put("schematicApiUrl", 
schematicApiUrl); - config.put("cacheTtlMs", cacheTtlMs); - response.put("configuration", config); - response.put("timestamp", Instant.now().toString()); + boolean value = client.checkFlag(flagKey, company, user); - sendJson(exchange, 200, response); + sendJson(exchange, 200, Map.of("value", value)); + } catch (Exception e) { + Map resp = new LinkedHashMap<>(); + resp.put("value", false); + resp.putAll(errorDetail(e)); + sendJson(exchange, 200, resp); + } } - private static void handleDatastreamStatus(HttpExchange exchange) throws IOException { - if (!"GET".equals(exchange.getRequestMethod())) { - sendMethodNotAllowed(exchange); + @SuppressWarnings("unchecked") + private static void handleIdentify(HttpExchange exchange) throws IOException { + if (!"POST".equals(exchange.getRequestMethod())) { + sendJson(exchange, 405, Map.of("error", "Method Not Allowed")); + return; + } + if (client == null) { + sendJson(exchange, 503, Map.of("error", "Not configured")); return; } - Map response = new LinkedHashMap<>(); - response.put("replicatorMode", schematic.isReplicatorMode()); - response.put("datastreamConnected", schematic.isDatastreamConnected()); + try { + Map body = MAPPER.readValue(exchange.getRequestBody(), Map.class); + logRequest("/identify", body); + Map keys = toStringMap(body.get("keys")); + Map companyKeys = toStringMap(body.get("company")); + + // Build company if provided + EventBodyIdentifyCompany company = null; + if (companyKeys != null && !companyKeys.isEmpty()) { + company = EventBodyIdentifyCompany.builder().keys(companyKeys).build(); + } - Map config = new LinkedHashMap<>(); - config.put("schematicApiUrl", schematicApiUrl); - if (useReplicator) { - config.put("replicatorHealthUrl", replicatorHealthUrl); - } - response.put("configuration", config); - response.put("timestamp", Instant.now().toString()); + // User keys default to the top-level keys + Map userKeys = keys; + Map userFromBody = toStringMap(body.get("user")); + if (userFromBody != null && 
!userFromBody.isEmpty()) { + userKeys = userFromBody; + } - sendJson(exchange, 200, response); + client.identify(userKeys != null ? userKeys : Collections.emptyMap(), company, null, null); + + sendJson(exchange, 200, Map.of("success", true)); + } catch (Exception e) { + Map resp = new LinkedHashMap<>(); + resp.put("success", false); + resp.putAll(errorDetail(e)); + sendJson(exchange, 200, resp); + } } @SuppressWarnings("unchecked") - private static void handleCheckFlag(HttpExchange exchange) throws IOException { + private static void handleTrack(HttpExchange exchange) throws IOException { if (!"POST".equals(exchange.getRequestMethod())) { - sendMethodNotAllowed(exchange); + sendJson(exchange, 405, Map.of("error", "Method Not Allowed")); + return; + } + if (client == null) { + sendJson(exchange, 503, Map.of("error", "Not configured")); return; } try { - InputStream body = exchange.getRequestBody(); - Map requestBody = MAPPER.readValue(body, Map.class); - - String flagKey = (String) requestBody.get("flag-key"); - if (flagKey == null || flagKey.isEmpty()) { - Map error = new LinkedHashMap<>(); - error.put("error", "flag-key is required"); - sendJson(exchange, 400, error); - return; + Map body = MAPPER.readValue(exchange.getRequestBody(), Map.class); + logRequest("/track", body); + String event = (String) body.get("event"); + Map company = toStringMap(body.get("company")); + Map user = toStringMap(body.get("user")); + Integer quantity = body.containsKey("quantity") ? 
((Number) body.get("quantity")).intValue() : null; + + if (quantity != null) { + client.track(event, company, user, null, quantity); + } else { + client.track(event, company, user, null); } - Map company = toStringMap(requestBody.get("company")); - Map user = toStringMap(requestBody.get("user")); - - long startTime = System.nanoTime(); - RulesengineCheckFlagResult result = schematic.checkFlagWithEntitlement(flagKey, company, user); - double durationMs = (System.nanoTime() - startTime) / 1_000_000.0; - - Map response = new LinkedHashMap<>(); - response.put("flagKey", result.getFlagKey()); - response.put("value", result.getValue()); - response.put("reason", result.getReason()); - result.getFlagId().ifPresent(v -> response.put("flagId", v)); - result.getCompanyId().ifPresent(v -> response.put("companyId", v)); - result.getUserId().ifPresent(v -> response.put("userId", v)); - result.getRuleId().ifPresent(v -> response.put("ruleId", v)); - result.getErr().ifPresent(v -> response.put("error", v)); - response.put("replicatorMode", schematic.isReplicatorMode()); - response.put("datastreamConnected", schematic.isDatastreamConnected()); - response.put("durationMs", durationMs); - response.put("timestamp", Instant.now().toString()); - - sendJson(exchange, 200, response); + sendJson(exchange, 200, Map.of("success", true)); + } catch (Exception e) { + Map resp = new LinkedHashMap<>(); + resp.put("success", false); + resp.putAll(errorDetail(e)); + sendJson(exchange, 200, resp); + } + } + + @SuppressWarnings("unchecked") + private static void handleSetFlagDefault(HttpExchange exchange) throws IOException { + if (!"POST".equals(exchange.getRequestMethod())) { + sendJson(exchange, 405, Map.of("error", "Method Not Allowed")); + return; + } + if (client == null) { + sendJson(exchange, 503, Map.of("error", "Not configured")); + return; + } + + try { + Map body = MAPPER.readValue(exchange.getRequestBody(), Map.class); + logRequest("/set-flag-default", body); + String flagKey = (String) 
body.get("flagKey"); + boolean value = Boolean.TRUE.equals(body.get("value")); + + client.setFlagDefault(flagKey, value); + + sendJson(exchange, 200, Map.of("success", true)); } catch (Exception e) { - Map error = new LinkedHashMap<>(); - error.put("error", "Flag Check Failed"); - error.put("detail", e.getMessage()); - sendJson(exchange, 500, error); + Map resp = new LinkedHashMap<>(); + resp.put("success", false); + resp.putAll(errorDetail(e)); + sendJson(exchange, 200, resp); } } @@ -265,30 +319,36 @@ private static Map toStringMap(Object obj) { return result; } - private static void sendText(HttpExchange exchange, int status, String text) throws IOException { - byte[] bytes = text.getBytes("UTF-8"); - exchange.getResponseHeaders().set("Content-Type", "text/plain; charset=utf-8"); - exchange.sendResponseHeaders(status, bytes.length); - try (OutputStream os = exchange.getResponseBody()) { - os.write(bytes); + private static void logRequest(String endpoint, Map body) { + try { + System.out.println("[" + endpoint + "] " + MAPPER.writeValueAsString(body)); + } catch (Exception e) { + System.out.println("[" + endpoint + "] (failed to serialize body)"); + } + } + + private static Map errorDetail(Exception e) { + Map detail = new LinkedHashMap<>(); + detail.put("type", e.getClass().getName()); + detail.put("message", e.getMessage()); + if (e.getCause() != null) { + detail.put("cause", e.getCause().getClass().getName() + ": " + e.getCause().getMessage()); } + StringWriter sw = new StringWriter(); + e.printStackTrace(new PrintWriter(sw)); + detail.put("stackTrace", sw.toString()); + // Also print to stderr for test runner visibility + System.err.println("[ERROR] " + e.getClass().getName() + ": " + e.getMessage()); + e.printStackTrace(System.err); + return detail; } private static void sendJson(HttpExchange exchange, int status, Object obj) throws IOException { - byte[] bytes = MAPPER.writerWithDefaultPrettyPrinter().writeValueAsBytes(obj); + byte[] bytes = 
MAPPER.writeValueAsBytes(obj); exchange.getResponseHeaders().set("Content-Type", "application/json"); exchange.sendResponseHeaders(status, bytes.length); try (OutputStream os = exchange.getResponseBody()) { os.write(bytes); } } - - private static void sendMethodNotAllowed(HttpExchange exchange) throws IOException { - sendText(exchange, 405, "Method Not Allowed"); - } - - private static String envOrDefault(String key, String defaultValue) { - String value = System.getenv(key); - return (value != null && !value.isEmpty()) ? value : defaultValue; - } } diff --git a/src/main/java/com/schematic/api/EventBuffer.java b/src/main/java/com/schematic/api/EventBuffer.java index 61af8d7..8e95c76 100644 --- a/src/main/java/com/schematic/api/EventBuffer.java +++ b/src/main/java/com/schematic/api/EventBuffer.java @@ -123,14 +123,15 @@ private void sendBatchWithRetry(List batch, int retryCou // Add ±25% jitter double jitter = (Math.random() - 0.5) * 0.5 * baseDelay; long delayMillis = Math.max(0, baseDelay + (long) jitter); - logger.warn( - "Failed to send event batch, attempting retry %d of %d in %d ms", - retryCount + 1, MAX_RETRY_ATTEMPTS, delayMillis); + logger.warn(String.format( + "Failed to send event batch (%s: %s), attempting retry %d of %d in %d ms", + e.getClass().getSimpleName(), e.getMessage(), retryCount + 1, MAX_RETRY_ATTEMPTS, delayMillis)); scheduler.schedule(() -> sendBatchWithRetry(batch, retryCount + 1), delayMillis, TimeUnit.MILLISECONDS); } else { failedEvents.addAndGet(batch.size()); - logger.error("Failed to flush events: " + e.getMessage()); + logger.error("Failed to flush events after " + MAX_RETRY_ATTEMPTS + " retries: " + + e.getClass().getName() + ": " + e.getMessage()); } } } diff --git a/src/main/java/com/schematic/api/HttpEventSender.java b/src/main/java/com/schematic/api/HttpEventSender.java index 6605668..777e227 100644 --- a/src/main/java/com/schematic/api/HttpEventSender.java +++ b/src/main/java/com/schematic/api/HttpEventSender.java @@ -1,7 +1,6 
@@ package com.schematic.api; import com.fasterxml.jackson.core.JsonProcessingException; -import com.fasterxml.jackson.databind.JsonNode; import com.fasterxml.jackson.databind.node.ArrayNode; import com.fasterxml.jackson.databind.node.ObjectNode; import com.schematic.api.core.ObjectMappers; @@ -51,12 +50,19 @@ public void sendBatch(List events) throws IOException { return; } + // Build batch matching the capture service format (same as Go SDK's EventPayload) ArrayNode eventsArray = ObjectMappers.JSON_MAPPER.createArrayNode(); for (CreateEventRequestBody event : events) { - // Serialize the Fern model to a JSON tree, preserving all current and future fields - JsonNode eventNode = ObjectMappers.JSON_MAPPER.valueToTree(event); - if (eventNode.isObject()) { - ((ObjectNode) eventNode).put("api_key", apiKey); + ObjectNode eventNode = ObjectMappers.JSON_MAPPER.createObjectNode(); + eventNode.put("api_key", apiKey); + eventNode.put("type", event.getEventType().toString()); + if (event.getBody().isPresent()) { + eventNode.set( + "body", + ObjectMappers.JSON_MAPPER.valueToTree(event.getBody().get())); + } + if (event.getSentAt().isPresent()) { + eventNode.put("sent_at", event.getSentAt().get().toString()); } eventsArray.add(eventNode); } @@ -71,18 +77,31 @@ public void sendBatch(List events) throws IOException { throw new IOException("Failed to serialize event batch", e); } + String url = baseUrl + "/batch"; + Request request = new Request.Builder() - .url(baseUrl + "/batch") + .url(url) .post(RequestBody.create(json, JSON)) .addHeader("X-Schematic-Api-Key", apiKey) .addHeader("Content-Type", "application/json") .build(); + if (logger != null) { + logger.debug("Sending event batch (" + events.size() + " events) to " + url); + logger.debug("Event batch payload: " + json); + } + try (Response response = httpClient.newCall(request).execute()) { if (!response.isSuccessful()) { String responseBody = response.body() != null ? 
response.body().string() : ""; + if (logger != null) { + logger.error("Event batch failed: HTTP " + response.code() + " from " + url + ": " + responseBody); + } throw new IOException("HTTP " + response.code() + ": " + responseBody); } + if (logger != null) { + logger.debug("Event batch sent successfully to " + url); + } } } diff --git a/src/main/java/com/schematic/api/Schematic.java b/src/main/java/com/schematic/api/Schematic.java index fd59a3d..0822c4c 100644 --- a/src/main/java/com/schematic/api/Schematic.java +++ b/src/main/java/com/schematic/api/Schematic.java @@ -214,6 +214,13 @@ public boolean isOffline() { return this.offline; } + /** + * Sets a default value for a specific flag at runtime. + */ + public void setFlagDefault(String flagKey, boolean value) { + flagDefaults.put(flagKey, value); + } + /** * Returns the DataStream client, or null if datastream is not configured. */ @@ -465,6 +472,9 @@ private String buildCacheKey(String flagKey, Map company, Map objectMapToJsonNode(Map map) { + if (map == null) { + return null; + } Map result = new HashMap<>(); for (Map.Entry entry : map.entrySet()) { result.put(entry.getKey(), ObjectMappers.JSON_MAPPER.valueToTree(entry.getValue())); From 1041078e31dcc666a2e45178e956631512dfc353 Mon Sep 17 00:00:00 2001 From: Christopher Brady Date: Fri, 3 Apr 2026 16:04:17 -0600 Subject: [PATCH 24/27] be more flexible with redis cache config --- .../api/datastream/DataStreamCacheFactory.java | 17 ++++++++++++++++- 1 file changed, 16 insertions(+), 1 deletion(-) diff --git a/src/main/java/com/schematic/api/datastream/DataStreamCacheFactory.java b/src/main/java/com/schematic/api/datastream/DataStreamCacheFactory.java index dff24d8..febe570 100644 --- a/src/main/java/com/schematic/api/datastream/DataStreamCacheFactory.java +++ b/src/main/java/com/schematic/api/datastream/DataStreamCacheFactory.java @@ -67,7 +67,22 @@ static JedisPooled buildRedisClient(RedisCacheConfig config) { } String endpoint = config.getEndpoints().get(0); - 
String[] parts = endpoint.split(":"); + + // Strip redis:// or rediss:// URI scheme if present + String hostPort = endpoint; + if (hostPort.startsWith("redis://")) { + hostPort = hostPort.substring("redis://".length()); + } else if (hostPort.startsWith("rediss://")) { + hostPort = hostPort.substring("rediss://".length()); + } + + // Strip trailing path/slash (e.g. "localhost:6379/0" -> "localhost:6379") + int slashIdx = hostPort.indexOf('/'); + if (slashIdx >= 0) { + hostPort = hostPort.substring(0, slashIdx); + } + + String[] parts = hostPort.split(":"); String host = parts[0]; int port = parts.length > 1 ? Integer.parseInt(parts[1]) : 6379; From f02c3d102564bcac72e80d448a0187d580306d78 Mon Sep 17 00:00:00 2001 From: Christopher Brady Date: Mon, 6 Apr 2026 12:13:46 -0600 Subject: [PATCH 25/27] add checkflags functionality --- .../java/com/schematic/api/Schematic.java | 289 +++++++++++++----- .../java/com/schematic/api/TestSchematic.java | 264 ++++++++++++++++ 2 files changed, 484 insertions(+), 69 deletions(-) diff --git a/src/main/java/com/schematic/api/Schematic.java b/src/main/java/com/schematic/api/Schematic.java index 0822c4c..f6b5bac 100644 --- a/src/main/java/com/schematic/api/Schematic.java +++ b/src/main/java/com/schematic/api/Schematic.java @@ -13,6 +13,7 @@ import com.schematic.api.logger.ConsoleLogger; import com.schematic.api.logger.SchematicLogger; import com.schematic.api.resources.features.types.CheckFlagResponse; +import com.schematic.api.resources.features.types.CheckFlagsResponse; import com.schematic.api.types.CheckFlagRequestBody; import com.schematic.api.types.CheckFlagResponseData; import com.schematic.api.types.CreateEventRequestBody; @@ -25,6 +26,7 @@ import com.schematic.api.types.RulesengineCheckFlagResult; import java.time.Duration; import java.time.OffsetDateTime; +import java.util.ArrayList; import java.util.Collections; import java.util.HashMap; import java.util.List; @@ -267,53 +269,228 @@ public boolean checkFlag(String flagKey, 
Map company, Map company, Map user) { if (offline) { - return RulesengineCheckFlagResult.builder() + return defaultFlagResult(flagKey, "flag default", null); + } + + RulesengineCheckFlagResult dsResult = tryDatastreamCheckFlag(flagKey, company, user); + if (dsResult != null) { + return dsResult; + } + + return checkFlagViaApi(flagKey, company, user); + } + + private RulesengineCheckFlagResult defaultFlagResult(String flagKey, String reason, String err) { + return RulesengineCheckFlagResult.builder() + .flagKey(flagKey) + .reason(reason) + .value(getFlagDefault(flagKey)) + .err(err) + .build(); + } + + /** + * Attempts to evaluate a flag via the datastream client. Returns the result on + * success (and emits a {@code flag_check} event), or {@code null} if datastream is + * not configured/connected or evaluation failed. + */ + private RulesengineCheckFlagResult tryDatastreamCheckFlag( + String flagKey, Map company, Map user) { + if (dataStreamClient == null || !dataStreamClient.isConnected()) { + return null; + } + try { + RulesengineCheckFlagResult result = dataStreamClient.checkFlag(flagKey, company, user); + enqueueFlagCheckEvent(flagKey, result, company, user); + return result; + } catch (Exception e) { + logger.debug("Datastream flag check failed for " + flagKey + ", falling back to API: " + e.getMessage()); + return null; + } + } + + private RulesengineCheckFlagResult getCachedFlag( + String flagKey, Map company, Map user) { + String cacheKey = buildCacheKey(flagKey, company, user); + for (CacheProvider provider : flagCheckCacheProviders) { + RulesengineCheckFlagResult cached = provider.get(cacheKey); + if (cached != null) { + return cached; + } + } + return null; + } + + private void cacheFlag( + String flagKey, RulesengineCheckFlagResult result, Map company, Map user) { + String cacheKey = buildCacheKey(flagKey, company, user); + for (CacheProvider provider : flagCheckCacheProviders) { + provider.set(cacheKey, result); + } + } + + private void 
enqueueFlagCheckEvent( + String flagKey, RulesengineCheckFlagResult result, Map company, Map user) { + try { + EventBodyFlagCheck flagCheckBody = EventBodyFlagCheck.builder() .flagKey(flagKey) - .reason("flag default") - .value(getFlagDefault(flagKey)) + .reason(result.getReason()) + .value(result.getValue()) + .companyId(result.getCompanyId().orElse(null)) + .userId(result.getUserId().orElse(null)) + .flagId(result.getFlagId().orElse(null)) + .ruleId(result.getRuleId().orElse(null)) + .reqCompany(company) + .reqUser(user) + .error(result.getErr().orElse(null)) .build(); + + CreateEventRequestBody event = CreateEventRequestBody.builder() + .eventType(EventType.FLAG_CHECK) + .body(EventBody.of(flagCheckBody)) + .sentAt(OffsetDateTime.now()) + .build(); + + eventBuffer.push(event); + } catch (Exception e) { + logger.error("Failed to enqueue flag_check event: " + e.getMessage()); } + } - // Try datastream first if available - if (dataStreamClient != null && dataStreamClient.isConnected()) { - try { - RulesengineCheckFlagResult result = dataStreamClient.checkFlag(flagKey, company, user); + /** + * Checks multiple feature flags, returning the full evaluation results in the + * same order as the requested keys. + * + *

      Evaluation order: + *

        + *
      1. Offline mode → return flag defaults for the requested keys
      2. + *
      3. DataStream / replicator (if configured and connected) → evaluate each key + * locally; falls back to the API if any key fails
      4. + *
      5. Otherwise → look up each requested key in the result cache; if any are + * missing, issue a single bulk {@code features.checkFlags} API call to fetch + * fresh values, refresh the cache, and merge the results
      6. + *
      + * + *

      If {@code flagKeys} is null or empty, the bulk API is called once to discover + * all flags available for the given context. + */ + public List checkFlags( + List flagKeys, Map company, Map user) { + // 1. Offline → return flag defaults for the requested keys. If no keys were + // provided, fall back to every key in the configured flag defaults map. + if (offline) { + Iterable keysToReturn = (flagKeys == null || flagKeys.isEmpty()) ? flagDefaults.keySet() : flagKeys; + List results = new ArrayList<>(); + for (String key : keysToReturn) { + if (key == null) continue; + results.add(defaultFlagResult(key, "Offline mode - using default value", null)); + } + return results; + } - // Enqueue flag_check event for analytics - try { - EventBodyFlagCheck flagCheckBody = EventBodyFlagCheck.builder() - .flagKey(flagKey) - .reason(result.getReason()) - .value(result.getValue()) - .companyId(result.getCompanyId().orElse(null)) - .userId(result.getUserId().orElse(null)) - .flagId(result.getFlagId().orElse(null)) - .ruleId(result.getRuleId().orElse(null)) - .reqCompany(company) - .reqUser(user) - .error(result.getErr().orElse(null)) - .build(); - - CreateEventRequestBody event = CreateEventRequestBody.builder() - .eventType(EventType.FLAG_CHECK) - .body(EventBody.of(flagCheckBody)) - .sentAt(OffsetDateTime.now()) - .build(); - - eventBuffer.push(event); - } catch (Exception e) { - logger.error("Failed to enqueue flag_check event: " + e.getMessage()); + // 2. DataStream/replicator path: evaluate each key; on any failure fall back to API. 
+ if (dataStreamClient != null && dataStreamClient.isConnected() && flagKeys != null && !flagKeys.isEmpty()) { + List dsResults = new ArrayList<>(flagKeys.size()); + boolean dsOk = true; + for (String key : flagKeys) { + if (key == null) continue; + RulesengineCheckFlagResult result = tryDatastreamCheckFlag(key, company, user); + if (result == null) { + dsOk = false; + break; } + dsResults.add(result); + } + if (dsOk) { + return dsResults; + } + } - return result; - } catch (Exception e) { - logger.debug( - "Datastream flag check failed for " + flagKey + ", falling back to API: " + e.getMessage()); + // 3. Cache + bulk API path. + try { + CheckFlagRequestBody request = + CheckFlagRequestBody.builder().company(company).user(user).build(); + + // No keys → discover all flags for the context via the bulk API. + if (flagKeys == null || flagKeys.isEmpty()) { + CheckFlagsResponse response = features().checkFlags(request); + List flags = response.getData().getFlags(); + List all = new ArrayList<>(flags.size()); + for (CheckFlagResponseData f : flags) { + all.add(toRulesengineResult(f)); + } + return all; + } + + // Look up each key in the cache; track which are missing. + Map cachedResults = new HashMap<>(); + boolean anyMissing = false; + for (String key : flagKeys) { + if (key == null) continue; + RulesengineCheckFlagResult hit = getCachedFlag(key, company, user); + if (hit != null) { + cachedResults.put(key, hit); + } else { + anyMissing = true; + } + } + + // All cached → return without an API call. + if (!anyMissing) { + List results = new ArrayList<>(flagKeys.size()); + for (String key : flagKeys) { + if (key == null) continue; + results.add(cachedResults.get(key)); + } + return results; + } + + // Cache miss → one bulk API call; refresh cache for everything returned. 
+ Map apiResults = new HashMap<>(); + CheckFlagsResponse response = features().checkFlags(request); + for (CheckFlagResponseData f : response.getData().getFlags()) { + RulesengineCheckFlagResult result = toRulesengineResult(f); + apiResults.put(f.getFlag(), result); + cacheFlag(f.getFlag(), result, company, user); + } + + // Build results in requested key order. Prefer fresh API values, fall back + // to the configured flag default for any keys missing from the response. + List results = new ArrayList<>(flagKeys.size()); + for (String key : flagKeys) { + if (key == null) continue; + RulesengineCheckFlagResult fresh = apiResults.get(key); + if (fresh != null) { + results.add(fresh); + } else { + results.add(defaultFlagResult(key, "Flag not found - using default value", null)); + } } + return results; + } catch (Exception e) { + logger.error("Error checking flags via API: " + e.getMessage()); + List fallback = new ArrayList<>(); + if (flagKeys != null) { + for (String key : flagKeys) { + if (key == null) continue; + fallback.add(defaultFlagResult( + key, "Error occurred - using default value: " + e.getMessage(), e.getMessage())); + } + } + return fallback; } + } - // Fall back to API - return checkFlagViaApi(flagKey, company, user); + private RulesengineCheckFlagResult toRulesengineResult(CheckFlagResponseData data) { + return RulesengineCheckFlagResult.builder() + .flagKey(data.getFlag()) + .reason(data.getReason()) + .value(data.getValue()) + .flagId(data.getFlagId().orElse(null)) + .companyId(data.getCompanyId().orElse(null)) + .userId(data.getUserId().orElse(null)) + .ruleId(data.getRuleId().orElse(null)) + .build(); } /** @@ -322,47 +499,21 @@ public RulesengineCheckFlagResult checkFlagWithEntitlement( private RulesengineCheckFlagResult checkFlagViaApi( String flagKey, Map company, Map user) { try { - String cacheKey = buildCacheKey(flagKey, company, user); - - // Check flag check result cache - for (CacheProvider provider : flagCheckCacheProviders) { - 
RulesengineCheckFlagResult cached = provider.get(cacheKey); - if (cached != null) { - return cached; - } + RulesengineCheckFlagResult cached = getCachedFlag(flagKey, company, user); + if (cached != null) { + return cached; } - // Make API call CheckFlagRequestBody request = CheckFlagRequestBody.builder().company(company).user(user).build(); - CheckFlagResponse response = features().checkFlag(flagKey, request); - CheckFlagResponseData data = response.getData(); - - RulesengineCheckFlagResult result = RulesengineCheckFlagResult.builder() - .flagKey(flagKey) - .reason(data.getReason()) - .value(data.getValue()) - .flagId(data.getFlagId().orElse(null)) - .companyId(data.getCompanyId().orElse(null)) - .userId(data.getUserId().orElse(null)) - .ruleId(data.getRuleId().orElse(null)) - .build(); - - // Update flag check result cache - for (CacheProvider provider : flagCheckCacheProviders) { - provider.set(cacheKey, result); - } + RulesengineCheckFlagResult result = toRulesengineResult(response.getData()); + cacheFlag(flagKey, result, company, user); return result; } catch (Exception e) { logger.error("Error checking flag via API: " + e.getMessage()); - return RulesengineCheckFlagResult.builder() - .flagKey(flagKey) - .reason("flag default") - .value(getFlagDefault(flagKey)) - .err(e.getMessage()) - .build(); + return defaultFlagResult(flagKey, "flag default", e.getMessage()); } } diff --git a/src/test/java/com/schematic/api/TestSchematic.java b/src/test/java/com/schematic/api/TestSchematic.java index b8fe156..ca5978f 100644 --- a/src/test/java/com/schematic/api/TestSchematic.java +++ b/src/test/java/com/schematic/api/TestSchematic.java @@ -11,13 +11,17 @@ import com.schematic.api.logger.SchematicLogger; import com.schematic.api.resources.features.FeaturesClient; import com.schematic.api.resources.features.types.CheckFlagResponse; +import com.schematic.api.resources.features.types.CheckFlagsResponse; import com.schematic.api.types.CheckFlagRequestBody; import 
com.schematic.api.types.CheckFlagResponseData; +import com.schematic.api.types.CheckFlagsResponseData; import com.schematic.api.types.EventBodyIdentifyCompany; import com.schematic.api.types.RulesengineCheckFlagResult; import java.time.Duration; +import java.util.Arrays; import java.util.Collections; import java.util.HashMap; +import java.util.List; import java.util.Map; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; @@ -399,4 +403,264 @@ void checkFlag_DifferentCacheKeysForDifferentContexts() { // Both calls should hit API since they have different cache keys verify(featuresClient, times(2)).checkFlag(eq("ctx_flag"), any(CheckFlagRequestBody.class)); } + + // ========================================================================= + // checkFlags batch tests — mirrors schematic-ruby/test/custom.test.rb:953 + // ========================================================================= + + @Test + void checkFlags_OfflineModeReturnsDefaults() { + Map defaults = new HashMap<>(); + defaults.put("flag-a", true); + defaults.put("flag-b", false); + Schematic offlineSchematic = Schematic.builder() + .apiKey("test_api_key") + .offline(true) + .flagDefaults(defaults) + .logger(logger) + .build(); + + List results = + offlineSchematic.checkFlags(Arrays.asList("flag-a", "flag-b", "flag-c"), null, null); + + assertEquals(3, results.size()); + assertEquals("flag-a", results.get(0).getFlagKey()); + assertTrue(results.get(0).getValue()); + assertEquals("flag-b", results.get(1).getFlagKey()); + assertFalse(results.get(1).getValue()); + // unknown flag defaults to false + assertEquals("flag-c", results.get(2).getFlagKey()); + assertFalse(results.get(2).getValue()); + for (RulesengineCheckFlagResult r : results) { + assertEquals("Offline mode - using default value", r.getReason()); + } + } + + @Test + void checkFlags_OfflineModeNoKeysReturnsAllConfiguredDefaults() { + Map defaults = new HashMap<>(); + defaults.put("default-a", true); + 
defaults.put("default-b", false); + Schematic offlineSchematic = Schematic.builder() + .apiKey("test_api_key") + .offline(true) + .flagDefaults(defaults) + .logger(logger) + .build(); + + List results = offlineSchematic.checkFlags(null, null, null); + + assertEquals(2, results.size()); + Map flagMap = new HashMap<>(); + for (RulesengineCheckFlagResult r : results) { + flagMap.put(r.getFlagKey(), r.getValue()); + } + assertTrue(flagMap.get("default-a")); + assertFalse(flagMap.get("default-b")); + } + + @Test + void checkFlags_OfflineModeNoKeysNoDefaultsReturnsEmpty() { + Schematic offlineSchematic = Schematic.builder() + .apiKey("test_api_key") + .offline(true) + .logger(logger) + .build(); + + List results = offlineSchematic.checkFlags(null, null, null); + + assertTrue(results.isEmpty()); + } + + @Test + void checkFlags_ReturnsValuesFromBulkApi() { + FeaturesClient featuresClient = mock(FeaturesClient.class); + Schematic spySchematic = spy(Schematic.builder() + .apiKey("test_api_key") + .cacheProviders(Collections.emptyList()) + .flagDefaults(Collections.singletonMap("flag-c", true)) + .logger(logger) + .build()); + when(spySchematic.features()).thenReturn(featuresClient); + + CheckFlagsResponse response = CheckFlagsResponse.builder() + .data(CheckFlagsResponseData.builder() + .flags(Arrays.asList( + CheckFlagResponseData.builder() + .flag("flag-a") + .reason("match") + .value(true) + .build(), + CheckFlagResponseData.builder() + .flag("flag-b") + .reason("no match") + .value(false) + .build())) + .build()) + .build(); + when(featuresClient.checkFlags(any(CheckFlagRequestBody.class))).thenReturn(response); + + List results = spySchematic.checkFlags( + Arrays.asList("flag-a", "flag-b", "flag-c"), Collections.singletonMap("org_id", "abc"), null); + + assertEquals(3, results.size()); + assertEquals("flag-a", results.get(0).getFlagKey()); + assertTrue(results.get(0).getValue()); + assertEquals("flag-b", results.get(1).getFlagKey()); + 
assertFalse(results.get(1).getValue()); + // flag-c not returned by API → falls back to default (true) + assertEquals("flag-c", results.get(2).getFlagKey()); + assertTrue(results.get(2).getValue()); + assertEquals("Flag not found - using default value", results.get(2).getReason()); + } + + @Test + void checkFlags_AllCachedReturnsWithoutBulkApiCall() { + FeaturesClient featuresClient = mock(FeaturesClient.class); + Schematic spySchematic = spy(schematic); + when(spySchematic.features()).thenReturn(featuresClient); + + Map company = Collections.singletonMap("org_id", "abc"); + + // Pre-populate cache for both flags via individual checks + CheckFlagResponse responseX = CheckFlagResponse.builder() + .data(CheckFlagResponseData.builder() + .flag("flag-x") + .reason("match") + .value(true) + .build()) + .build(); + CheckFlagResponse responseY = CheckFlagResponse.builder() + .data(CheckFlagResponseData.builder() + .flag("flag-y") + .reason("no match") + .value(false) + .build()) + .build(); + when(featuresClient.checkFlag(eq("flag-x"), any(CheckFlagRequestBody.class))) + .thenReturn(responseX); + when(featuresClient.checkFlag(eq("flag-y"), any(CheckFlagRequestBody.class))) + .thenReturn(responseY); + + spySchematic.checkFlag("flag-x", company, null); + spySchematic.checkFlag("flag-y", company, null); + + // Batch check — both should come from cache, no bulk API call should fire + List results = + spySchematic.checkFlags(Arrays.asList("flag-x", "flag-y"), company, null); + + assertEquals(2, results.size()); + assertTrue(results.get(0).getValue()); + assertFalse(results.get(1).getValue()); + verify(featuresClient, never()).checkFlags(any(CheckFlagRequestBody.class)); + } + + @Test + void checkFlags_FetchesFreshValuesForAllKeysOnAnyCacheMiss() { + FeaturesClient featuresClient = mock(FeaturesClient.class); + Schematic spySchematic = spy(schematic); + when(spySchematic.features()).thenReturn(featuresClient); + + Map company = Collections.singletonMap("org_id", "abc"); + + // 
Pre-populate cache for one flag via individual check (stale value: true) + CheckFlagResponse cachedResponse = CheckFlagResponse.builder() + .data(CheckFlagResponseData.builder() + .flag("cached-flag") + .reason("old cached reason") + .value(true) + .build()) + .build(); + when(featuresClient.checkFlag(eq("cached-flag"), any(CheckFlagRequestBody.class))) + .thenReturn(cachedResponse); + spySchematic.checkFlag("cached-flag", company, null); + + // Bulk API returns updated value (false) for cached-flag, plus uncached-flag + CheckFlagsResponse bulkResponse = CheckFlagsResponse.builder() + .data(CheckFlagsResponseData.builder() + .flags(Arrays.asList( + CheckFlagResponseData.builder() + .flag("cached-flag") + .reason("updated reason") + .value(false) + .build(), + CheckFlagResponseData.builder() + .flag("uncached-flag") + .reason("new match") + .value(true) + .build())) + .build()) + .build(); + when(featuresClient.checkFlags(any(CheckFlagRequestBody.class))).thenReturn(bulkResponse); + + List results = + spySchematic.checkFlags(Arrays.asList("cached-flag", "uncached-flag"), company, null); + + assertEquals(2, results.size()); + // cached-flag should use fresh API value (false), not stale cached value (true) + assertFalse(results.get(0).getValue(), "should use fresh API value, not stale cache"); + assertEquals("updated reason", results.get(0).getReason()); + assertTrue(results.get(1).getValue()); + } + + @Test + void checkFlags_ReturnsDefaultsOnApiError() { + FeaturesClient featuresClient = mock(FeaturesClient.class); + Schematic spySchematic = spy(Schematic.builder() + .apiKey("test_api_key") + .cacheProviders(Collections.emptyList()) + .flagDefaults(Collections.singletonMap("err-flag", true)) + .logger(logger) + .build()); + when(spySchematic.features()).thenReturn(featuresClient); + when(featuresClient.checkFlags(any(CheckFlagRequestBody.class))) + .thenThrow(new RuntimeException("Internal Server Error")); + + List results = + 
spySchematic.checkFlags(Arrays.asList("err-flag", "other-flag"), null, null); + + assertEquals(2, results.size()); + // err-flag has a configured default of true + assertTrue(results.get(0).getValue()); + // other-flag has no default → false + assertFalse(results.get(1).getValue()); + verify(logger).error(contains("Error checking flags via API")); + } + + @Test + void checkFlags_NoKeysCallsBulkApi() { + FeaturesClient featuresClient = mock(FeaturesClient.class); + Schematic spySchematic = spy(Schematic.builder() + .apiKey("test_api_key") + .cacheProviders(Collections.emptyList()) + .logger(logger) + .build()); + when(spySchematic.features()).thenReturn(featuresClient); + + CheckFlagsResponse response = CheckFlagsResponse.builder() + .data(CheckFlagsResponseData.builder() + .flags(Arrays.asList( + CheckFlagResponseData.builder() + .flag("auto-a") + .reason("match") + .value(true) + .build(), + CheckFlagResponseData.builder() + .flag("auto-b") + .reason("no match") + .value(false) + .build())) + .build()) + .build(); + when(featuresClient.checkFlags(any(CheckFlagRequestBody.class))).thenReturn(response); + + List results = + spySchematic.checkFlags(null, Collections.singletonMap("org_id", "abc"), null); + + assertEquals(2, results.size()); + assertEquals("auto-a", results.get(0).getFlagKey()); + assertTrue(results.get(0).getValue()); + assertEquals("auto-b", results.get(1).getFlagKey()); + assertFalse(results.get(1).getValue()); + } } From 72516c6a38579526b134564b3bf9515f53a10f7e Mon Sep 17 00:00:00 2001 From: Christopher Brady Date: Wed, 8 Apr 2026 12:13:59 -0600 Subject: [PATCH 26/27] fern ignore github workflows --- .fernignore | 1 + 1 file changed, 1 insertion(+) diff --git a/.fernignore b/.fernignore index 8d2c2df..a3c7968 100644 --- a/.fernignore +++ b/.fernignore @@ -4,6 +4,7 @@ CLAUDE.md LICENSE README.md .github/CODEOWNERS +.github/workflows/ci.yml WASM_VERSION scripts/ src/main/java/com/schematic/api/BaseSchematic.java From 
8ecb01d1468ba62a3aa055145a17b2d48e482c9f Mon Sep 17 00:00:00 2001 From: Christopher Brady Date: Fri, 10 Apr 2026 08:41:12 -0600 Subject: [PATCH 27/27] update partial test --- .../api/datastream/EntityMergeTest.java | 24 ++++++++++++++----- 1 file changed, 18 insertions(+), 6 deletions(-) diff --git a/src/test/java/com/schematic/api/datastream/EntityMergeTest.java b/src/test/java/com/schematic/api/datastream/EntityMergeTest.java index 80b9151..4ede4f3 100644 --- a/src/test/java/com/schematic/api/datastream/EntityMergeTest.java +++ b/src/test/java/com/schematic/api/datastream/EntityMergeTest.java @@ -290,14 +290,20 @@ void partialCompany_emptyEntitlementsClearsExisting() { } @Test - void partialCompany_missingIdThrowsError() { + void partialCompany_toleratesMissingId() { + // Wire shape from the API: data is wrapped under the field name, + // no id at the top level. Cache lookup happens at the handler level + // using entityId from the envelope, not from the data payload. RulesengineCompany existing = buildCompany("comp-1", Collections.singletonMap("id", "comp-1")); ObjectNode partial = objectMapper.createObjectNode(); - partial.putNull("id"); partial.put("account_id", "acc_new"); - assertThrows(Exception.class, () -> EntityMerge.partialCompany(existing, partial)); + RulesengineCompany merged = EntityMerge.partialCompany(existing, partial); + + // Existing id is preserved; account_id is updated. + assertEquals("comp-1", merged.getId()); + assertEquals("acc_new", merged.getAccountId()); } @Test @@ -452,14 +458,20 @@ void partialUser_onlyTraits() { } @Test - void partialUser_missingIdThrowsError() { + void partialUser_toleratesMissingId() { + // Wire shape from the API: data is wrapped under the field name, + // no id at the top level. Cache lookup happens at the handler level + // using entityId from the envelope, not from the data payload. 
RulesengineUser existing = buildUser("user-1", Collections.singletonMap("id", "user-1")); ObjectNode partial = objectMapper.createObjectNode(); - partial.putNull("id"); partial.put("account_id", "acc_new"); - assertThrows(Exception.class, () -> EntityMerge.partialUser(existing, partial)); + RulesengineUser merged = EntityMerge.partialUser(existing, partial); + + // Existing id is preserved; account_id is updated. + assertEquals("user-1", merged.getId()); + assertEquals("acc_new", merged.getAccountId()); } @Test