diff --git a/backends/advanced-backend/.env.template b/backends/advanced-backend/.env.template
index d5337c11..954c106c 100644
--- a/backends/advanced-backend/.env.template
+++ b/backends/advanced-backend/.env.template
@@ -8,19 +8,22 @@ ADMIN_PASSWORD=
ADMIN_EMAIL=admin@example.com
# ========================================
-# LLM CONFIGURATION (Choose one)
+# LLM CONFIGURATION (Standard)
# ========================================
-# LLM Provider: "openai" or "ollama" (default: ollama)
+# LLM Provider: "openai" or "ollama" (default: openai)
LLM_PROVIDER=openai
-# For OpenAI (recommended for best memory extraction)
-OPENAI_API_KEY=
+# OpenAI or OpenAI-compatible API configuration
+OPENAI_API_KEY=your-openai-key-here
+OPENAI_BASE_URL=https://api.openai.com/v1
OPENAI_MODEL=gpt-4o
-# For Ollama (local LLM)
-OLLAMA_BASE_URL=http://ollama:11434
-# OLLAMA_MODEL=gemma3n:e4b
+# For Ollama (OpenAI-compatible mode):
+# LLM_PROVIDER=ollama
+# OPENAI_API_KEY=dummy
+# OPENAI_BASE_URL=http://ollama:11434/v1
+# OPENAI_MODEL=llama3.1:latest
# ========================================
# SPEECH-TO-TEXT CONFIGURATION (Choose one)
@@ -42,10 +45,15 @@ MONGODB_URI=mongodb://mongo:27017
# Qdrant for vector memory storage (defaults to qdrant)
QDRANT_BASE_URL=qdrant
+
# ========================================
# OPTIONAL FEATURES
# ========================================
+NEO4J_HOST=neo4j-mem0
+NEO4J_USER=neo4j
+NEO4J_PASSWORD=
+
# Debug directory for troubleshooting
DEBUG_DIR=./debug_dir
diff --git a/backends/advanced-backend/Docs/README.md b/backends/advanced-backend/Docs/README.md
index 7a50b126..693722d7 100644
--- a/backends/advanced-backend/Docs/README.md
+++ b/backends/advanced-backend/Docs/README.md
@@ -2,7 +2,7 @@
## đ **New Developer Reading Order**
-Welcome to friend-lite! This guide provides the optimal reading sequence to understand the complete voice → transcript → memories + action items system.
+Welcome to friend-lite! This guide provides the optimal reading sequence to understand the complete voice → transcript → memories system.
---
@@ -10,7 +10,7 @@ Welcome to friend-lite! This guide provides the optimal reading sequence to unde
### 1. **[Overview & Quick Start](./quickstart.md)** â *START HERE*
**Read first** - Complete system overview and setup guide
-- What the system does (voice → memories + action items)
+- What the system does (voice → memories)
- Key features and capabilities
- Basic setup and configuration
- **Code References**: `main.py`, `memory_config.yaml`, `docker-compose.yml`
@@ -36,17 +36,7 @@ Welcome to friend-lite! This guide provides the optimal reading sequence to unde
- `main.py:1047-1065` (conversation end trigger)
- `main.py:1163-1195` (background processing)
-### 4. **[Action Items System](./action-items.md)**
-**Real-time task extraction and management**
-- How action items are detected and extracted
-- MongoDB storage and CRUD operations
-- Trigger phrases and configuration
-- **Code References**:
- - `src/action_items_service.py` (primary handler)
- - `main.py:1341-1378` (real-time processing)
- - `main.py:2671-2800` (API endpoints)
-
-### 5. **[Authentication System](./auth.md)**
+### 4. **[Authentication System](./auth.md)**
**User management and security**
- Dual authentication (email + user_id)
- JWT tokens and OAuth integration
@@ -60,7 +50,7 @@ Welcome to friend-lite! This guide provides the optimal reading sequence to unde
## đ **Advanced Topics**
-### 6. **Memory Debug System** â `../MEMORY_DEBUG_IMPLEMENTATION.md`
+### 5. **Memory Debug System** â `../MEMORY_DEBUG_IMPLEMENTATION.md`
**Pipeline tracking and debugging**
- How to track transcript â memory conversion
- Debug database schema and API endpoints
@@ -70,21 +60,13 @@ Welcome to friend-lite! This guide provides the optimal reading sequence to unde
- `src/memory_debug_api.py` (debug endpoints)
- `main.py:1562-1563` (debug router integration)
-### 7. **Action Items Architecture** â `../ACTION_ITEMS_CLEANUP_SUMMARY.md`
-**Clean architecture explanation**
-- Why action items were moved out of memory service
-- Current single-responsibility design
-- How components interact
-- **Code References**: `src/action_items_service.py` vs removed functions
-
---
## đ **Configuration & Customization**
-### 8. **Configuration File** â `../memory_config.yaml`
+### 6. **Configuration File** â `../memory_config.yaml`
**Central configuration for all extraction**
- Memory extraction settings and prompts
-- Action item triggers and configuration
- Quality control and debug settings
- **Code References**:
- `src/memory_config_loader.py` (config loading)
@@ -106,12 +88,6 @@ Welcome to friend-lite! This guide provides the optimal reading sequence to unde
3. `src/memory/memory_service.py` - Implementation
4. `main.py:1047-1065, 1163-1195` - Processing triggers
-### **"I want to work on action items"**
-1. [action-items.md](./action-items.md) - Action items system
-2. `../memory_config.yaml` - Action item configuration
-3. `src/action_items_service.py` - Implementation
-4. `main.py:1341-1378` - Real-time processing
-
### **"I want to debug pipeline issues"**
1. `../MEMORY_DEBUG_IMPLEMENTATION.md` - Debug system overview
2. `src/memory_debug.py` - Debug tracking implementation
@@ -142,7 +118,6 @@ backends/advanced-backend/
â âââ main.py # Core application (WebSocket, API)
â âââ auth.py # Authentication system
â âââ users.py # User management
-â âââ action_items_service.py # Action items (MongoDB)
â âââ memory/
â â âââ memory_service.py # Memory system (Mem0)
â âââ memory_debug.py # Debug tracking (SQLite)
@@ -162,11 +137,9 @@ backends/advanced-backend/
- **Entry**: WebSocket endpoints in `main.py:1562+`
- **Transcription**: `main.py:1258-1340` (transcription processor)
- **Memory Trigger**: `main.py:1047-1065` (conversation end)
-- **Action Items**: `main.py:1341-1378` (real-time processing)
### **Data Storage**
- **Memories**: `src/memory/memory_service.py` â Mem0 â Qdrant
-- **Action Items**: `src/action_items_service.py` â MongoDB
- **Debug Data**: `src/memory_debug.py` â SQLite
### **Configuration**
@@ -186,7 +159,7 @@ backends/advanced-backend/
1. **Follow the references**: Each doc links to specific code files and line numbers
2. **Use the debug API**: `GET /api/debug/memory/stats` shows live system status
3. **Check configuration first**: Many behaviors are controlled by `memory_config.yaml`
-4. **Understand the dual pipeline**: Memories (end-of-conversation) vs Action Items (real-time)
+4. **Understand the memory pipeline**: Memories (end-of-conversation)
5. **Test with curl**: All API endpoints have curl examples in the docs
---
@@ -206,7 +179,7 @@ backends/advanced-backend/
- **Add code references**: When updating docs, include file paths and line numbers
- **Test your changes**: Use the debug API to verify your modifications work
- **Update configuration**: Add new settings to `memory_config.yaml` when needed
-- **Follow the architecture**: Keep memories and action items in their respective services
+- **Follow the architecture**: Keep memories in their respective services
### **Getting Help**
diff --git a/backends/advanced-backend/Docs/action-items.md b/backends/advanced-backend/Docs/action-items.md
deleted file mode 100644
index ed27adcd..00000000
--- a/backends/advanced-backend/Docs/action-items.md
+++ /dev/null
@@ -1,345 +0,0 @@
-# Action Items Configuration and Usage
-
-> đ **Prerequisite**: Read [quickstart.md](./quickstart.md) first for system overview.
-
-## Overview
-
-The friend-lite backend includes a comprehensive action items system that automatically extracts tasks and commitments from conversations. This system operates in **real-time** alongside the memory extraction system, providing immediate task detection and management capabilities.
-
-**Code References**:
-- **Main Implementation**: `src/action_items_service.py` (MongoDB-based storage and processing)
-- **Real-time Processing**: `main.py:1341-1378` (per-transcript-segment processing)
-- **API Endpoints**: `main.py:2671-2800` (action items CRUD operations)
-- **Configuration**: `memory_config.yaml` (action_item_extraction section)
-
-## Architecture
-
-### Dual Processing System
-
-The action items system operates in parallel with memory extraction:
-
-```
-Audio â Transcription â Dual Processing
- ââ Memory Pipeline (end-of-conversation)
- ââ Action Item Pipeline (real-time per-segment)
-```
-
-### Key Components
-
-1. **Real-time Detection**: Each transcript segment is checked for action item triggers
-2. **Configurable Extraction**: YAML-based configuration for prompts and triggers
-3. **MongoDB Storage**: Action items stored in dedicated collection with full CRUD
-4. **Debug Tracking**: SQLite-based tracking of extraction process
-5. **User-Centric Design**: All action items keyed by user_id, not client_id
-
-### Architecture Cleanup
-
-**Previous Issue**: The system had duplicated action item processing in two places:
-- `ActionItemsService` (MongoDB-based, primary handler)
-- `MemoryService` (Mem0-based, unused legacy code)
-
-**Current Architecture**: Clean separation of concerns:
-- **`ActionItemsService`**: Handles ALL action item operations (MongoDB-based)
-- **`MemoryService`**: Handles ONLY memory operations (Mem0-based)
-- **Debug System**: Tracks both memories and action items in unified SQLite database
-
-## Configuration
-
-### Basic Configuration (`memory_config.yaml`)
-
-**Configuration Loading**: See `src/memory_config_loader.py:get_action_item_extraction_config()` for how this configuration is loaded and used.
-
-```yaml
-action_item_extraction:
- # Enable/disable action item extraction
- enabled: true
-
- # Trigger phrases that indicate action items
- trigger_phrases:
- - "simon says" # Primary trigger (case-insensitive)
- - "action item" # Explicit action item
- - "todo" # Simple todo
- - "follow up" # Follow-up tasks
- - "next step" # Next steps
- - "homework" # Assignments
- - "deliverable" # Project deliverables
- - "deadline" # Time-sensitive tasks
- - "schedule" # Scheduling tasks
- - "reminder" # Reminders
-
- # LLM extraction prompt
- prompt: |
- Extract actionable tasks and commitments from this conversation.
-
- Look for:
- - Explicit commitments ("I'll send you the report")
- - Requested actions ("Can you review the document?")
- - Scheduled tasks ("We need to meet next week")
- - Follow-up items ("Let's check on this tomorrow")
- - Deliverables mentioned ("The presentation is due Friday")
-
- For each action item, determine:
- - What needs to be done (clear, specific description)
- - Who is responsible (assignee)
- - When it's due (deadline if mentioned)
- - Priority level (high/medium/low)
-
- Return ONLY valid JSON array. If no action items found, return [].
-
- Example format:
- [
- {
- "description": "Send project status report to team",
- "assignee": "John",
- "due_date": "Friday",
- "priority": "high",
- "context": "Discussed in weekly team meeting"
- }
- ]
-
- # LLM settings for action item extraction
- llm_settings:
- temperature: 0.1 # Low temperature for consistent extraction
- max_tokens: 1000 # Sufficient for multiple action items
- model: "llama3.1:latest" # Can be overridden by environment
-```
-
-### Advanced Configuration
-
-```yaml
-action_item_extraction:
- enabled: true
-
- # Enhanced trigger detection
- trigger_phrases:
- - "simon says"
- - "action item"
- - "i need to"
- - "we should"
- - "let's"
- - "can you"
- - "please"
- - "remember to"
- - "don't forget"
- - "make sure"
-
- # Custom extraction prompt with specific instructions
- prompt: |
- You are an expert task manager. Extract actionable items from this conversation.
-
- Focus on:
- 1. Specific commitments with clear ownership
- 2. Time-bound tasks with deadlines
- 3. Follow-up actions requiring completion
- 4. Deliverables with clear outcomes
-
- For each action item, provide:
- - description: Clear, specific task description
- - assignee: Person responsible (use "unassigned" if unclear)
- - due_date: Deadline if mentioned (use "not_specified" if not clear)
- - priority: Based on urgency (high/medium/low/not_specified)
- - context: Brief context about when/why this was mentioned
-
- Return ONLY valid JSON array. Empty array if no action items found.
-
- # Fine-tuned LLM parameters
- llm_settings:
- temperature: 0.05 # Very low for consistent extraction
- max_tokens: 1500 # More tokens for detailed extraction
- model: "llama3.1:latest"
-```
-
-## Usage Examples
-
-### Trigger Phrase Examples
-
-The system detects action items when trigger phrases are present:
-
-```
-â
"Simon says we need to schedule a follow-up meeting"
-â
"Action item: John will send the report by Friday"
-â
"Todo: Review the contract before tomorrow"
-â
"Follow up with the client about their requirements"
-â
"Next step is to finalize the budget proposal"
-â
"Can you please update the documentation?"
-â
"Let's schedule a review meeting for next week"
-â
"Don't forget to submit the quarterly report"
-```
-
-### Action Item Data Structure
-
-```json
-{
- "description": "Send project status report to team",
- "assignee": "John Smith",
- "due_date": "Friday, December 15th",
- "priority": "high",
- "status": "open",
- "context": "Discussed in weekly team meeting",
- "audio_uuid": "audio_12345",
- "client_id": "user1-laptop",
- "user_id": "user1",
- "created_at": 1703548800,
- "updated_at": 1703548800
-}
-```
-
-## API Endpoints
-
-### Action Items Management
-
-**API Implementation**: See `main.py:2671-2800` for complete CRUD endpoint implementations.
-
-```bash
-# Get user's action items
-GET /api/action_items?status=open&limit=20
-
-# Get specific action item
-GET /api/action_items/{action_item_id}
-
-# Update action item status
-PUT /api/action_items/{action_item_id}
-Content-Type: application/json
-{
- "status": "completed"
-}
-
-# Search action items
-GET /api/action_items/search?query=report&status=open
-
-# Delete action item
-DELETE /api/action_items/{action_item_id}
-```
-
-### Debug & Monitoring
-
-```bash
-# View action item extraction stats
-GET /api/debug/memory/stats
-
-# View recent action item sessions
-GET /api/debug/memory/sessions
-
-# Debug specific session
-GET /api/debug/memory/session/{audio_uuid}
-
-# View pipeline trace
-GET /api/debug/memory/pipeline/{audio_uuid}
-```
-
-## Debug Tracking
-
-The system tracks all action item extraction attempts:
-
-### What's Tracked
-
-- **Extraction Attempts**: Success/failure of each extraction
-- **Processing Time**: How long each extraction takes
-- **Prompt Used**: Which prompt was used for extraction
-- **LLM Model**: Which model performed the extraction
-- **Transcript Length**: Size of input text
-- **Error Details**: Specific error messages for failed extractions
-
-### Debug Database Schema
-
-```sql
--- Action item extractions are stored as memory_extractions with type='action_item'
-SELECT
- audio_uuid,
- memory_text,
- extraction_prompt,
- metadata_json,
- created_at
-FROM memory_extractions
-WHERE memory_type = 'action_item';
-
--- Processing attempts show success/failure patterns
-SELECT
- audio_uuid,
- attempt_type,
- success,
- error_message,
- processing_time_ms
-FROM extraction_attempts
-WHERE attempt_type = 'action_item_extraction';
-```
-
-## Performance Optimization
-
-### Configuration Tips
-
-1. **Adjust Trigger Phrases**: Add domain-specific triggers for your use case
-2. **Tune LLM Parameters**: Lower temperature for consistency, higher for creativity
-3. **Optimize Prompts**: Include examples specific to your workflow
-4. **Monitor Processing Time**: Use debug endpoints to identify bottlenecks
-
-### Quality Control
-
-```yaml
-quality_control:
- # Skip very short transcripts
- min_conversation_length: 10
-
- # Skip transcripts with low meaningful content
- skip_low_content: true
- min_content_ratio: 0.2
-
- # Skip common filler patterns
- skip_patterns:
- - "^(um|uh|hmm|yeah|ok|okay)\\s*$"
- - "^test\\s*$"
-```
-
-## Integration with Memory System
-
-Action items and memories work together:
-
-1. **Shared Debug Tracking**: Both use the same SQLite debug database
-2. **Coordinated Processing**: Both respect the same quality control settings
-3. **User-Centric Storage**: Both keyed by user_id for proper isolation
-4. **Unified Configuration**: Single YAML file controls both systems
-
-## Troubleshooting
-
-### Common Issues
-
-1. **No Action Items Detected**
- - Check if trigger phrases are present in transcript
- - Verify `action_item_extraction.enabled: true` in config
- - Check debug logs for extraction attempts
-
-2. **JSON Parsing Errors**
- - Review extraction prompt for clarity
- - Lower LLM temperature for more consistent output
- - Check debug database for exact error messages
-
-3. **Performance Issues**
- - Monitor processing times in debug stats
- - Adjust `max_tokens` and `temperature` settings
- - Consider using quality control to filter low-value transcripts
-
-### Debug Commands
-
-```bash
-# Test action item configuration
-curl -H "Authorization: Bearer $TOKEN" \
- "http://localhost:8000/api/debug/memory/config/test?test_text=Simon%20says%20we%20need%20to%20schedule%20a%20meeting"
-
-# View extraction statistics
-curl -H "Authorization: Bearer $TOKEN" \
- http://localhost:8000/api/debug/memory/stats
-
-# Check recent processing
-curl -H "Authorization: Bearer $TOKEN" \
- http://localhost:8000/api/debug/memory/sessions?limit=10
-```
-
-## Best Practices
-
-1. **Use Specific Trigger Phrases**: Add domain-specific triggers for your use case
-2. **Test Prompts Regularly**: Use the debug API to test prompt effectiveness
-3. **Monitor Performance**: Check debug stats for processing times and success rates
-4. **Customize for Your Workflow**: Adjust prompts and triggers based on your conversation patterns
-5. **Regular Configuration Updates**: Reload configuration without restart using the API
-
-This action items system provides comprehensive task management capabilities with full configurability and debugging support, integrating seamlessly with the memory extraction pipeline.
\ No newline at end of file
diff --git a/backends/advanced-backend/Docs/architecture.md b/backends/advanced-backend/Docs/architecture.md
index 0ac47b79..ea62ca0b 100644
--- a/backends/advanced-backend/Docs/architecture.md
+++ b/backends/advanced-backend/Docs/architecture.md
@@ -4,7 +4,7 @@
## System Overview
-Friend-Lite is a comprehensive real-time conversation processing system that captures audio streams, performs speech-to-text transcription, extracts memories, and generates action items. The system features a FastAPI backend with WebSocket audio streaming, a Streamlit web dashboard for management, and complete user authentication with role-based access control.
+Friend-Lite is a comprehensive real-time conversation processing system that captures audio streams, performs speech-to-text transcription, and extracts memories. The system features a FastAPI backend with WebSocket audio streaming, a Streamlit web dashboard for management, and complete user authentication with role-based access control.
**Core Implementation**: The complete system is implemented in `src/main.py` with supporting services in dedicated modules.
@@ -34,7 +34,6 @@ graph TB
%% Business Logic Services
subgraph "Intelligence Services"
- ActionItems[action_items_service.py
Task Extraction]
Memory[memory/
Conversation Memory]
Metrics[metrics.py
System Monitoring]
end
@@ -81,9 +80,7 @@ graph TB
Main -->|/api/* endpoints| WebUI
%% Service Integration
- Transcription -->|Transcript Text| ActionItems
Transcription -->|Conversation Data| Memory
- ActionItems -->|Tasks| MongoDB
Memory -->|Memory Storage| Ollama
Memory -->|Vector Storage| Qdrant
@@ -91,7 +88,6 @@ graph TB
Main -->|User & Conversation Data| MongoDB
Transcription -->|Speech Processing| ASR
Memory -->|Embeddings| Qdrant
- ActionItems -->|LLM Analysis| Ollama
%% Monitoring & Metrics
Main -->|System Metrics| Metrics
@@ -113,7 +109,7 @@ graph TB
- **Authentication-First Design**: All endpoints require JWT authentication
- **WebSocket Audio Streaming**: Real-time Opus/PCM audio ingestion with per-client isolation (`main.py:1562+`)
- **Conversation Management**: Automatic conversation lifecycle with timeout handling (`main.py:1018-1149`)
-- **REST API Suite**: Comprehensive endpoints for user, conversation, memory, and action item management (`main.py:1700+`)
+- **REST API Suite**: Comprehensive endpoints for user, conversation, and memory management (`main.py:1700+`)
- **Health Monitoring**: Detailed service health checks and performance metrics (`main.py:2500+`)
- **Audio Cropping**: Intelligent speech segment extraction using FFmpeg (`main.py:174-200`)
@@ -132,7 +128,7 @@ graph TB
- **User-Friendly Interface**: Complete web-based management interface
- **Authentication Integration**: Login with backend JWT tokens or Google OAuth
- **Real-Time Monitoring**: Live client status and conversation management
-- **Data Management**: User, conversation, memory, and action item interfaces
+- **Data Management**: User, conversation, and memory interfaces
- **Audio Playback**: Smart audio player with original/cropped audio options
- **System Health**: Visual service status and configuration display
@@ -160,7 +156,6 @@ The system implements a dual transcription approach with Deepgram as primary and
```python
# Clean dependency injection pattern
TranscriptionManager(
- action_item_callback=callback_func,
chunk_repo=database_repo,
# Uses get_client_manager() singleton for client state access
)
@@ -179,7 +174,6 @@ client_manager = get_client_manager()
client_state = ClientState(
client_id="user_id_suffix-device_name",
chunk_repo=database_repo,
- action_items_service=action_service,
chunk_dir=audio_storage_path
)
```
@@ -218,7 +212,6 @@ stateDiagram-v2
- **Chunk Queue**: Raw audio buffering with client isolation
- **Transcription Queue**: Audio chunks for real-time ASR processing with quality validation
- **Memory Queue**: Completed conversations for LLM memory extraction (with transcript validation)
-- **Action Item Queue**: Transcript analysis for task detection
- **Quality Control**: Multi-stage validation prevents empty/invalid transcripts from consuming LLM resources
#### Speech Processing Features
@@ -229,15 +222,6 @@ stateDiagram-v2
### Intelligence Services
-#### Action Items Service (`action_items_service.py`)
-- **User-Centric Storage**: Action items stored with database user_id (not client_id)
-- **LLM-Powered Extraction**: Uses Ollama for intelligent task identification
-- **Trigger Recognition**: Special "Simon says" keyphrase detection for explicit task creation
-- **Task Management**: Full CRUD operations with status tracking (open, in-progress, completed, cancelled)
-- **Client Metadata**: Client and user information stored for reference and debugging
-- **Context Preservation**: Links action items to original conversations and audio segments
-
-> đ **Read more**: [Action Items Documentation](./action-items.md) for detailed task extraction features
#### Memory Management (`src/memory/memory_service.py`)
- **User-Centric Storage**: All memories keyed by database user_id (not client_id)
@@ -264,14 +248,13 @@ stateDiagram-v2
- **Authentication Data**: Secure password hashing, email verification, email-based login
- **Profile Management**: User preferences, display names, and permissions
- **Client Registration**: Tracking of registered clients per user with device names
-- **Data Ownership**: All data (conversations, memories, action items) associated via user_id
+- **Data Ownership**: All data (conversations, memories) associated via user_id
- **Client ID Generation**: Helper functions for `objectid_suffix-device_name` format
#### Conversation Data Access (`ChunkRepo`)
- **Audio Metadata**: File paths, timestamps, duration tracking
- **Transcript Management**: Speaker identification and timing information
- **Memory Links**: Connection between conversations and extracted memories
-- **Action Item Relations**: Task tracking per conversation
#### Permission System
- **Dictionary-Based Mapping**: Clean client-user relationship tracking via in-memory dictionaries
@@ -334,6 +317,7 @@ graph LR
#### Infrastructure Containers
- **MongoDB 4.4.18**: Primary data storage with persistence
- **Qdrant Latest**: Vector database for memory embeddings
+- **Neo4j 5.15**: Graph database for memory relationships and entity connections
- **Nginx Alpine**: Reverse proxy and load balancing
## Detailed Data Flow Architecture
@@ -385,10 +369,6 @@ flowchart TB
VectorStore[Qdrant Vector Store
đ Semantic search]
end
- subgraph "â
Action Items Pipeline"
- ActionService[Action Items Service
đ "Simon says" detection]
- TaskExtraction[Task Extraction
đ¤ LLM-powered analysis]
- end
end
%% Failure Recovery System
@@ -441,10 +421,6 @@ flowchart TB
LLMProcessor -->|â
Memory extracted| VectorStore
MemoryService -->|đ Track processing| QueueTracker
- %% Action Items Flow
- TranscriptValidation -->|đ Valid transcript| ActionService
- ActionService -->|đ "Simon says" detected| TaskExtraction
- TaskExtraction -->|â
Task extracted| MongoDB
%% Failure Recovery Integration
QueueTracker -->|đ Track all items| PersistentQueue
@@ -465,7 +441,6 @@ flowchart TB
VectorStore -->|đž Embeddings| QdrantDB
QueueTracker -->|đ Metrics & tracking| SQLiteTracking
ClientState -->|đ Audio segments| AudioFiles
- ActionService -->|đ Tasks| MongoDB
%% Web Dashboard Flow
WebUI -->|đ Cookie/JWT auth
đ 1hr lifetime| AuthGW
@@ -515,9 +490,8 @@ flowchart TB
3. **User Resolution**: Client-ID to database user mapping for proper data association
4. **LLM Processing**: Ollama-based conversation summarization with user context (only for validated transcripts)
5. **Vector Storage**: Semantic embeddings stored in Qdrant keyed by user_id
-6. **Action Item Analysis**: Automatic task detection with user-centric storage
-7. **Metadata Enhancement**: Client information and user email stored in metadata
-8. **Search & Retrieval**: User-scoped semantic memory search capabilities
+6. **Metadata Enhancement**: Client information and user email stored in metadata
+7. **Search & Retrieval**: User-scoped semantic memory search capabilities
### User Management & Security
1. **Registration**: Admin-controlled user creation with email/password and auto-generated user_id
@@ -546,7 +520,6 @@ flowchart TB
| System Administration | Health Check Only | Full Access |
| Active Client Management | Own Clients Only | All Clients |
| Memory Management | Own Memories Only | All Memories (with client info) |
-| Action Items | Own Items Only | All Items (with client info) |
### Data Protection
- **Encryption**: JWT token signing with configurable secret keys
@@ -575,6 +548,11 @@ OLLAMA_BASE_URL=http://ollama:11434
# Vector Storage
QDRANT_BASE_URL=qdrant
+# Graph Storage for Memory Relationships
+NEO4J_HOST=neo4j-mem0
+NEO4J_USER=neo4j
+NEO4J_PASSWORD=your-neo4j-password
+
# Transcription Services (Deepgram Primary, Wyoming Fallback)
DEEPGRAM_API_KEY=your-deepgram-api-key-here
OFFLINE_ASR_TCP_URI=tcp://host.docker.internal:8765
@@ -584,12 +562,13 @@ OFFLINE_ASR_TCP_URI=tcp://host.docker.internal:8765
### Service Dependencies
#### Critical Services (Required for Core Functionality)
-- **MongoDB**: User data, conversations, action items
+- **MongoDB**: User data, conversations
- **Authentication**: JWT token validation and user sessions
#### Enhanced Services (Optional but Recommended)
-- **Ollama**: Memory processing and action item extraction
+- **Ollama**: Memory processing
- **Qdrant**: Vector storage for semantic memory search
+- **Neo4j**: Graph database for memory relationships and entity connections
- **Deepgram**: Primary speech-to-text transcription service (WebSocket streaming)
- **Wyoming ASR**: Fallback transcription service (self-hosted)
@@ -619,7 +598,6 @@ The system provides a comprehensive REST API organized into functional modules:
âââ /memories # Memory management and search
â âââ /admin # Admin view (all users)
â âââ /search # Semantic memory search
-âââ /action_items # Task management
âââ /admin/ # Admin compatibility endpoints
â âââ /memories # Consolidated admin memory view
â âââ /memories/debug # Legacy debug endpoint
diff --git a/backends/advanced-backend/Docs/contribution.md b/backends/advanced-backend/Docs/contribution.md
index 488d3b19..dd645eca 100644
--- a/backends/advanced-backend/Docs/contribution.md
+++ b/backends/advanced-backend/Docs/contribution.md
@@ -12,14 +12,6 @@
5. src/memory/memory_service.py
6. src/memory_debug.py (for tracking)
- đ "I want to work on action items"
-
- 1. Docs/quickstart.md â Docs/action-items.md
- 2. memory_config.yaml (action_item_extraction section)
- 3. main.py lines 1341-1378 (real-time processing)
- 4. src/action_items_service.py
- 5. ACTION_ITEMS_CLEANUP_SUMMARY.md (architecture)
-
đ "I want to debug pipeline issues"
1. MEMORY_DEBUG_IMPLEMENTATION.md
@@ -33,12 +25,11 @@
2. main.py (full file, focusing on class structures)
3. src/auth.py (authentication flow)
4. src/users.py (user management)
- 5. All service files (memory_service.py, action_items_service.py)
+ 5. All service files (memory_service.py)
đ¯ Key Concepts to Understand
Data Flow
Audio â Transcription â Dual Processing
- ââ Memory Pipeline (end-of-conversation)
- ââ Action Item Pipeline (real-time per-segment)
\ No newline at end of file
+   └─ Memory Pipeline (end-of-conversation)
\ No newline at end of file
diff --git a/backends/advanced-backend/Docs/quickstart.md b/backends/advanced-backend/Docs/quickstart.md
index 0c20070f..8cb039e8 100644
--- a/backends/advanced-backend/Docs/quickstart.md
+++ b/backends/advanced-backend/Docs/quickstart.md
@@ -19,7 +19,7 @@ At the moment, the basic functionalities are:
- Docker and Docker Compose
- (Optional) Deepgram API key for high-quality cloud transcription
-- (Optional) Ollama for local LLM processing (memory extraction, action items)
+- (Optional) Ollama for local LLM processing (memory extraction)
- (Optional) Wyoming ASR for offline speech-to-text processing
## Quick Start
@@ -95,7 +95,6 @@ For self-hosted speech recognition, see instructions in `extras/asr-services/`:
- **Conversations**: View audio recordings, transcripts, and cropped audio
- **Memories**: Search extracted conversation memories
-- **Action Items**: Manage automatically detected tasks
- **User Management**: Create/delete users and their data
- **Client Management**: View active connections and close conversations
@@ -221,19 +220,17 @@ curl -X POST "http://localhost:8000/api/process-audio-files" \
### Memory & Intelligence
- **Enhanced Memory Extraction**: Improved fact extraction with granular, specific memories instead of generic transcript storage
-- **User-centric storage**: All memories and action items keyed by database user_id
+- **User-centric storage**: All memories keyed by database user_id
- **Memory extraction**: Automatic conversation summaries using LLM with enhanced prompts
- **Semantic search**: Vector-based memory retrieval
-- **Action item detection**: Automatic task extraction with "Simon says" triggers
-- **Configurable extraction**: YAML-based configuration for memory and action item extraction
-- **Debug tracking**: SQLite-based tracking of transcript → memory/action item conversion
+- **Configurable extraction**: YAML-based configuration for memory extraction
+- **Debug tracking**: SQLite-based tracking of transcript → memory conversion
- **Client metadata**: Device information preserved for debugging and reference
- **User isolation**: All data scoped to individual users with multi-device support
- **No more fallbacks**: System now creates proper memories instead of generic transcript placeholders
**Implementation**:
- **Memory System**: `src/memory/memory_service.py` + `main.py:1047-1065, 1163-1195`
-- **Action Items**: `src/action_items_service.py` + `main.py:1341-1378`
- **Configuration**: `memory_config.yaml` + `src/memory_config_loader.py`
- **Debug Tracking**: `src/memory_debug.py` + API endpoints at `/api/debug/memory/*`
@@ -320,7 +317,7 @@ uv sync --group (whatever group you want to sync)
The friend-lite backend uses a **user-centric data architecture**:
-- **All memories and action items are keyed by database user_id** (not client_id)
+- **All memories are keyed by database user_id** (not client_id)
- **Client information is stored in metadata** for reference and debugging
- **User email is included** for easy identification in admin interfaces
- **Multi-device support**: Users can access their data from any registered device
@@ -329,7 +326,7 @@ For detailed information, see [User Data Architecture](user-data-architecture.md
## Memory & Action Item Configuration
-The system uses **centralized configuration** via `memory_config.yaml` for all memory and action item extraction settings. All hardcoded values have been removed from the code to ensure consistent, configurable behavior.
+The system uses **centralized configuration** via `memory_config.yaml` for all memory extraction settings. All hardcoded values have been removed from the code to ensure consistent, configurable behavior.
### Configuration File Location
- **Path**: `backends/advanced-backend/memory_config.yaml`
@@ -383,23 +380,6 @@ fact_extraction:
temperature: 0.0 # Lower for factual accuracy
max_tokens: 1500
-action_item_extraction:
- enabled: true
- # RECOMMENDATION: Works best with OpenAI GPT-4o for reliable JSON parsing
- trigger_phrases:
- - "simon says"
- - "action item"
- - "todo"
- - "follow up"
- - "next step"
- - "homework"
- - "deliverable"
- - "task"
- - "assignment"
- llm_settings:
- model: "gemma3n:e4b" # Auto-switches based on LLM_PROVIDER
- temperature: 0.1
- max_tokens: 1000
```
**Provider-Specific Behavior:**
@@ -409,7 +389,7 @@ action_item_extraction:
#### Fixing JSON Parsing Errors
-If you experience JSON parsing errors in action items or fact extraction:
+If you experience JSON parsing errors in fact extraction:
1. **Switch to OpenAI GPT-4o** (recommended solution):
```bash
diff --git a/backends/advanced-backend/docker-compose.yml b/backends/advanced-backend/docker-compose.yml
index 080877c6..2b1833ef 100644
--- a/backends/advanced-backend/docker-compose.yml
+++ b/backends/advanced-backend/docker-compose.yml
@@ -5,6 +5,8 @@ services:
dockerfile: Dockerfile
ports:
- "8000:8000"
+ env_file:
+ - .env
volumes:
- ./audio_chunks:/app/audio_chunks
- ./debug_dir:/app/debug_dir
@@ -20,12 +22,18 @@ services:
- AUTH_SECRET_KEY=${AUTH_SECRET_KEY}
- LLM_PROVIDER=${LLM_PROVIDER}
- OPENAI_API_KEY=${OPENAI_API_KEY}
+ - OPENAI_BASE_URL=${OPENAI_BASE_URL}
- OPENAI_MODEL=${OPENAI_MODEL}
+ - NEO4J_HOST=${NEO4J_HOST}
+ - NEO4J_USER=${NEO4J_USER}
+ - NEO4J_PASSWORD=${NEO4J_PASSWORD}
depends_on:
qdrant:
condition: service_started
mongo:
condition: service_started
+ neo4j-mem0:
+ condition: service_started
healthcheck:
test: ["CMD", "curl", "-f", "http://localhost:8000/readiness"]
interval: 10s
@@ -112,6 +120,22 @@ services:
- "6334:6334" # HTTP
volumes:
- ./qdrant_data:/qdrant/storage
+
+ neo4j-mem0:
+ image: neo4j:5.15-community
+ ports:
+ - "7474:7474" # HTTP
+ - "7687:7687" # Bolt
+ environment:
+ - NEO4J_AUTH=neo4j/${NEO4J_PASSWORD:-password}
+ - NEO4J_PLUGINS=["apoc"]
+ - NEO4J_dbms_security_procedures_unrestricted=apoc.*
+ - NEO4J_dbms_security_procedures_allowlist=apoc.*
+ volumes:
+ - ./neo4j_data:/data
+ - ./neo4j_logs:/logs
+ restart: unless-stopped
+
mongo:
image: mongo:4.4.18
ports:
@@ -132,8 +156,12 @@ services:
# Question: These are named volumes, but they are not being used, right? Can we remove them?
-volumes:
- ollama_data:
- driver: local
- mongo_data:
- driver: local
\ No newline at end of file
+# volumes:
+# ollama_data:
+# driver: local
+# mongo_data:
+# driver: local
+# neo4j_data:
+# driver: local
+# neo4j_logs:
+# driver: local
\ No newline at end of file
diff --git a/backends/advanced-backend/memory_config.yaml b/backends/advanced-backend/memory_config.yaml
index 226c3987..0bcff565 100644
--- a/backends/advanced-backend/memory_config.yaml
+++ b/backends/advanced-backend/memory_config.yaml
@@ -54,47 +54,6 @@ fact_extraction:
# model: "gemma3n:e4b" # Model based on LLM_PROVIDER (ollama/openai)
model: "gpt-4o"
-# Action item extraction settings
-action_item_extraction:
- # Whether to extract action items from conversations
- # RECOMMENDATION: Works best with OpenAI GPT-4o for reliable JSON parsing
- enabled: true
-
- # Trigger phrases that indicate action items in conversation
- trigger_phrases:
- - "simon says"
- - "action item"
- - "todo"
- - "follow up"
- - "next step"
- - "homework"
- - "deliverable"
- - "task"
- - "assignment"
- - "need to"
- - "should do"
- - "remember to"
-
- # Prompt for extracting action items
- prompt: |
- Extract action items from this conversation. Look for tasks, assignments, or things that need to be done.
-
- Return a JSON array where each item has:
- - description: What needs to be done
- - assignee: Who should do it ("unassigned" if unclear)
- - due_date: When it should be done ("not_specified" if not mentioned)
- - priority: high/medium/low/not_specified
- - context: Why or how this task came up
- - tool: Required tool ("check_email", "set_alarm", "none")
-
- Return only valid JSON. No explanations or extra text.
-
- # LLM parameters for action item extraction
- llm_settings:
- temperature: 0.1
- max_tokens: 1000
- # RECOMMENDATION: Use "gpt-4o" for reliable JSON output in action items
- model: "gpt-4o" # Model based on LLM_PROVIDER (ollama/openai)
# Memory categorization settings
categorization:
diff --git a/backends/advanced-backend/src/advanced_omi_backend/action_items_service.py b/backends/advanced-backend/src/advanced_omi_backend/action_items_service.py
deleted file mode 100644
index b9cbe59b..00000000
--- a/backends/advanced-backend/src/advanced_omi_backend/action_items_service.py
+++ /dev/null
@@ -1,614 +0,0 @@
-import asyncio
-import json
-import logging
-import re
-import time
-from concurrent.futures import ThreadPoolExecutor
-from datetime import datetime
-from typing import Any, Dict, List, Optional
-
-import ollama
-from motor.motor_asyncio import AsyncIOMotorCollection
-
-# Set up logging
-action_items_logger = logging.getLogger("action_items")
-
-# Timeout configurations
-OLLAMA_TIMEOUT_SECONDS = 30 # Timeout for Ollama operations
-EXTRACTION_TIMEOUT_SECONDS = 45 # Timeout for action item extraction
-
-# Thread pool for blocking operations
-_ACTION_EXECUTOR = ThreadPoolExecutor(max_workers=2, thread_name_prefix="action_ops")
-
-
-class ActionItemsService:
- """
- MongoDB-based action items service with full CRUD operations.
- Replaces the Mem0-based implementation for better update capabilities.
- """
-
- def __init__(self, collection: AsyncIOMotorCollection, ollama_client: ollama.Client):
- self.collection = collection
- self.ollama_client = ollama_client
- self._initialized = False
-
- async def initialize(self):
- """Initialize the service and create indexes for performance."""
- if self._initialized:
- return
-
- try:
- # Create indexes for better query performance
- await self.collection.create_index([("user_id", 1), ("created_at", -1)])
- await self.collection.create_index([("user_id", 1), ("status", 1)])
- await self.collection.create_index([("user_id", 1), ("assignee", 1)])
- await self.collection.create_index([("audio_uuid", 1)])
- await self.collection.create_index([("description", "text")]) # Text search index
-
- self._initialized = True
- action_items_logger.info("Action items service initialized with MongoDB")
- except Exception as e:
- action_items_logger.error(f"Failed to initialize action items service: {e}")
- raise
-
- async def process_transcript_for_action_items(
- self, transcript_text: str, client_id: str, audio_uuid: str, user_id: str, user_email: str
- ) -> int:
- """
- Process a transcript segment for action items with special keyphrase detection.
-
- This method:
- - Checks for the special keyphrase 'Simon says' (case-insensitive)
- - If found, processes the modified text for action item extraction
- - Returns the number of action items extracted and stored
- """
- if not self._initialized:
- await self.initialize()
-
- try:
- # Check for the special keyphrase 'simon says' (case-insensitive, any spaces or dots)
- keyphrase_pattern = re.compile(r"\bSimon says\b", re.IGNORECASE)
-
- if keyphrase_pattern.search(transcript_text):
- # Remove all occurrences of the keyphrase
- modified_text = keyphrase_pattern.sub("Simon says", transcript_text)
- action_items_logger.info(
- f"đ 'Simon says' keyphrase detected in transcript for {audio_uuid}. Extracting action items from: '{modified_text.strip()}'"
- )
-
- try:
- action_item_count = await self.extract_and_store_action_items(
- modified_text.strip(), client_id, audio_uuid, user_id, user_email
- )
- if action_item_count > 0:
- action_items_logger.info(
- f"đ¯ Extracted {action_item_count} action items from 'Simon says' transcript segment for {audio_uuid}"
- )
- else:
- action_items_logger.debug(
- f"âšī¸ No action items found in 'Simon says' transcript segment for {audio_uuid}"
- )
- return action_item_count
- except Exception as e:
- action_items_logger.error(
- f"â Error processing 'Simon says' action items for transcript segment in {audio_uuid}: {e}"
- )
- return 0
- else:
- # No keyphrase found, no action items to extract
- action_items_logger.debug(
- f"No 'Simon says' keyphrase found in transcript for {audio_uuid}"
- )
- return 0
-
- except Exception as e:
- action_items_logger.error(
- f"Error processing transcript for action items in {audio_uuid}: {e}"
- )
- return 0
-
- async def extract_and_store_action_items(
- self, transcript: str, client_id: str, audio_uuid: str, user_id: str, user_email: str
- ) -> int:
- """
- Extract action items from transcript and store them in MongoDB with timeout protection.
- Returns the number of action items extracted and stored.
- """
- if not self._initialized:
- await self.initialize()
-
- try:
- # Extract and store action items with overall timeout
- async def _extract_and_store():
- # Extract action items from the transcript
- action_items = await self._extract_action_items_from_transcript(
- transcript, client_id, audio_uuid
- )
-
- if not action_items:
- action_items_logger.info(
- f"No action items found in transcript for {audio_uuid}"
- )
- return 0
-
- # Store action items in MongoDB
- success_count = await self._store_action_items(
- action_items, client_id, audio_uuid, user_id, user_email
- )
-
- action_items_logger.info(
- f"Successfully extracted and stored {success_count}/{len(action_items)} action items for {audio_uuid}"
- )
- return success_count
-
- return await asyncio.wait_for(_extract_and_store(), timeout=EXTRACTION_TIMEOUT_SECONDS)
-
- except asyncio.TimeoutError:
- action_items_logger.error(
- f"Action item extraction and storage timed out after {EXTRACTION_TIMEOUT_SECONDS}s for {audio_uuid}"
- )
- return 0
- except Exception as e:
- action_items_logger.error(f"Error extracting action items for {audio_uuid}: {e}")
- return 0
-
- async def _extract_action_items_from_transcript(
- self, transcript: str, client_id: str, audio_uuid: str
- ) -> List[Dict[str, Any]]:
- """Extract action items from transcript using Ollama."""
- try:
- extraction_prompt = f"""
-<|begin_of_text|><|start_header_id|>system<|end_header_id|>
-You are an intelligent assistant that reads transcripts and extracts all potential action items, even informal or implied ones.
-
-Your output must be a **JSON array**, where action item includes:
-- description: A short summary of the task
-- assignee: Who should do it ("unassigned" if unclear)
-- due_date: When it should be done ("not_specified" if not mentioned)
-- priority: high / medium / low / not_specified
-- context: Why or how the task came up
-- tool: The name of the tool required, if any ("check_email", "check_calendar", "set_alarm"), or "none" if no tool is needed
-
-Rules:
-- Identify both explicit tasks and implied ones.
-- Suggest a tool only when the task obviously requires it or could be automated.
-- If it's a general human task with no clear automation, use `"none"` for tool.
-
-Return **only** a JSON array. No explanation or extra text.
-
-<|eot_id|>
-<|start_header_id|>user<|end_header_id|>
-Transcript:
-
-{transcript}
-
-<|eot_id|>
-<|start_header_id|>assistant<|end_header_id|>
-"""
-
- # Use Ollama API
- if self.ollama_client is None:
- action_items_logger.error(f"No Ollama client available for {audio_uuid}")
- return []
-
- def _ollama_generate():
- return self.ollama_client.generate(
- model="llama3.1:latest", prompt=extraction_prompt, options={"temperature": 0.1}
- )
-
- loop = asyncio.get_running_loop()
- response = await asyncio.wait_for(
- loop.run_in_executor(_ACTION_EXECUTOR, _ollama_generate),
- timeout=OLLAMA_TIMEOUT_SECONDS,
- )
-
- if response is None or "response" not in response:
- action_items_logger.error(f"Invalid Ollama response for {audio_uuid}")
- return []
-
- response_text = response["response"].strip()
-
- # Handle empty responses
- if not response_text or response_text.lower() in ["none", "no action items", "[]"]:
- return []
-
- # Parse JSON response
- action_items = json.loads(response_text)
-
- # Validate response format
- if not isinstance(action_items, list):
- action_items_logger.warning(
- f"Action item extraction returned non-list for {audio_uuid}: {type(action_items)}"
- )
- return []
-
- # Enrich each action item with metadata
- for i, item in enumerate(action_items):
- item.update(
- {
- "id": f"action_{audio_uuid}_{i}_{int(time.time())}",
- "status": "open",
- "created_at": int(time.time()),
- "updated_at": int(time.time()),
- "source": "transcript_extraction",
- }
- )
-
- # TODO: Handle all tools here, these can be imported from other files
- # Handle set_alarm tool, this can be another llm call to mcp with description as input
- # Also handle sending notification via app or TTS
- if item.get("tool") == "set_alarm":
- description = item.get("description", "")
- action_items_logger.info(
- f"Calling set alarm service with description: {description}"
- )
-
- action_items_logger.info(
- f"Extracted {len(action_items)} action items from {audio_uuid}"
- )
- return action_items
-
- except asyncio.TimeoutError:
- action_items_logger.error(
- f"Action item extraction timed out after {OLLAMA_TIMEOUT_SECONDS}s for {audio_uuid}"
- )
- return []
- except json.JSONDecodeError as e:
- action_items_logger.error(f"Failed to parse action items JSON for {audio_uuid}: {e}")
- return []
- except Exception as e:
- action_items_logger.error(
- f"Error extracting action items from transcript for {audio_uuid}: {e}"
- )
- return []
-
- async def _store_action_items(
- self,
- action_items: List[Dict[str, Any]],
- client_id: str,
- audio_uuid: str,
- user_id: str,
- user_email: str,
- ) -> int:
- """Store action items in MongoDB.
-
- Args:
- action_items: List of action item dictionaries
- client_id: The client ID that generated the audio
- audio_uuid: Unique identifier for the audio
- user_id: Database user ID to associate the action items with
- user_email: User email for identification
- """
- try:
- if not action_items:
- return 0
-
- # Prepare documents for insertion
- documents = []
- for item in action_items:
- document = {
- "action_item_id": item.get("id"),
- "user_id": user_id, # Use database user_id instead of client_id
- "client_id": client_id, # Store client_id for reference
- "user_email": user_email, # Store user email for easy identification
- "audio_uuid": audio_uuid,
- "description": item.get("description", ""),
- "assignee": item.get("assignee", "unassigned"),
- "due_date": item.get("due_date", "not_specified"),
- "priority": item.get("priority", "not_specified"),
- "status": item.get("status", "open"),
- "context": item.get("context", ""),
- "source": item.get("source", "transcript_extraction"),
- "created_at": item.get("created_at", int(time.time())),
- "updated_at": item.get("updated_at", int(time.time())),
- }
- documents.append(document)
-
- # Insert all action items
- result = await self.collection.insert_many(documents)
- success_count = len(result.inserted_ids)
-
- action_items_logger.info(f"Stored {success_count} action items for {audio_uuid}")
- return success_count
-
- except Exception as e:
- action_items_logger.error(f"Error storing action items for {audio_uuid}: {e}")
- return 0
-
- async def get_action_items(
- self, user_id: str, limit: int = 50, status_filter: Optional[str] = None
- ) -> List[Dict[str, Any]]:
- """Get action items for a user with optional status filtering."""
- if not self._initialized:
- await self.initialize()
-
- try:
- # Build query filter
- query = {"user_id": user_id}
- if status_filter:
- query["status"] = status_filter
-
- # Execute query with sorting (newest first)
- cursor = self.collection.find(query).sort("created_at", -1).limit(limit)
- action_items = []
-
- async for doc in cursor:
- # Convert MongoDB ObjectId to string and remove it
- doc["_id"] = str(doc["_id"])
- action_items.append(doc)
-
- action_items_logger.info(
- f"Retrieved {len(action_items)} action items for user {user_id} (status_filter: {status_filter})"
- )
- return action_items
-
- except Exception as e:
- action_items_logger.error(f"Error fetching action items for user {user_id}: {e}")
- return []
-
- async def update_action_item_status(
- self, action_item_id: str, new_status: str, user_id: Optional[str] = None
- ) -> bool:
- """Update the status of an action item."""
- if not self._initialized:
- await self.initialize()
-
- try:
- # Build query - use action_item_id or _id
- query = {}
- if action_item_id.startswith("action_"):
- query["action_item_id"] = action_item_id
- else:
- # Assume it's a MongoDB ObjectId
- from bson import ObjectId
-
- try:
- query["_id"] = ObjectId(action_item_id)
- except:
- query["action_item_id"] = action_item_id
-
- # Add user_id to query if provided for additional security
- if user_id:
- query["user_id"] = user_id
-
- # Update the document
- update_data = {"$set": {"status": new_status, "updated_at": int(time.time())}}
-
- result = await self.collection.update_one(query, update_data)
-
- if result.modified_count > 0:
- action_items_logger.info(
- f"Updated action item {action_item_id} status to {new_status}"
- )
- return True
- else:
- action_items_logger.warning(f"No action item found with id {action_item_id}")
- return False
-
- except Exception as e:
- action_items_logger.error(
- f"Error updating action item status for {action_item_id}: {e}"
- )
- return False
-
- async def search_action_items(
- self, query: str, user_id: str, limit: int = 20
- ) -> List[Dict[str, Any]]:
- """Search action items by text query using MongoDB text search."""
- if not self._initialized:
- await self.initialize()
-
- try:
- # Use MongoDB text search if available, otherwise regex search
- search_query = {
- "user_id": user_id,
- "$or": [
- {"description": {"$regex": query, "$options": "i"}},
- {"context": {"$regex": query, "$options": "i"}},
- {"assignee": {"$regex": query, "$options": "i"}},
- ],
- }
-
- cursor = self.collection.find(search_query).sort("created_at", -1).limit(limit)
- action_items = []
-
- async for doc in cursor:
- doc["_id"] = str(doc["_id"])
- action_items.append(doc)
-
- action_items_logger.info(
- f"Search found {len(action_items)} action items for query '{query}'"
- )
- return action_items
-
- except Exception as e:
- action_items_logger.error(f"Error searching action items for user {user_id}: {e}")
- return []
-
- async def delete_action_item(self, action_item_id: str, user_id: Optional[str] = None) -> bool:
- """Delete a specific action item."""
- if not self._initialized:
- await self.initialize()
-
- try:
- # Build query - use action_item_id or _id
- query = {}
- if action_item_id.startswith("action_"):
- query["action_item_id"] = action_item_id
- else:
- from bson import ObjectId
-
- try:
- query["_id"] = ObjectId(action_item_id)
- except:
- query["action_item_id"] = action_item_id
-
- # Add user_id to query if provided for additional security
- if user_id:
- query["user_id"] = user_id
-
- result = await self.collection.delete_one(query)
-
- if result.deleted_count > 0:
- action_items_logger.info(f"Deleted action item with id {action_item_id}")
- return True
- else:
- action_items_logger.warning(f"No action item found with id {action_item_id}")
- return False
-
- except Exception as e:
- action_items_logger.error(f"Error deleting action item {action_item_id}: {e}")
- return False
-
- async def create_action_item(
- self,
- user_id: str,
- description: str,
- assignee: str = "unassigned",
- due_date: str = "not_specified",
- priority: str = "medium",
- context: str = "",
- ) -> Optional[Dict[str, Any]]:
- """Create a new action item manually."""
- if not self._initialized:
- await self.initialize()
-
- try:
- current_time = int(time.time())
- action_item_id = f"manual_{user_id}_{current_time}"
-
- document = {
- "action_item_id": action_item_id,
- "user_id": user_id,
- "audio_uuid": None, # No associated conversation
- "description": description,
- "assignee": assignee,
- "due_date": due_date,
- "priority": priority,
- "status": "open",
- "context": context,
- "source": "manual_creation",
- "created_at": current_time,
- "updated_at": current_time,
- }
-
- result = await self.collection.insert_one(document)
-
- if result.inserted_id:
- document["_id"] = str(result.inserted_id)
- action_items_logger.info(
- f"Created manual action item {action_item_id} for user {user_id}"
- )
- return document
- else:
- action_items_logger.error(f"Failed to create action item for user {user_id}")
- return None
-
- except Exception as e:
- action_items_logger.error(f"Error creating action item for user {user_id}: {e}")
- return None
-
- async def get_action_item_stats(self, user_id: str) -> Dict[str, Any]:
- """Get comprehensive statistics for user's action items."""
- if not self._initialized:
- await self.initialize()
-
- try:
- # Use aggregation pipeline for statistics
- pipeline = [
- {"$match": {"user_id": user_id}},
- {
- "$group": {
- "_id": None,
- "total": {"$sum": 1},
- "by_status": {"$push": "$status"},
- "by_priority": {"$push": "$priority"},
- "by_assignee": {"$push": "$assignee"},
- }
- },
- ]
-
- result = await self.collection.aggregate(pipeline).to_list(length=1)
-
- if not result:
- return {
- "total": 0,
- "by_status": {},
- "by_priority": {},
- "by_assignee": {},
- "recent_count": 0,
- }
-
- data = result[0]
-
- # Count by status
- status_counts = {}
- for status in data["by_status"]:
- status_counts[status] = status_counts.get(status, 0) + 1
-
- # Count by priority
- priority_counts = {}
- for priority in data["by_priority"]:
- priority_counts[priority] = priority_counts.get(priority, 0) + 1
-
- # Count by assignee
- assignee_counts = {}
- for assignee in data["by_assignee"]:
- assignee_counts[assignee] = assignee_counts.get(assignee, 0) + 1
-
- # Get recent count (last 7 days)
- seven_days_ago = int(time.time()) - (7 * 24 * 60 * 60)
- recent_count = await self.collection.count_documents(
- {"user_id": user_id, "created_at": {"$gte": seven_days_ago}}
- )
-
- return {
- "total": data["total"],
- "by_status": status_counts,
- "by_priority": priority_counts,
- "by_assignee": assignee_counts,
- "recent_count": recent_count,
- }
-
- except Exception as e:
- action_items_logger.error(f"Error getting action item stats for user {user_id}: {e}")
- return {
- "total": 0,
- "by_status": {},
- "by_priority": {},
- "by_assignee": {},
- "recent_count": 0,
- }
-
-
-# import pyperclip
-# transcript = "set an alarm for 10am"
-# extraction_prompt = f"""
-# <|begin_of_text|><|start_header_id|>system<|end_header_id|>
-# You are an intelligent assistant that reads transcripts and extracts all potential action items, even informal or implied ones.
-
-# Your output must be a **JSON**, where action item includes:
-# - description: A short summary of the task
-# - assignee: Who should do it ("unassigned" if unclear)
-# - due_date: When it should be done ("not_specified" if not mentioned)
-# - priority: high / medium / low / not_specified
-# - context: Why or how the task came up
-# - tool: The name of the tool required, if any ("check_email", "check_calendar", "set_alarm"), or "none" if no tool is needed
-
-# Rules:
-# - Identify both explicit tasks and implied ones.
-# - Suggest a tool only when the task obviously requires it or could be automated.
-# - If it's a general human task with no clear automation, use `"none"` for tool.
-
-# Return **only** a JSON. No explanation or extra text.
-
-# <|eot_id|>
-# <|start_header_id|>user<|end_header_id|>
-# Transcript:
-#
-# {transcript}
-#
-# <|eot_id|>
-# <|start_header_id|>assistant<|end_header_id|>
-# """
-# pyperclip.copy(extraction_prompt)
diff --git a/backends/advanced-backend/src/advanced_omi_backend/client.py b/backends/advanced-backend/src/advanced_omi_backend/client.py
index 45b5658b..82ea4198 100644
--- a/backends/advanced-backend/src/advanced_omi_backend/client.py
+++ b/backends/advanced-backend/src/advanced_omi_backend/client.py
@@ -42,11 +42,10 @@
class ClientState:
"""Manages all state for a single client connection."""
- def __init__(self, client_id: str, audio_chunks_db_collection, action_items_service, chunk_dir: Path, user_id: Optional[str] = None, user_email: Optional[str] = None):
+ def __init__(self, client_id: str, ac_db_collection_helper, chunk_dir: Path, user_id: Optional[str] = None, user_email: Optional[str] = None):
self.client_id = client_id
self.connected = True
- self.chunk_repo = audio_chunks_db_collection
- self.action_items_service = action_items_service
+ self.db_helper = ac_db_collection_helper
self.chunk_dir = chunk_dir
# Store minimal user data needed for memory processing (avoids tight coupling to User model)
self.user_id = user_id
@@ -58,9 +57,6 @@ def __init__(self, client_id: str, audio_chunks_db_collection, action_items_serv
self.memory_queue = asyncio.Queue[
Tuple[Optional[str], Optional[str], Optional[str]]
]() # (transcript, client_id, audio_uuid)
- self.action_item_queue = asyncio.Queue[
- Tuple[Optional[str], Optional[str], Optional[str]]
- ]() # (transcript_text, client_id, audio_uuid)
# Per-client file sink
self.file_sink: Optional[LocalFileSink] = None
@@ -91,7 +87,6 @@ def __init__(self, client_id: str, audio_chunks_db_collection, action_items_serv
self.saver_task: Optional[asyncio.Task] = None
self.transcription_task: Optional[asyncio.Task] = None
self.memory_task: Optional[asyncio.Task] = None
- self.action_item_task: Optional[asyncio.Task] = None
self.background_memory_task: Optional[asyncio.Task] = None
# Debug tracking
@@ -137,7 +132,6 @@ async def start_processing(self):
self.saver_task = asyncio.create_task(self._audio_saver())
self.transcription_task = asyncio.create_task(self._transcription_processor())
self.memory_task = asyncio.create_task(self._memory_processor())
- self.action_item_task = asyncio.create_task(self._action_item_processor())
audio_logger.info(f"Started processing tasks for client {self.client_id}")
async def disconnect(self):
@@ -155,7 +149,6 @@ async def disconnect(self):
await self.chunk_queue.put(None)
await self.transcription_queue.put((None, None))
await self.memory_queue.put((None, None, None))
- await self.action_item_queue.put((None, None, None))
# Wait for tasks to complete gracefully, with cancellation fallback
# Use longer timeouts for transcription tasks that may be waiting on Deepgram API
@@ -170,8 +163,6 @@ async def disconnect(self):
tasks_to_cleanup.append(("transcription", self.transcription_task, transcription_timeout))
if self.memory_task:
tasks_to_cleanup.append(("memory", self.memory_task, default_timeout))
- if self.action_item_task:
- tasks_to_cleanup.append(("action_item", self.action_item_task, default_timeout))
# Background memory task gets much longer timeout since it could be doing Ollama processing
if self.background_memory_task:
@@ -423,7 +414,7 @@ async def _process_memory_background(self, full_conversation: str, audio_uuid: s
# Add general memory with fallback handling
memory_result = await memory_service.add_memory(
full_conversation, self.client_id, audio_uuid, user_id, user_email,
- chunk_repo=self.chunk_repo
+ db_helper=self.db_helper
)
if memory_result:
@@ -490,7 +481,7 @@ async def _audio_saver(self):
# Reset conversation closure flag when starting new audio
self.conversation_closed = False
- await self.chunk_repo.create_chunk(
+ await self.db_helper.create_chunk(
audio_uuid=self.current_audio_uuid,
audio_path=wav_filename,
client_id=self.client_id,
@@ -536,18 +527,8 @@ async def _transcription_processor(self):
# Get or create transcription manager
if self.transcription_manager is None:
- # Create callback function to queue action items
- async def action_item_callback(transcript_text, client_id, audio_uuid):
- try:
- await self.action_item_queue.put(
- (transcript_text, client_id, audio_uuid)
- )
- except Exception:
- pass # Ignore errors during shutdown
-
self.transcription_manager = TranscriptionManager(
- action_item_callback=action_item_callback,
- chunk_repo=self.chunk_repo
+ chunk_repo=self.db_helper
)
try:
await self.transcription_manager.connect(self.client_id)
@@ -653,82 +634,3 @@ async def _memory_processor(self):
finally:
audio_logger.debug(f"Memory processor stopped for client {self.client_id}")
- async def _action_item_processor(self):
- """
- Processes transcript segments from the per-client action item queue.
-
- This processor handles queue management and delegates the actual
- action item processing to the ActionItemsService.
- """
- try:
- while self.connected:
- try:
- transcript_text, client_id, audio_uuid = await self.action_item_queue.get()
-
- if (
- transcript_text is None or client_id is None or audio_uuid is None
- ): # Disconnect signal
- self.action_item_queue.task_done()
- break
-
- try:
- # Resolve client_id to user information
- user = await get_user_by_client_id(client_id)
- if not user:
- audio_logger.error(
- f"Could not resolve client_id {client_id} to user for action item processing"
- )
- continue
-
- # Track action item processing start
-
- try:
- # Delegate action item processing to the service
- action_item_count = (
- await self.action_items_service.process_transcript_for_action_items(
- transcript_text, client_id, audio_uuid, user.user_id, user.email
- )
- )
-
- if action_item_count > 0:
- audio_logger.info(
- f"đ¯ Action item processor completed: {action_item_count} items processed for {audio_uuid}"
- )
- else:
- audio_logger.debug(
- f"âšī¸ Action item processor completed: no items found for {audio_uuid}"
- )
-
- except Exception as e:
- audio_logger.error(
- f"Error processing action item for client {self.client_id}: {e}"
- )
- except Exception as e:
- audio_logger.error(
- f"Error processing action item for client {self.client_id}: {e}"
- )
- finally:
- # Always mark task as done
- self.action_item_queue.task_done()
-
- except asyncio.CancelledError:
- # Handle cancellation gracefully
- audio_logger.debug(
- f"Action item processor cancelled for client {self.client_id}"
- )
- break
- except Exception as e:
- audio_logger.error(
- f"Error in action item processor loop for client {self.client_id}: {e}",
- exc_info=True,
- )
-
- except asyncio.CancelledError:
- audio_logger.debug(f"Action item processor cancelled for client {self.client_id}")
- except Exception as e:
- audio_logger.error(
- f"Error in action item processor for client {self.client_id}: {e}",
- exc_info=True,
- )
- finally:
- audio_logger.debug(f"Action item processor stopped for client {self.client_id}")
diff --git a/backends/advanced-backend/src/advanced_omi_backend/database.py b/backends/advanced-backend/src/advanced_omi_backend/database.py
index 67b56ba7..6269c6b6 100644
--- a/backends/advanced-backend/src/advanced_omi_backend/database.py
+++ b/backends/advanced-backend/src/advanced_omi_backend/database.py
@@ -22,7 +22,6 @@
chunks_col = db["audio_chunks"]
users_col = db["users"]
speakers_col = db["speakers"]
-action_items_col = db["action_items"]
def get_database():
@@ -36,7 +35,6 @@ def get_collections():
"chunks_col": chunks_col,
"users_col": users_col,
"speakers_col": speakers_col,
- "action_items_col": action_items_col,
}
diff --git a/backends/advanced-backend/src/advanced_omi_backend/llm_client.py b/backends/advanced-backend/src/advanced_omi_backend/llm_client.py
new file mode 100644
index 00000000..f97dea96
--- /dev/null
+++ b/backends/advanced-backend/src/advanced_omi_backend/llm_client.py
@@ -0,0 +1,162 @@
+"""
+Abstract LLM client interface for unified LLM operations across different providers.
+
+This module provides a standardized interface for LLM operations that works with
+OpenAI, Ollama, Anthropic, and other OpenAI-compatible APIs.
+"""
+
+import asyncio
+import logging
+import os
+from abc import ABC, abstractmethod
+from typing import Dict, Optional
+
+logger = logging.getLogger(__name__)
+
+
+class LLMClient(ABC):
+ """Abstract base class for LLM clients."""
+
+ def __init__(self, model: str | None = None, temperature: float = 0.1):
+ self.model = model
+ self.temperature = temperature
+ self.logger = logging.getLogger(f"{__name__}.{self.__class__.__name__}")
+
+ @abstractmethod
+ def generate(self, prompt: str, model: str | None = None, temperature: float | None = None) -> str:
+ """Generate text completion from prompt."""
+ pass
+
+ @abstractmethod
+ def health_check(self) -> Dict:
+ """Check if the LLM service is available and healthy."""
+ pass
+
+ @abstractmethod
+ def get_default_model(self) -> str:
+ """Get the default model for this client."""
+ pass
+
+
+class OpenAILLMClient(LLMClient):
+ """OpenAI-compatible LLM client that works with OpenAI, Ollama, and other compatible APIs."""
+
+ def __init__(self, api_key: str | None = None, base_url: str | None = None, model: str | None = None, temperature: float = 0.1):
+ super().__init__(model, temperature)
+ self.api_key = api_key or os.getenv("OPENAI_API_KEY", "dummy")
+ self.base_url = base_url or os.getenv("OPENAI_BASE_URL", "https://api.openai.com/v1")
+ self.model = model or os.getenv("OPENAI_MODEL", "gpt-4o")
+
+ # Initialize OpenAI client
+ try:
+ import openai
+ self.client = openai.OpenAI(
+ api_key=self.api_key,
+ base_url=self.base_url
+ )
+ self.logger.info(f"OpenAI client initialized with base_url: {self.base_url}")
+ except ImportError:
+ self.logger.error("OpenAI library not installed. Install with: pip install openai")
+ raise
+ except Exception as e:
+ self.logger.error(f"Failed to initialize OpenAI client: {e}")
+ raise
+
+ def generate(self, prompt: str, model: str | None = None, temperature: float | None = None) -> str:
+ """Generate text completion using OpenAI-compatible API."""
+ try:
+ response = self.client.chat.completions.create(
+ model=model or self.model,
+ messages=[{"role": "user", "content": prompt}],
+ temperature=temperature or self.temperature,
+ max_tokens=2000
+ )
+ return response.choices[0].message.content.strip()
+ except Exception as e:
+ self.logger.error(f"Error generating completion: {e}")
+ raise
+
+ def health_check(self) -> Dict:
+ """Check OpenAI-compatible service health."""
+ try:
+ # Try to list models or make a simple completion
+ models = self.client.models.list()
+ model_count = len(list(models.data))
+
+ return {
+ "status": "â
Connected",
+ "model_count": model_count,
+ "base_url": self.base_url,
+ "default_model": self.model
+ }
+ except Exception as e:
+ self.logger.error(f"Health check failed: {e}")
+ return {
+ "status": "â Failed",
+ "error": str(e),
+ "base_url": self.base_url,
+ "default_model": self.model
+ }
+
+ def get_default_model(self) -> str:
+ """Get the default model for this client."""
+ return self.model
+
+
+class LLMClientFactory:
+ """Factory for creating LLM clients based on environment configuration."""
+
+ @staticmethod
+ def create_client() -> LLMClient:
+ """Create an LLM client based on LLM_PROVIDER environment variable."""
+ provider = os.getenv("LLM_PROVIDER", "openai").lower()
+
+ if provider in ["openai", "ollama"]:
+ return OpenAILLMClient(
+ api_key=os.getenv("OPENAI_API_KEY", "dummy"),
+ base_url=os.getenv("OPENAI_BASE_URL"),
+ model=os.getenv("OPENAI_MODEL", "gpt-4o")
+ )
+ elif provider == "anthropic":
+ # Future implementation for Anthropic
+ raise NotImplementedError("Anthropic provider not yet implemented")
+ else:
+ raise ValueError(f"Unsupported LLM provider: {provider}")
+
+ @staticmethod
+ def get_supported_providers() -> list:
+ """Get list of supported LLM providers."""
+ return ["openai", "ollama"]
+
+
+# Global LLM client instance
+_llm_client = None
+
+
+def get_llm_client() -> LLMClient:
+ """Get the global LLM client instance (singleton pattern)."""
+ global _llm_client
+ if _llm_client is None:
+ _llm_client = LLMClientFactory.create_client()
+ return _llm_client
+
+
+def reset_llm_client():
+ """Reset the global LLM client instance (useful for testing)."""
+ global _llm_client
+ _llm_client = None
+
+
+# Async wrapper for blocking LLM operations
+async def async_generate(prompt: str, model: str | None = None, temperature: float | None = None) -> str:
+ """Async wrapper for LLM text generation."""
+ client = get_llm_client()
+ loop = asyncio.get_running_loop()
+ return await loop.run_in_executor(None, client.generate, prompt, model, temperature)
+
+
+async def async_health_check() -> Dict:
+ """Async wrapper for LLM health check."""
+ client = get_llm_client()
+ loop = asyncio.get_running_loop()
+ return await loop.run_in_executor(None, client.health_check)
\ No newline at end of file
diff --git a/backends/advanced-backend/src/advanced_omi_backend/main.py b/backends/advanced-backend/src/advanced_omi_backend/main.py
index 5fbe84b2..da3ec3da 100644
--- a/backends/advanced-backend/src/advanced_omi_backend/main.py
+++ b/backends/advanced-backend/src/advanced_omi_backend/main.py
@@ -21,7 +21,6 @@
from pathlib import Path
from typing import Optional
-import ollama
# Import Beanie for user management
from beanie import init_beanie
@@ -39,7 +38,6 @@
from wyoming.audio import AudioChunk
from wyoming.client import AsyncTcpClient
-from advanced_omi_backend.action_items_service import ActionItemsService
from advanced_omi_backend.client import ClientState
# Import authentication components
@@ -102,7 +100,6 @@
chunks_col = db["audio_chunks"]
users_col = db["users"]
speakers_col = db["speakers"]
-action_items_col = db["action_items"]
# Audio Configuration
OMI_SAMPLE_RATE = 16_000 # Hz
@@ -135,13 +132,11 @@
deepgram_client = None
# Ollama & Qdrant Configuration
-OLLAMA_BASE_URL = os.getenv("OLLAMA_BASE_URL", "http://ollama:11434")
QDRANT_BASE_URL = os.getenv("QDRANT_BASE_URL", "qdrant")
# Memory configuration is now handled in the memory module
# Initialize it with our Ollama and Qdrant URLs
init_memory_config(
- ollama_base_url=OLLAMA_BASE_URL,
qdrant_base_url=QDRANT_BASE_URL,
)
@@ -153,11 +148,8 @@
thread_name_prefix="opus_io",
)
-# Initialize memory service, speaker service, and ollama client
+# Initialize memory service
memory_service = get_memory_service()
-ollama_client = ollama.Client(host=OLLAMA_BASE_URL)
-
-action_items_service = ActionItemsService(action_items_col, ollama_client)
###############################################################################
# UTILITY FUNCTIONS & HELPER CLASSES
@@ -165,7 +157,7 @@
# Initialize repository and global state
-audio_chunks_db_collection = AudioChunksCollectionHelper(chunks_col)
+ac_db_collection_helper = AudioChunksCollectionHelper(chunks_col)
active_clients: dict[str, ClientState] = {}
# Client-to-user mapping for reliable permission checking
@@ -208,7 +200,7 @@ async def create_client_state(
client_id: str, user: User, device_name: Optional[str] = None
) -> ClientState:
"""Create and register a new client state."""
- client_state = ClientState(client_id, audio_chunks_db_collection, action_items_service, CHUNK_DIR, user.user_id, user.email)
+ client_state = ClientState(client_id, ac_db_collection_helper, CHUNK_DIR, user.user_id, user.email)
active_clients[client_id] = client_state
# Register client-user mapping (for active clients)
@@ -508,7 +500,6 @@ async def health_check():
"services": {},
"config": {
"mongodb_uri": MONGODB_URI,
- "ollama_url": OLLAMA_BASE_URL,
"qdrant_url": f"http://{QDRANT_BASE_URL}:6333",
"transcription_service": ("Deepgram WebSocket" if USE_DEEPGRAM else "Offline ASR"),
"asr_uri": (OFFLINE_ASR_TCP_URI if not USE_DEEPGRAM else "wss://api.deepgram.com"),
@@ -516,10 +507,10 @@ async def health_check():
"chunk_dir": str(CHUNK_DIR),
"active_clients": len(active_clients),
"new_conversation_timeout_minutes": NEW_CONVERSATION_TIMEOUT_MINUTES,
- "action_items_enabled": True,
"audio_cropping_enabled": AUDIO_CROPPING_ENABLED,
- "llm_provider": os.getenv("LLM_PROVIDER", "ollama"),
- "llm_model": os.getenv("OPENAI_MODEL" if os.getenv("LLM_PROVIDER", "ollama").lower() == "openai" else "OLLAMA_MODEL", "gpt-4o" if os.getenv("LLM_PROVIDER", "ollama").lower() == "openai" else "gemma3n:e4b"),
+ "llm_provider": os.getenv("LLM_PROVIDER", "openai"),
+ "llm_model": os.getenv("OPENAI_MODEL", "gpt-4o"),
+ "llm_base_url": os.getenv("OPENAI_BASE_URL", "https://api.openai.com/v1"),
},
}
@@ -551,27 +542,27 @@ async def health_check():
overall_healthy = False
critical_services_healthy = False
- # Check Ollama (non-critical service - may not be running)
+ # Check LLM service (non-critical service - may not be running)
try:
- # Run in executor to avoid blocking the main thread
- loop = asyncio.get_running_loop()
- models = await asyncio.wait_for(loop.run_in_executor(None, ollama_client.list), timeout=8.0)
- model_count = len(models.get("models", []))
- health_status["services"]["ollama"] = {
- "status": "â
Connected",
- "healthy": True,
- "models": model_count,
+ from advanced_omi_backend.llm_client import async_health_check
+
+ llm_health = await asyncio.wait_for(async_health_check(), timeout=8.0)
+ health_status["services"]["llm"] = {
+ "status": llm_health.get("status", "â Unknown"),
+ "healthy": "â
" in llm_health.get("status", ""),
+ "base_url": llm_health.get("base_url", ""),
+ "model": llm_health.get("default_model", ""),
"critical": False,
}
except asyncio.TimeoutError:
- health_status["services"]["ollama"] = {
+ health_status["services"]["llm"] = {
"status": "â ī¸ Connection Timeout (8s) - Service may not be running",
"healthy": False,
"critical": False,
}
overall_healthy = False
except Exception as e:
- health_status["services"]["ollama"] = {
+ health_status["services"]["llm"] = {
"status": f"â ī¸ Connection Failed: {str(e)} - Service may not be running",
"healthy": False,
"critical": False,
diff --git a/backends/advanced-backend/src/advanced_omi_backend/memory/memory_service.py b/backends/advanced-backend/src/advanced_omi_backend/memory/memory_service.py
index 2a04e3c7..ce04c3ea 100644
--- a/backends/advanced-backend/src/advanced_omi_backend/memory/memory_service.py
+++ b/backends/advanced-backend/src/advanced_omi_backend/memory/memory_service.py
@@ -3,7 +3,6 @@
This module provides:
- Memory configuration and initialization
- Memory operations (add, get, search, delete)
-- Action item extraction and management
- Debug tracking and configurable extraction
"""
@@ -20,6 +19,7 @@
# Import debug tracker and config loader
from advanced_omi_backend.debug_system_tracker import PipelineStage, get_debug_tracker
from advanced_omi_backend.memory_config_loader import get_config_loader
+from advanced_omi_backend.users import User
# Configure Mem0 telemetry based on environment variable
# Set default to False for privacy unless explicitly enabled
@@ -47,7 +47,6 @@
MEM0_APP_ID = os.getenv("MEM0_APP_ID", "omi-backend")
# Ollama & Qdrant Configuration (these should match main config)
-OLLAMA_BASE_URL = os.getenv("OLLAMA_BASE_URL", "http://ollama:11434")
QDRANT_BASE_URL = os.getenv("QDRANT_BASE_URL", "qdrant")
# Timeout configurations
@@ -66,11 +65,11 @@ def _build_mem0_config() -> dict:
llm_settings = memory_config.get("llm_settings", {})
# Get LLM provider from environment or config
- llm_provider = os.getenv("LLM_PROVIDER", "ollama").lower()
+ llm_provider = os.getenv("LLM_PROVIDER", "openai").lower()
- # Build LLM configuration based on provider
+ # Build LLM configuration based on provider using standard environment variables
if llm_provider == "openai":
- # Use dedicated OPENAI_MODEL environment variable with GPT-4o as default for better JSON parsing
+ # Use standard OPENAI_MODEL environment variable
openai_model = os.getenv("OPENAI_MODEL", "gpt-4o")
# Allow YAML config to override environment variable
@@ -83,6 +82,7 @@ def _build_mem0_config() -> dict:
"config": {
"model": model,
"api_key": os.getenv("OPENAI_API_KEY"),
+ "base_url": os.getenv("OPENAI_BASE_URL"), # Support custom base URL
"temperature": llm_settings.get("temperature", 0.1),
"max_tokens": llm_settings.get("max_tokens", 2000),
},
@@ -94,37 +94,62 @@ def _build_mem0_config() -> dict:
"model": "text-embedding-3-small",
"embedding_dims": 1536,
"api_key": os.getenv("OPENAI_API_KEY"),
+ "base_url": os.getenv("OPENAI_BASE_URL"), # Support custom base URL
},
}
embedding_dims = 1536
- else: # Default to ollama
- # Use dedicated OLLAMA_MODEL environment variable with fallback
- ollama_model = os.getenv("OLLAMA_MODEL", "gemma3n:e4b")
+ elif llm_provider == "ollama":
+ # Use standard OPENAI_MODEL environment variable (Ollama as OpenAI-compatible)
+ ollama_model = os.getenv("OPENAI_MODEL", "llama3.1:latest")
# Allow YAML config to override environment variable
model = llm_settings.get("model", ollama_model)
memory_logger.info(f"Using Ollama provider with model: {model}")
+ # Use OpenAI-compatible configuration for Ollama
llm_config = {
- "provider": "ollama",
+ "provider": "openai", # Use OpenAI provider for Ollama compatibility
"config": {
"model": model,
- "ollama_base_url": OLLAMA_BASE_URL,
+ "api_key": os.getenv("OPENAI_API_KEY", "dummy"), # Ollama doesn't need real key
+ "base_url": os.getenv("OPENAI_BASE_URL", "http://ollama:11434/v1"),
"temperature": llm_settings.get("temperature", 0.1),
"max_tokens": llm_settings.get("max_tokens", 2000),
},
}
- # For Ollama, use Ollama embeddings
+ # For Ollama, use Ollama embeddings with OpenAI-compatible config
embedder_config = {
"provider": "ollama",
"config": {
"model": "nomic-embed-text:latest",
"embedding_dims": 768,
- "ollama_base_url": OLLAMA_BASE_URL,
+ "ollama_base_url": os.getenv("OPENAI_BASE_URL", "http://ollama:11434"),
},
}
embedding_dims = 768
+ else:
+ raise ValueError(f"Unsupported LLM provider: {llm_provider}")
+
+ # Build Neo4j graph store configuration
+ neo4j_config = None
+ neo4j_host = os.getenv("NEO4J_HOST", "neo4j-mem0")
+ neo4j_user = os.getenv("NEO4J_USER", "neo4j")
+ neo4j_password = os.getenv("NEO4J_PASSWORD", "password")
+
+ if neo4j_host and neo4j_user and neo4j_password:
+ neo4j_config = {
+ "provider": "neo4j",
+ "config": {
+ "url": f"bolt://{neo4j_host}:7687",
+ "username": neo4j_user,
+ "password": neo4j_password,
+ "database": "neo4j",
+ },
+ }
+ memory_logger.info(f"Neo4j graph store configured: {neo4j_host}")
+ else:
+ memory_logger.warning("Neo4j configuration incomplete - graph store disabled")
# Valid mem0 configuration format based on official documentation
# See: https://docs.mem0.ai/platform/quickstart and https://github.com/mem0ai/mem0
@@ -143,6 +168,10 @@ def _build_mem0_config() -> dict:
"version": "v1.1"
}
+ # Add graph store configuration if available
+ if neo4j_config:
+ mem0_config["graph_store"] = neo4j_config
+
# Configure fact extraction - ALWAYS ENABLE for proper memory creation
fact_enabled = config_loader.is_fact_extraction_enabled()
memory_logger.info(f"YAML fact extraction enabled: {fact_enabled}")
@@ -185,8 +214,6 @@ def _build_mem0_config() -> dict:
# Global memory configuration - built dynamically from YAML config
MEM0_CONFIG = _build_mem0_config()
-# Action item extraction is now handled by ActionItemsService
-# using configuration from memory_config.yaml
# Global instances
_memory_service = None
@@ -194,7 +221,6 @@ def _build_mem0_config() -> dict:
def init_memory_config(
- ollama_base_url: Optional[str] = None,
qdrant_base_url: Optional[str] = None,
organization_id: Optional[str] = None,
project_id: Optional[str] = None,
@@ -204,12 +230,10 @@ def init_memory_config(
global MEM0_CONFIG, MEM0_ORGANIZATION_ID, MEM0_PROJECT_ID, MEM0_APP_ID
memory_logger.info(
- f"Initializing MemoryService with Qdrant URL: {qdrant_base_url} and Ollama base URL: {ollama_base_url}"
+ f"Initializing MemoryService with Qdrant URL: {qdrant_base_url}"
)
- if ollama_base_url:
- MEM0_CONFIG["llm"]["config"]["ollama_base_url"] = ollama_base_url
- MEM0_CONFIG["embedder"]["config"]["ollama_base_url"] = ollama_base_url
+ # Configuration updates would go here if needed
if qdrant_base_url:
MEM0_CONFIG["vector_store"]["config"]["host"] = qdrant_base_url
@@ -731,12 +755,8 @@ def _add_memory_to_store(
return False, []
-# Action item extraction functions removed - now handled by ActionItemsService
-# See action_items_service.py for the main action item processing logic
-# Action item storage functions removed - now handled by ActionItemsService
-# See action_items_service.py for the main action item processing logic
class MemoryService:
@@ -786,7 +806,7 @@ async def add_memory(
user_id: str,
user_email: str,
allow_update: bool = False,
- chunk_repo=None,
+ db_helper=None,
) -> bool:
"""Add memory in background process (non-blocking).
@@ -827,15 +847,15 @@ async def add_memory(
f"Added transcript for {audio_uuid} to mem0 (user: {user_email}, client: {client_id})"
)
# Update the database relationship if memories were created and chunk_repo is available
- if created_memory_ids and chunk_repo:
+ if created_memory_ids and db_helper:
try:
for memory_id in created_memory_ids:
- await chunk_repo.add_memory_reference(audio_uuid, memory_id, "created")
+ await db_helper.add_memory_reference(audio_uuid, memory_id, "created")
memory_logger.info(f"Added memory reference {memory_id} to audio chunk {audio_uuid}")
except Exception as db_error:
memory_logger.error(f"Failed to update database relationship for {audio_uuid}: {db_error}")
# Don't fail the entire operation if database update fails
- elif created_memory_ids and not chunk_repo:
+ elif created_memory_ids and not db_helper:
memory_logger.warning(f"Created memories {created_memory_ids} for {audio_uuid} but no chunk_repo provided to update database relationship")
else:
memory_logger.error(f"Failed to add memory for {audio_uuid}")
@@ -849,16 +869,6 @@ async def add_memory(
memory_logger.error(f"Error adding memory for {audio_uuid}: {e}")
return False
- # Action item methods removed - now handled by ActionItemsService
- # See action_items_service.py for the main action item processing logic
-
- # get_action_items method removed - now handled by ActionItemsService
-
- # update_action_item_status method removed - now handled by ActionItemsService
-
- # search_action_items method removed - now handled by ActionItemsService
-
- # search_action_items and delete_action_item methods removed - now handled by ActionItemsService
def get_all_memories(self, user_id: str, limit: int = 100) -> list:
"""Get all memories for a user, filtering and prioritizing semantic memories over fallback transcript memories."""
@@ -1065,31 +1075,17 @@ def delete_memory(self, memory_id: str) -> bool:
memory_logger.error(f"Error deleting memory {memory_id}: {e}")
raise
- def get_all_memories_debug(self, limit: int = 200) -> list:
+ async def get_all_memories_debug(self, limit: int = 200) -> list:
"""Get all memories across all users for admin debugging. Admin only."""
if not self._initialized:
- # This is a sync method, so we need to handle initialization differently
- loop = asyncio.get_event_loop()
- if loop.is_running():
- # If we're in an async context, we can't call initialize() directly
- # This should be handled by the caller
- raise Exception("Memory service not initialized - call await initialize() first")
- else:
- # We're in a sync context, run the async initialize
- loop.run_until_complete(self.initialize())
+ await self.initialize()
assert self.memory is not None, "Memory service not initialized"
try:
all_memories = []
- # First, we need to get a list of all users who have memories
- # We'll do this by getting user_ids from the database or using a small Qdrant query
- # to find unique user_ids, then use the proper memory service methods
-
- from advanced_omi_backend.users import get_all_users
-
# Get all users from the database
- users = get_all_users()
+ users = await User.find_all().to_list()
memory_logger.info(f"đ Found {len(users)} users for admin debug")
for user in users:
diff --git a/backends/advanced-backend/src/advanced_omi_backend/memory_config_loader.py b/backends/advanced-backend/src/advanced_omi_backend/memory_config_loader.py
index 33eff3c8..66d555ff 100644
--- a/backends/advanced-backend/src/advanced_omi_backend/memory_config_loader.py
+++ b/backends/advanced-backend/src/advanced_omi_backend/memory_config_loader.py
@@ -72,12 +72,6 @@ def _get_default_config(self) -> Dict[str, Any]:
"prompt": "Extract specific facts from this conversation.",
"llm_settings": {"temperature": 0.0, "max_tokens": 1500, "model": default_model},
},
- "action_item_extraction": {
- "enabled": True,
- "trigger_phrases": ["simon says", "action item", "todo"],
- "prompt": "Extract action items from this conversation.",
- "llm_settings": {"temperature": 0.1, "max_tokens": 1000, "model": default_model},
- },
"categorization": {
"enabled": False,
"categories": ["work", "personal", "meeting", "other"],
@@ -132,9 +126,6 @@ def get_fact_extraction_config(self) -> Dict[str, Any]:
"""Get fact extraction configuration."""
return self.config.get("fact_extraction", {})
- def get_action_item_extraction_config(self) -> Dict[str, Any]:
- """Get action item extraction configuration."""
- return self.config.get("action_item_extraction", {})
def get_categorization_config(self) -> Dict[str, Any]:
"""Get categorization configuration."""
@@ -164,9 +155,6 @@ def is_fact_extraction_enabled(self) -> bool:
"""Check if fact extraction is enabled."""
return self.get_fact_extraction_config().get("enabled", False)
- def is_action_item_extraction_enabled(self) -> bool:
- """Check if action item extraction is enabled."""
- return self.get_action_item_extraction_config().get("enabled", True)
def is_categorization_enabled(self) -> bool:
"""Check if categorization is enabled."""
@@ -188,11 +176,6 @@ def get_fact_prompt(self) -> str:
"prompt", "Extract specific facts from this conversation."
)
- def get_action_item_prompt(self) -> str:
- """Get the action item extraction prompt."""
- return self.get_action_item_extraction_config().get(
- "prompt", "Extract action items from this conversation."
- )
def get_categorization_prompt(self) -> str:
"""Get the categorization prompt."""
@@ -203,15 +186,13 @@ def get_llm_settings(self, extraction_type: str) -> Dict[str, Any]:
Get LLM settings for a specific extraction type.
Args:
- extraction_type: One of 'memory', 'fact', 'action_item', 'categorization'
+ extraction_type: One of 'memory', 'fact', 'categorization'
"""
config_key = f"{extraction_type}_extraction"
if extraction_type == "memory":
config_key = "memory_extraction"
elif extraction_type == "fact":
config_key = "fact_extraction"
- elif extraction_type == "action_item":
- config_key = "action_item_extraction"
elif extraction_type == "categorization":
config_key = "categorization"
@@ -289,20 +270,6 @@ def should_skip_conversation(self, conversation_text: str) -> bool:
return False
- def get_action_item_triggers(self) -> list[str]:
- """Get action item trigger phrases."""
- return self.get_action_item_extraction_config().get("trigger_phrases", [])
-
- def has_action_item_triggers(self, conversation_text: str) -> bool:
- """Check if conversation contains action item trigger phrases."""
- triggers = self.get_action_item_triggers()
- conversation_lower = conversation_text.lower()
-
- for trigger in triggers:
- if trigger.lower() in conversation_lower:
- return True
-
- return False
def get_categories(self) -> list[str]:
"""Get available categories for classification."""
diff --git a/backends/advanced-backend/src/advanced_omi_backend/routers/modules/memory_routes.py b/backends/advanced-backend/src/advanced_omi_backend/routers/modules/memory_routes.py
index 70c7d197..f91f8e0b 100644
--- a/backends/advanced-backend/src/advanced_omi_backend/routers/modules/memory_routes.py
+++ b/backends/advanced-backend/src/advanced_omi_backend/routers/modules/memory_routes.py
@@ -183,9 +183,7 @@ async def get_all_memories_admin(current_user: User = Depends(current_superuser)
debug_tracker = get_debug_tracker()
# Get all memories without user filtering
- all_memories = await asyncio.get_running_loop().run_in_executor(
- None, memory_service.get_all_memories_debug, limit
- )
+ all_memories = await memory_service.get_all_memories_debug(limit)
# Group by user for easier admin review
user_memories = {}
diff --git a/backends/advanced-backend/src/advanced_omi_backend/transcription.py b/backends/advanced-backend/src/advanced_omi_backend/transcription.py
index 6fb7f6c6..1691842e 100644
--- a/backends/advanced-backend/src/advanced_omi_backend/transcription.py
+++ b/backends/advanced-backend/src/advanced_omi_backend/transcription.py
@@ -24,14 +24,14 @@
class TranscriptionManager:
"""Manages transcription using either Deepgram batch API or offline ASR service."""
- def __init__(self, action_item_callback=None, chunk_repo=None):
+ # TODO: Accept callbacks list
+ def __init__(self, chunk_repo=None):
self.client = None
self._current_audio_uuid = None
self.use_deepgram = USE_DEEPGRAM
self._audio_buffer = [] # Buffer for collecting audio chunks
self._audio_start_time = None # Track when audio collection started
self._max_collection_time = 90.0 # 1.5 minutes timeout
- self.action_item_callback = action_item_callback # Callback to queue action items
self._current_transaction_id = None # Track current debug transaction
self.chunk_repo = chunk_repo # Database repository for chunks
self.client_manager = get_client_manager() # Cached client manager instance
diff --git a/backends/advanced-backend/src/webui/streamlit_app.py b/backends/advanced-backend/src/webui/streamlit_app.py
index 8b4431a1..fdc938f2 100644
--- a/backends/advanced-backend/src/webui/streamlit_app.py
+++ b/backends/advanced-backend/src/webui/streamlit_app.py
@@ -1355,15 +1355,15 @@ def delete_data(endpoint: str, params: dict | None = None, require_auth: bool =
with tab_mem:
logger.debug("đ§ Loading memories tab...")
- st.header("Memories & Action Items")
+ st.header("Memories")
# Use session state for selected user if available
default_user = st.session_state.get('selected_user', '')
- # User selection for memories and action items
+ # User selection for memories
col1, col2 = st.columns([2, 1])
with col1:
- user_id_input = st.text_input("Enter username to view memories & action items:",
+ user_id_input = st.text_input("Enter username to view memories:",
value=default_user,
placeholder="e.g., john_doe, alice123")
with col2:
@@ -1378,23 +1378,15 @@ def delete_data(endpoint: str, params: dict | None = None, require_auth: bool =
logger.info("đ Manual memories refresh requested")
st.rerun()
- # Get memories and action items based on user selection
+ # Get memories based on user selection
if user_id_input.strip():
logger.info(f"đ§ Loading data for user: {user_id_input.strip()}")
st.info(f"Showing data for user: **{user_id_input.strip()}**")
- # Load both memories and action items
- col1, col2 = st.columns([1, 1])
-
- with col1:
- with st.spinner("Loading memories..."):
- logger.debug(f"đĄ Fetching memories for user: {user_id_input.strip()}")
- memories_response = get_data(f"/api/memories?user_id={user_id_input.strip()}", require_auth=True)
-
- with col2:
- with st.spinner("Loading action items..."):
- logger.debug(f"đĄ Fetching action items for user: {user_id_input.strip()}")
- action_items_response = get_data(f"/api/action-items?user_id={user_id_input.strip()}", require_auth=True)
+ # Load memories
+ with st.spinner("Loading memories..."):
+ logger.debug(f"đĄ Fetching memories for user: {user_id_input.strip()}")
+ memories_response = get_data(f"/api/memories?user_id={user_id_input.strip()}", require_auth=True)
# Handle the API response format with "results" wrapper for memories
if memories_response and isinstance(memories_response, dict) and "results" in memories_response:
@@ -1404,19 +1396,11 @@ def delete_data(endpoint: str, params: dict | None = None, require_auth: bool =
memories = memories_response
logger.debug(f"đ§ Memories response format: {type(memories_response)}")
- # Handle action items response
- if action_items_response and isinstance(action_items_response, dict) and "action_items" in action_items_response:
- action_items = action_items_response["action_items"]
- logger.debug(f"đ¯ Action items response has 'action_items' wrapper, extracted {len(action_items)} items")
- else:
- action_items = action_items_response if action_items_response else []
- logger.debug(f"đ¯ Action items response format: {type(action_items_response)}")
else:
# Show instruction to enter a username
memories = None
- action_items = None
logger.debug("đ No user ID provided, showing instructions")
- st.info("đ Please enter a username above to view their memories and action items.")
+ st.info("đ Please enter a username above to view their memories.")
st.markdown("đĄ **Tip:** You can find existing usernames in the 'User Management' tab.")
# Admin Debug Section - Show before regular memories
@@ -1788,244 +1772,6 @@ def delete_data(endpoint: str, params: dict | None = None, require_auth: bool =
logger.info(f"đ§ No memories found for user {user_id_input.strip()}")
st.info("No memories found for this user.")
- # Display Action Items Section
- if action_items is not None:
- logger.debug("đ¯ Displaying action items section...")
- st.subheader("đ¯ Action Items")
-
- if action_items:
- logger.info(f"đ¯ Displaying {len(action_items)} action items for user {user_id_input.strip()}")
-
- # Status filter for action items
- col1, col2, col3 = st.columns([2, 1, 1])
- with col1:
- status_filter = st.selectbox(
- "Filter by status:",
- options=["All", "open", "in_progress", "completed", "cancelled"],
- index=0,
- key="action_items_filter"
- )
- with col2:
- show_stats = st.button("đ Show Stats", key="show_action_stats")
- with col3:
- # Manual action item creation button
- if st.button("â Add Item", key="add_action_item"):
- logger.info("â Manual action item creation requested")
- st.session_state['show_add_action_item'] = True
-
- # Filter action items by status
- if status_filter != "All":
- filtered_items = [item for item in action_items if item.get('status') == status_filter]
- logger.debug(f"đ¯ Filtered action items by status '{status_filter}': {len(filtered_items)} items")
- else:
- filtered_items = action_items
- logger.debug(f"đ¯ Showing all action items: {len(filtered_items)} items")
-
- # Show statistics if requested
- if show_stats:
- logger.info("đ Action items statistics requested")
- stats_response = get_data(f"/api/action-items/stats?user_id={user_id_input.strip()}", require_auth=True)
- if stats_response and "statistics" in stats_response:
- stats = stats_response["statistics"]
- logger.debug(f"đ Action items statistics: {stats}")
-
- # Display stats in columns
- col1, col2, col3, col4 = st.columns(4)
- with col1:
- st.metric("Total", stats["total"])
- st.metric("Open", stats["open"])
- with col2:
- st.metric("In Progress", stats["in_progress"])
- st.metric("Completed", stats["completed"])
- with col3:
- st.metric("Cancelled", stats["cancelled"])
- st.metric("Overdue", stats.get("overdue", 0))
- with col4:
- st.write("**By Priority:**")
- for priority, count in stats.get("by_priority", {}).items():
- if count > 0:
- st.write(f"âĸ {priority.title()}: {count}")
-
- # Assignee breakdown
- if stats.get("by_assignee"):
- st.write("**By Assignee:**")
- assignee_df = pd.DataFrame(list(stats["by_assignee"].items()), columns=["Assignee", "Count"])
- st.dataframe(assignee_df, hide_index=True, use_container_width=True)
- else:
- logger.warning("đ Action items statistics not available")
-
- # Manual action item creation form
- if st.session_state.get('show_add_action_item', False):
- logger.debug("â Showing action item creation form")
- with st.expander("â Create New Action Item", expanded=True):
- with st.form("create_action_item"):
- description = st.text_input("Description*:", placeholder="e.g., Send quarterly report to management")
- col1, col2 = st.columns(2)
- with col1:
- assignee = st.text_input("Assignee:", placeholder="e.g., john_doe", value="unassigned")
- priority = st.selectbox("Priority:", options=["high", "medium", "low", "not_specified"], index=1)
- with col2:
- due_date = st.text_input("Due Date:", placeholder="e.g., Friday, 2024-01-15", value="not_specified")
- context = st.text_input("Context:", placeholder="e.g., Mentioned in team meeting")
-
- submitted = st.form_submit_button("Create Action Item")
-
- if submitted:
- logger.info(f"â Creating action item for user {user_id_input.strip()}")
- if description.strip():
- create_data = {
- "description": description.strip(),
- "assignee": assignee.strip() if assignee.strip() else "unassigned",
- "due_date": due_date.strip() if due_date.strip() else "not_specified",
- "priority": priority,
- "context": context.strip()
- }
-
- try:
- logger.debug(f"đ¤ Creating action item with data: {create_data}")
- response = requests.post(
- f"{BACKEND_API_URL}/api/action-items",
- json=create_data,
- headers=get_auth_headers()
- )
- response.raise_for_status()
- result = response.json()
- st.success(f"â
Action item created: {result['action_item']['description']}")
- logger.info(f"â
Action item created successfully: {result['action_item']['description']}")
- st.session_state['show_add_action_item'] = False
- st.rerun()
- except requests.exceptions.RequestException as e:
- logger.error(f"â Error creating action item: {e}")
- st.error(f"Error creating action item: {e}")
- else:
- logger.warning("â ī¸ Action item creation attempted without description")
- st.error("Please enter a description for the action item")
-
- if st.button("â Cancel", key="cancel_add_action"):
- logger.debug("â Action item creation cancelled")
- st.session_state['show_add_action_item'] = False
- st.rerun()
-
- # Display action items
- if filtered_items:
- logger.debug(f"đ¯ Displaying {len(filtered_items)} filtered action items")
- st.write(f"**Showing {len(filtered_items)} action items** (filtered by: {status_filter})")
-
- for i, item in enumerate(filtered_items):
- logger.debug(f"đ¯ Processing action item {i+1}: {item.get('description', 'No description')[:50]}...")
-
- with st.container():
- # Create columns for action item display
- col1, col2, col3 = st.columns([3, 1, 1])
-
- with col1:
- # Description with status badge
- status = item.get('status', 'open')
- status_emoji = {
- 'open': 'đĩ',
- 'in_progress': 'đĄ',
- 'completed': 'â
',
- 'cancelled': 'â'
- }.get(status, 'đĩ')
-
- st.write(f"**{status_emoji} {item.get('description', 'No description')}**")
-
- # Additional details
- details = []
- if item.get('assignee') and item.get('assignee') != 'unassigned':
- details.append(f"đ¤ {item['assignee']}")
- if item.get('due_date') and item.get('due_date') != 'not_specified':
- details.append(f"đ
{item['due_date']}")
- if item.get('priority') and item.get('priority') != 'not_specified':
- priority_emoji = {'high': 'đ´', 'medium': 'đĄ', 'low': 'đĸ'}.get(item['priority'], 'âĒ')
- details.append(f"{priority_emoji} {item['priority']}")
- if item.get('context'):
- details.append(f"đ {item['context']}")
-
- if details:
- st.caption(" | ".join(details))
-
- # Creation info
- created_at = item.get('created_at')
- if created_at:
- try:
- if isinstance(created_at, (int, float)):
- created_time = datetime.fromtimestamp(created_at)
- else:
- created_time = pd.to_datetime(created_at)
- st.caption(f"Created: {created_time.strftime('%Y-%m-%d %H:%M:%S')}")
- except:
- st.caption(f"Created: {created_at}")
-
- with col2:
- # Status update
- new_status = st.selectbox(
- "Status:",
- options=["open", "in_progress", "completed", "cancelled"],
- index=["open", "in_progress", "completed", "cancelled"].index(status),
- key=f"status_{i}_{item.get('memory_id', i)}"
- )
-
- if new_status != status:
- if st.button("Update", key=f"update_{i}_{item.get('memory_id', i)}"):
- memory_id = item.get('memory_id')
- if memory_id:
- logger.info(f"đ Updating action item {memory_id} status from {status} to {new_status}")
- try:
- response = requests.put(
- f"{BACKEND_API_URL}/api/action-items/{memory_id}",
- json={"status": new_status},
- headers=get_auth_headers()
- )
- response.raise_for_status()
- st.success(f"Status updated to {new_status}")
- logger.info(f"â
Action item status updated successfully")
- st.rerun()
- except requests.exceptions.RequestException as e:
- logger.error(f"â Error updating action item status: {e}")
- st.error(f"Error updating status: {e}")
- else:
- logger.error(f"â No memory ID found for action item")
- st.error("No memory ID found for this action item")
-
- with col3:
- # Delete button
- if st.button("đī¸ Delete", key=f"delete_{i}_{item.get('memory_id', i)}", type="secondary"):
- memory_id = item.get('memory_id')
- if memory_id:
- logger.info(f"đī¸ Deleting action item {memory_id}")
- try:
- response = requests.delete(f"{BACKEND_API_URL}/api/action-items/{memory_id}", headers=get_auth_headers())
- response.raise_for_status()
- st.success("Action item deleted")
- logger.info(f"â
Action item deleted successfully")
- st.rerun()
- except requests.exceptions.RequestException as e:
- logger.error(f"â Error deleting action item: {e}")
- st.error(f"Error deleting action item: {e}")
- else:
- logger.error(f"â No memory ID found for action item")
- st.error("No memory ID found for this action item")
-
- st.divider()
-
- st.caption(f"đĄ **Tip:** Action items are automatically extracted from conversations at the end of each session")
- else:
- if status_filter == "All":
- logger.info(f"đ¯ No action items found for user {user_id_input.strip()}")
- st.info("No action items found for this user.")
- else:
- logger.info(f"đ¯ No action items found with status '{status_filter}' for user {user_id_input.strip()}")
- st.info(f"No action items found with status '{status_filter}' for this user.")
- else:
- logger.info(f"đ¯ No action items found for user {user_id_input.strip()}")
- st.info("No action items found for this user.")
-
- # Show option to create manual action item even when none exist
- if user_id_input.strip() and st.button("â Create First Action Item", key="create_first_item"):
- logger.info("â Creating first action item for user")
- st.session_state['show_add_action_item'] = True
- st.rerun()
with tab_users:
st.header("User Management")