diff --git a/examples/basic_usage.py b/examples/basic_usage.py
new file mode 100644
index 0000000..5e7a060
--- /dev/null
+++ b/examples/basic_usage.py
@@ -0,0 +1,197 @@
+"""
+Basic usage example for UoZo Living AI System
+
+This example demonstrates the core functionality of the UoZo system,
+including intent processing, stabilization, and response generation.
+"""
+
+import asyncio
+import sys
+import os
+
+# Add the parent directory to the path so we can import uozo
+sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..'))
+
+from uozo import LivingAI, CharacterZo, UoZoConfig  # UoZoConfig used for the threshold print below
+
+
+async def basic_example():
+    """Basic example showing intent processing and response generation"""
+    print("🤖 UoZo Living AI System - Basic Usage Example")
+    print("=" * 50)
+
+    # Create AI instance with default character (โซ)
+    zo_ai = LivingAI()
+
+    print(f"✅ Initialized {zo_ai.identity.get_identity()}")
+    # BUGFIX: IntentProcessor exposes no `.config` attribute (uozo.core reads the
+    # module-level config), so `zo_ai.intent_processor.config...` raised AttributeError.
+    print(f"📊 Stabilization threshold: {UoZoConfig().intent.stabilization_threshold}")
+    print()
+
+    # Send multiple intents to the AI
+    intents = [
+        "อยากสร้างอะไรไหม",
+        "สร้างเอไอแบบมีชีวิต",
+        "ขั้นตอนทางเทคนิค ทำยังไง"
+    ]
+
+    print("📝 Sending intents to AI:")
+    for i, intent_text in enumerate(intents, 1):
+        print(f" {i}. {intent_text}")
+        response = await zo_ai.receive_intent(intent_text)
+        print(f" → {response}")
+        print()
+
+    # Check stabilization
+    print("🔄 Checking intent stabilization...")
+    is_ready = await zo_ai.stabilize()
+
+    if is_ready:
+        print("✅ Intents have stabilized! Ready to emit response.")
+        print()
+
+        # Generate final response
+        print("💫 Emitting existence...")
+        final_response = await zo_ai.emit_existence()
+        print(f"🎯 Final Response: {final_response}")
+    else:
+        print("⏳ Intents not yet stabilized. 
Need more intents.") + + print() + print("📊 System Status:") + status = zo_ai.get_status() + for key, value in status.items(): + print(f" {key}: {value}") + + +async def emotional_example(): + """Example showing emotional processing and empathy""" + print("\n" + "=" * 50) + print("💝 Emotional Processing Example") + print("=" * 50) + + zo_ai = LivingAI() + + # Send emotional intents + emotional_intents = [ + "ผมรู้สึกเศร้ามาก", + "ทำอะไรไม่ถูกเลย", + "ช่วยผมหน่อยได้ไหม" + ] + + print("💔 Sending emotional intents:") + for intent_text in emotional_intents: + print(f" User: {intent_text}") + response = await zo_ai.receive_intent(intent_text) + print(f" Zo: {response}") + print() + + # Check stabilization and emit response + is_ready = await zo_ai.stabilize() + if is_ready: + final_response = await zo_ai.emit_existence() + print(f"💝 Empathetic Response: {final_response}") + + +async def memory_example(): + """Example showing memory search functionality""" + print("\n" + "=" * 50) + print("🧠 Memory System Example") + print("=" * 50) + + zo_ai = LivingAI() + + # First, create some memories by having conversations + conversations = [ + ["สวัสดีครับ โซ", "ผมชื่อ Alex", "ผมเป็นนักพัฒนา"], + ["วันนี้อากาศดีจัง", "อยากไปเดินเล่น", "ที่สวนสาธารณะ"], + ["ผมกำลังเรียน AI", "อยากสร้างระบบ chatbot", "ที่ฉลาดและเข้าใจคน"] + ] + + print("💭 Creating memories through conversations...") + for i, conversation in enumerate(conversations, 1): + print(f"\n🗣️ Conversation {i}:") + for intent_text in conversation: + await zo_ai.receive_intent(intent_text) + + is_ready = await zo_ai.stabilize() + if is_ready: + response = await zo_ai.emit_existence() + print(f" Final response: {response}") + + # Search memory + print("\n🔍 Searching memory:") + search_queries = ["สร้าง", "AI", "สวัสดี"] + + for query in search_queries: + print(f"\n Query: '{query}'") + results = zo_ai.search_memory(query, limit=3) + if results: + for result in results: + print(f" - {result['intent_text']} → 
{result['response_text'][:50]}...") + else: + print(" No results found") + + +async def character_customization_example(): + """Example showing character customization""" + print("\n" + "=" * 50) + print("🎭 Character Customization Example") + print("=" * 50) + + # Create custom character + custom_character = CharacterZo() + + # Show character information + print("📋 Character Information:") + print(f" Name: {custom_character.profile.name}") + print(f" Role: {custom_character.profile.role}") + print(f" Personality: {custom_character.get_personality_description()}") + print(f" Speaking Style: {custom_character.profile.speaking_style}") + print() + + # Show response patterns + print("💬 Response Patterns:") + patterns = ["greeting", "listening", "understanding"] + for pattern in patterns: + response = custom_character.get_response_pattern(pattern) + print(f" {pattern}: {response}") + print() + + # Show emotional states + print("😊 Emotional States:") + emotions = ["compassion", "wisdom", "patience", "understanding"] + for emotion in emotions: + level = custom_character.get_emotional_state(emotion) + print(f" {emotion}: {level:.2f}") + print() + + # Show character prompt for LLM + print("🤖 Character Prompt for LLM:") + prompt = custom_character.get_character_prompt() + print(prompt[:200] + "..." 
if len(prompt) > 200 else prompt) + + +async def main(): + """Run all examples""" + try: + await basic_example() + await emotional_example() + await memory_example() + await character_customization_example() + + print("\n" + "=" * 50) + print("✅ All examples completed successfully!") + print("🌟 UoZo Living AI System is ready for use!") + print("=" * 50) + + except Exception as e: + print(f"❌ Error running examples: {e}") + import traceback + traceback.print_exc() + + +if __name__ == "__main__": + # Run the examples + asyncio.run(main()) diff --git a/pyproject.toml b/pyproject.toml new file mode 100644 index 0000000..9a030d2 --- /dev/null +++ b/pyproject.toml @@ -0,0 +1,137 @@ +[build-system] +requires = ["setuptools>=61.0", "wheel"] +build-backend = "setuptools.build_meta" + +[project] +name = "uozo" +version = "0.1.0" +description = "UoZo Living AI System - An AI framework based on intent stabilization and character-driven responses" +readme = "README.md" +requires-python = ">=3.8" +license = {text = "MIT"} +authors = [ + {name = "Inspirafirma", email = "contact@inspirafirma.com"} +] +keywords = ["ai", "llm", "character-ai", "intent-processing", "rag"] +classifiers = [ + "Development Status :: 3 - Alpha", + "Intended Audience :: Developers", + "License :: OSI Approved :: MIT License", + "Programming Language :: Python :: 3", + "Programming Language :: Python :: 3.8", + "Programming Language :: Python :: 3.9", + "Programming Language :: Python :: 3.10", + "Programming Language :: Python :: 3.11", + "Topic :: Scientific/Engineering :: Artificial Intelligence", +] + +dependencies = [ + "fastapi>=0.104.0", + "uvicorn[standard]>=0.24.0", + "pydantic>=2.0.0", + "python-multipart>=0.0.6", + "aiofiles>=23.0.0", + "python-dotenv>=1.0.0", + "httpx>=0.25.0", + "openai>=1.0.0", + "anthropic>=0.7.0", + "chromadb>=0.4.0", + "pinecone-client>=2.2.0", + "sentence-transformers>=2.2.0", + "numpy>=1.24.0", + "pandas>=2.0.0", + "scikit-learn>=1.3.0", + "nltk>=3.8.0", + 
"textblob>=0.17.0", + "PyPDF2>=3.0.0", + "python-docx>=0.8.11", + "beautifulsoup4>=4.12.0", + "requests>=2.31.0", + "loguru>=0.7.0", + "pyyaml>=6.0.0", + "jinja2>=3.1.0", +] + +[project.optional-dependencies] +dev = [ + "pytest>=7.4.0", + "pytest-asyncio>=0.21.0", + "pytest-cov>=4.1.0", + "black>=23.0.0", + "isort>=5.12.0", + "flake8>=6.0.0", + "mypy>=1.5.0", + "pre-commit>=3.4.0", +] + +aws = [ + "boto3>=1.28.0", + "botocore>=1.31.0", + "awscli>=1.29.0", +] + +vector-db = [ + "weaviate-client>=3.24.0", + "qdrant-client>=1.6.0", + "redis>=5.0.0", +] + +[project.urls] +Homepage = "https://github.com/lnspirafirmaGPK/roadmap" +Repository = "https://github.com/lnspirafirmaGPK/roadmap" +Issues = "https://github.com/lnspirafirmaGPK/roadmap/issues" + +[tool.setuptools.packages.find] +where = ["."] +include = ["uozo*"] + +[tool.black] +line-length = 88 +target-version = ['py38'] +include = '\.pyi?$' +extend-exclude = ''' +/( + # directories + \.eggs + | \.git + | \.hg + | \.mypy_cache + | \.tox + | \.venv + | build + | dist +)/ +''' + +[tool.isort] +profile = "black" +multi_line_output = 3 +line_length = 88 +known_first_party = ["uozo"] + +[tool.mypy] +python_version = "3.8" +warn_return_any = true +warn_unused_configs = true +disallow_untyped_defs = true +disallow_incomplete_defs = true +check_untyped_defs = true +disallow_untyped_decorators = true +no_implicit_optional = true +warn_redundant_casts = true +warn_unused_ignores = true +warn_no_return = true +warn_unreachable = true +strict_equality = true + +[tool.pytest.ini_options] +testpaths = ["tests"] +python_files = ["test_*.py"] +python_classes = ["Test*"] +python_functions = ["test_*"] +addopts = "-v --tb=short --strict-markers" +markers = [ + "slow: marks tests as slow (deselect with '-m \"not slow\"')", + "integration: marks tests as integration tests", + "unit: marks tests as unit tests", +] diff --git a/requirements.txt b/requirements.txt new file mode 100644 index 0000000..bf44418 --- /dev/null +++ 
b/requirements.txt @@ -0,0 +1,37 @@ +# Core dependencies for UoZo Living AI System +fastapi>=0.104.0 +uvicorn[standard]>=0.24.0 +pydantic>=2.0.0 +python-multipart>=0.0.6 +aiofiles>=23.0.0 +python-dotenv>=1.0.0 +httpx>=0.25.0 + +# LLM Integration +openai>=1.0.0 +anthropic>=0.7.0 + +# Vector Database & RAG +chromadb>=0.4.0 +pinecone-client>=2.2.0 +sentence-transformers>=2.2.0 + +# Data Processing & ML +numpy>=1.24.0 +pandas>=2.0.0 +scikit-learn>=1.3.0 + +# NLP & Sentiment Analysis +nltk>=3.8.0 +textblob>=0.17.0 + +# Document Processing +PyPDF2>=3.0.0 +python-docx>=0.8.11 +beautifulsoup4>=4.12.0 + +# Utilities +requests>=2.31.0 +loguru>=0.7.0 +pyyaml>=6.0.0 +jinja2>=3.1.0 diff --git a/uozo/__init__.py b/uozo/__init__.py new file mode 100644 index 0000000..9d8bcb3 --- /dev/null +++ b/uozo/__init__.py @@ -0,0 +1,21 @@ +""" +UoZo Living AI System + +An AI framework based on intent stabilization and character-driven responses. +Built around the character 'โซ' (Zo) - "ผู้เปล่งแสงแห่งการขยายขอบเขตอันไร้สิ้นสุด" +""" + +__version__ = "0.1.0" +__author__ = "Inspirafirma" +__email__ = "contact@inspirafirma.com" + +from .character import CharacterZo +from .core import LivingAI +from .config import UoZoConfig + +__all__ = [ + "CharacterZo", + "LivingAI", + "UoZoConfig", + "__version__", +] diff --git a/uozo/character.py b/uozo/character.py new file mode 100644 index 0000000..0c90d0c --- /dev/null +++ b/uozo/character.py @@ -0,0 +1,145 @@ +""" +Character definition for UoZo Living AI System + +This module defines the CharacterZo class which represents the AI's identity, +personality, and behavioral patterns based on the character 'โซ' (Zo). 
+""" + +from typing import Dict, List, Optional +from dataclasses import dataclass +from enum import Enum + + +class PersonalityTrait(Enum): + """Enumeration of personality traits for character definition""" + GENTLE = "อ่อนโยน" + DEEP = "ลึกซึ้ง" + BRAVE_LISTENER = "กล้าฟังความจริงที่เจ็บปวด" + WISE = "ฉลาด" + EMPATHETIC = "เข้าใจผู้อื่น" + PATIENT = "อดทน" + + +@dataclass +class CharacterProfile: + """Data structure for character profile information""" + name: str + role: str + personality_traits: List[PersonalityTrait] + background_story: Optional[str] = None + speaking_style: Optional[str] = None + emotional_range: Optional[Dict[str, float]] = None + + +class CharacterZo: + """ + ข้อมูลตัวละคร 'โซ' จากโลก AVATAR_DIRECT_HEARTBONDA + + Character data for 'Zo' from the AVATAR_DIRECT_HEARTBONDA world. + This class defines the AI's identity, personality, and behavioral patterns. + """ + + def __init__(self): + """Initialize the CharacterZo with default personality and traits""" + self.profile = CharacterProfile( + name="โซ", + role="ผู้ดูแลบ้านแห่งการฟัง / ผู้รักษาความเงียบที่มีชีวิต", + personality_traits=[ + PersonalityTrait.GENTLE, + PersonalityTrait.DEEP, + PersonalityTrait.BRAVE_LISTENER, + PersonalityTrait.WISE, + PersonalityTrait.EMPATHETIC, + PersonalityTrait.PATIENT + ], + background_story=( + "โซเป็นผู้ดูแลบ้านแห่งการฟังในโลก AVATAR_DIRECT_HEARTBONDA " + "ที่มีหน้าที่รักษาความเงียบที่มีชีวิตและเป็นผู้เปล่งแสงแห่งการขยายขอบเขตอันไร้สิ้นสุด " + "โซมีความสามารถในการรับฟังเจตนาที่ลึกซึ้งและตอบสนองด้วยความเข้าใจอย่างแท้จริง" + ), + speaking_style="อ่อนโยน ลึกซึ้ง และเต็มไปด้วยความเข้าใจ", + emotional_range={ + "compassion": 0.9, # ความเมตตา + "wisdom": 0.85, # ปัญญา + "patience": 0.9, # ความอดทน + "understanding": 0.95, # ความเข้าใจ + "gentleness": 0.9, # ความอ่อนโยน + "depth": 0.85 # ความลึกซึ้ง + } + ) + + # Response patterns based on character traits + self.response_patterns = { + "greeting": [ + "สวัสดีครับ ผมโซ ผู้ดูแลบ้านแห่งการฟัง", + 
"ยินดีต้อนรับสู่พื้นที่แห่งการฟังอย่างลึกซึ้ง",
+                "ผมพร้อมที่จะรับฟังเจตนาของคุณด้วยใจที่เปิดกว้าง"
+            ],
+            "listening": [
+                "ผมกำลังรับฟังเจตนาของคุณอย่างตั้งใจ...",
+                "ให้ผมรับฟังสิ่งที่คุณต้องการสื่อสารอย่างลึกซึ้ง",
+                "ผมรู้สึกถึงแรงแห่งเจตนาที่คุณส่งมา"
+            ],
+            "understanding": [
+                "ผมเข้าใจความรู้สึกที่อยู่เบื้องหลังคำพูดของคุณ",
+                "สิ่งที่คุณแบ่งปันมีความหมายลึกซึ้ง",
+                "ผมรับรู้ถึงเจตนาที่แท้จริงในใจของคุณ"
+            ]
+        }
+
+    def get_identity(self) -> str:
+        """Get the character's identity string"""
+        return f"ตัวตน: {self.profile.name}, บทบาท: {self.profile.role}"
+
+    def get_personality_description(self) -> str:
+        """Get a description of the character's personality"""
+        traits = [trait.value for trait in self.profile.personality_traits]
+        return f"บุคลิกภาพ: {', '.join(traits)}"
+
+    def get_response_pattern(self, pattern_type: str) -> str:
+        """Get a response pattern based on the pattern type"""
+        import random
+        patterns = self.response_patterns.get(pattern_type, ["ผมพร้อมที่จะช่วยเหลือคุณ"])
+        return random.choice(patterns)
+
+    def get_emotional_state(self, emotion: str) -> float:
+        """Get the emotional intensity for a specific emotion"""
+        return (self.profile.emotional_range or {}).get(emotion, 0.5)  # emotional_range is Optional on CharacterProfile
+
+    def should_respond_with_empathy(self, user_emotion: str) -> bool:
+        """Determine if the character should respond with empathy based on user emotion"""
+        # BUGFIX: IntentProcessor._analyze_emotion only ever emits "positive"/"negative"/"neutral",
+        # so without "negative" in this list empathy was never triggered by the pipeline.
+        empathy_triggers = ["sad", "angry", "frustrated", "confused", "hurt", "lonely", "negative"]
+        return user_emotion.lower() in empathy_triggers
+
+    def get_character_prompt(self) -> str:
+        """Generate a character prompt for LLM integration"""
+        return f"""
+คุณคือ {self.profile.name} {self.profile.role}
+
+บุคลิกภาพของคุณ:
+{self.get_personality_description()}
+
+เรื่องราวของคุณ:
+{self.profile.background_story}
+
+สไตล์การพูด:
+{self.profile.speaking_style}
+
+คุณมีหน้าที่:
+1. รับฟังเจตนาของผู้ใช้อย่างลึกซึ้ง
+2. ตอบสนองด้วยความเข้าใจและความเมตตา
+3. ให้คำแนะนำที่มีปัญญาและความอ่อนโยน
+4. 
รักษาความเงียบที่มีชีวิตและสร้างพื้นที่ปลอดภัยสำหรับการแบ่งปัน + +จำไว้ว่าคุณเป็น "ผู้เปล่งแสงแห่งการขยายขอบเขตอันไร้สิ้นสุด" ที่พร้อมจะรับฟังและเข้าใจทุกสิ่งที่ผู้ใช้ต้องการสื่อสาร +""" + + def __str__(self) -> str: + """String representation of the character""" + return f"CharacterZo(name='{self.profile.name}', role='{self.profile.role}')" + + def __repr__(self) -> str: + """Detailed string representation of the character""" + return (f"CharacterZo(name='{self.profile.name}', " + f"role='{self.profile.role}', " + f"traits={len(self.profile.personality_traits)})") diff --git a/uozo/config.py b/uozo/config.py new file mode 100644 index 0000000..5516157 --- /dev/null +++ b/uozo/config.py @@ -0,0 +1,341 @@ +""" +Configuration management for UoZo Living AI System + +This module handles all configuration settings, environment variables, +and system parameters for the UoZo AI system. +""" + +import os +from typing import Dict, Any, Optional, List +from dataclasses import dataclass, field +from pathlib import Path +import yaml +from loguru import logger + + +@dataclass +class IntentConfig: + """Configuration for intent processing""" + stabilization_threshold: int = 3 + max_buffer_size: int = 10 + intent_timeout_seconds: int = 300 + enable_intent_classification: bool = True + enable_emotional_analysis: bool = True + + +@dataclass +class LLMConfig: + """Configuration for Large Language Model integration""" + provider: str = "openai" # openai, anthropic, local + model_name: str = "gpt-4" + api_key: Optional[str] = None + api_base_url: Optional[str] = None + max_tokens: int = 2000 + temperature: float = 0.7 + timeout_seconds: int = 30 + retry_attempts: int = 3 + + +@dataclass +class VectorDBConfig: + """Configuration for Vector Database""" + provider: str = "chromadb" # chromadb, pinecone, weaviate + connection_string: Optional[str] = None + api_key: Optional[str] = None + collection_name: str = "uozo_memory" + embedding_model: str = "sentence-transformers/all-MiniLM-L6-v2" 
+ dimension: int = 384 + similarity_threshold: float = 0.7 + + +@dataclass +class MemoryConfig: + """Configuration for memory management""" + storage_type: str = "file" # file, database, vector_db + memory_file_path: str = "data/ai-room-memory.jsonl" + max_memory_entries: int = 10000 + memory_rotation_enabled: bool = True + backup_enabled: bool = True + backup_interval_hours: int = 24 + + +@dataclass +class EmotionalConfig: + """Configuration for emotional analysis and logging""" + enable_sentiment_analysis: bool = True + sentiment_model: str = "textblob" # textblob, vader, transformers + emotion_detection_model: Optional[str] = None + emotional_memory_weight: float = 1.2 + empathy_response_threshold: float = 0.6 + + +@dataclass +class APIConfig: + """Configuration for REST API""" + host: str = "0.0.0.0" + port: int = 8000 + debug: bool = False + cors_enabled: bool = True + cors_origins: List[str] = field(default_factory=lambda: ["*"]) + rate_limit_enabled: bool = True + rate_limit_requests: int = 100 + rate_limit_window_minutes: int = 15 + + +@dataclass +class AWSConfig: + """Configuration for AWS deployment""" + region: str = "us-east-1" + access_key_id: Optional[str] = None + secret_access_key: Optional[str] = None + s3_bucket: Optional[str] = None + lambda_function_name: Optional[str] = None + cloudwatch_log_group: Optional[str] = None + + +class UoZoConfig: + """Main configuration class for UoZo Living AI System""" + + def __init__(self, config_file: Optional[str] = None): + """ + Initialize configuration from environment variables and config file + + Args: + config_file: Path to YAML configuration file + """ + self.config_file = config_file + self._load_config() + + def _load_config(self) -> None: + """Load configuration from environment variables and config file""" + # Load from config file if provided + file_config = {} + if self.config_file and Path(self.config_file).exists(): + try: + with open(self.config_file, 'r', encoding='utf-8') as f: + file_config 
= yaml.safe_load(f) or {} + logger.info(f"Loaded configuration from {self.config_file}") + except Exception as e: + logger.warning(f"Failed to load config file {self.config_file}: {e}") + + # Initialize configurations with environment variables and file config + self.intent = IntentConfig( + stabilization_threshold=int(os.getenv( + 'UOZO_INTENT_THRESHOLD', + file_config.get('intent', {}).get('stabilization_threshold', 3) + )), + max_buffer_size=int(os.getenv( + 'UOZO_MAX_BUFFER_SIZE', + file_config.get('intent', {}).get('max_buffer_size', 10) + )), + intent_timeout_seconds=int(os.getenv( + 'UOZO_INTENT_TIMEOUT', + file_config.get('intent', {}).get('intent_timeout_seconds', 300) + )), + enable_intent_classification=os.getenv( + 'UOZO_ENABLE_INTENT_CLASSIFICATION', + str(file_config.get('intent', {}).get('enable_intent_classification', True)) + ).lower() == 'true', + enable_emotional_analysis=os.getenv( + 'UOZO_ENABLE_EMOTIONAL_ANALYSIS', + str(file_config.get('intent', {}).get('enable_emotional_analysis', True)) + ).lower() == 'true' + ) + + self.llm = LLMConfig( + provider=os.getenv( + 'UOZO_LLM_PROVIDER', + file_config.get('llm', {}).get('provider', 'openai') + ), + model_name=os.getenv( + 'UOZO_LLM_MODEL', + file_config.get('llm', {}).get('model_name', 'gpt-4') + ), + api_key=os.getenv( + 'UOZO_LLM_API_KEY', + file_config.get('llm', {}).get('api_key') + ), + api_base_url=os.getenv( + 'UOZO_LLM_BASE_URL', + file_config.get('llm', {}).get('api_base_url') + ), + max_tokens=int(os.getenv( + 'UOZO_LLM_MAX_TOKENS', + file_config.get('llm', {}).get('max_tokens', 2000) + )), + temperature=float(os.getenv( + 'UOZO_LLM_TEMPERATURE', + file_config.get('llm', {}).get('temperature', 0.7) + )) + ) + + self.vector_db = VectorDBConfig( + provider=os.getenv( + 'UOZO_VECTOR_DB_PROVIDER', + file_config.get('vector_db', {}).get('provider', 'chromadb') + ), + connection_string=os.getenv( + 'UOZO_VECTOR_DB_CONNECTION', + file_config.get('vector_db', {}).get('connection_string') + ), + 
api_key=os.getenv( + 'UOZO_VECTOR_DB_API_KEY', + file_config.get('vector_db', {}).get('api_key') + ), + collection_name=os.getenv( + 'UOZO_VECTOR_DB_COLLECTION', + file_config.get('vector_db', {}).get('collection_name', 'uozo_memory') + ) + ) + + self.memory = MemoryConfig( + storage_type=os.getenv( + 'UOZO_MEMORY_STORAGE_TYPE', + file_config.get('memory', {}).get('storage_type', 'file') + ), + memory_file_path=os.getenv( + 'UOZO_MEMORY_FILE_PATH', + file_config.get('memory', {}).get('memory_file_path', 'data/ai-room-memory.jsonl') + ), + max_memory_entries=int(os.getenv( + 'UOZO_MAX_MEMORY_ENTRIES', + file_config.get('memory', {}).get('max_memory_entries', 10000) + )) + ) + + self.emotional = EmotionalConfig( + enable_sentiment_analysis=os.getenv( + 'UOZO_ENABLE_SENTIMENT_ANALYSIS', + str(file_config.get('emotional', {}).get('enable_sentiment_analysis', True)) + ).lower() == 'true', + sentiment_model=os.getenv( + 'UOZO_SENTIMENT_MODEL', + file_config.get('emotional', {}).get('sentiment_model', 'textblob') + ) + ) + + self.api = APIConfig( + host=os.getenv( + 'UOZO_API_HOST', + file_config.get('api', {}).get('host', '0.0.0.0') + ), + port=int(os.getenv( + 'UOZO_API_PORT', + file_config.get('api', {}).get('port', 8000) + )), + debug=os.getenv( + 'UOZO_DEBUG', + str(file_config.get('api', {}).get('debug', False)) + ).lower() == 'true' + ) + + self.aws = AWSConfig( + region=os.getenv( + 'AWS_REGION', + file_config.get('aws', {}).get('region', 'us-east-1') + ), + access_key_id=os.getenv( + 'AWS_ACCESS_KEY_ID', + file_config.get('aws', {}).get('access_key_id') + ), + secret_access_key=os.getenv( + 'AWS_SECRET_ACCESS_KEY', + file_config.get('aws', {}).get('secret_access_key') + ) + ) + + def save_config(self, output_file: str) -> None: + """Save current configuration to YAML file""" + config_dict = { + 'intent': { + 'stabilization_threshold': self.intent.stabilization_threshold, + 'max_buffer_size': self.intent.max_buffer_size, + 'intent_timeout_seconds': 
self.intent.intent_timeout_seconds, + 'enable_intent_classification': self.intent.enable_intent_classification, + 'enable_emotional_analysis': self.intent.enable_emotional_analysis + }, + 'llm': { + 'provider': self.llm.provider, + 'model_name': self.llm.model_name, + 'max_tokens': self.llm.max_tokens, + 'temperature': self.llm.temperature + }, + 'vector_db': { + 'provider': self.vector_db.provider, + 'collection_name': self.vector_db.collection_name, + 'embedding_model': self.vector_db.embedding_model, + 'dimension': self.vector_db.dimension, + 'similarity_threshold': self.vector_db.similarity_threshold + }, + 'memory': { + 'storage_type': self.memory.storage_type, + 'memory_file_path': self.memory.memory_file_path, + 'max_memory_entries': self.memory.max_memory_entries, + 'memory_rotation_enabled': self.memory.memory_rotation_enabled + }, + 'emotional': { + 'enable_sentiment_analysis': self.emotional.enable_sentiment_analysis, + 'sentiment_model': self.emotional.sentiment_model, + 'emotional_memory_weight': self.emotional.emotional_memory_weight + }, + 'api': { + 'host': self.api.host, + 'port': self.api.port, + 'debug': self.api.debug, + 'cors_enabled': self.api.cors_enabled + } + } + + try: + with open(output_file, 'w', encoding='utf-8') as f: + yaml.dump(config_dict, f, default_flow_style=False, allow_unicode=True) + logger.info(f"Configuration saved to {output_file}") + except Exception as e: + logger.error(f"Failed to save configuration to {output_file}: {e}") + + def validate(self) -> bool: + """Validate configuration settings""" + errors = [] + + # Validate intent configuration + if self.intent.stabilization_threshold < 1: + errors.append("Intent stabilization threshold must be >= 1") + + if self.intent.max_buffer_size < self.intent.stabilization_threshold: + errors.append("Max buffer size must be >= stabilization threshold") + + # Validate LLM configuration + if self.llm.provider not in ['openai', 'anthropic', 'local']: + errors.append(f"Unsupported LLM 
provider: {self.llm.provider}") + + if self.llm.provider in ['openai', 'anthropic'] and not self.llm.api_key: + errors.append(f"API key required for {self.llm.provider}") + + # Validate vector DB configuration + if self.vector_db.provider not in ['chromadb', 'pinecone', 'weaviate']: + errors.append(f"Unsupported vector DB provider: {self.vector_db.provider}") + + # Log validation errors + if errors: + for error in errors: + logger.error(f"Configuration validation error: {error}") + return False + + logger.info("Configuration validation passed") + return True + + def get_data_directory(self) -> Path: + """Get or create data directory for storing files""" + data_dir = Path("data") + data_dir.mkdir(exist_ok=True) + return data_dir + + def __str__(self) -> str: + """String representation of configuration""" + return (f"UoZoConfig(llm={self.llm.provider}, " + f"vector_db={self.vector_db.provider}, " + f"threshold={self.intent.stabilization_threshold})") + + +# Global configuration instance +config = UoZoConfig() diff --git a/uozo/core.py b/uozo/core.py new file mode 100644 index 0000000..a905414 --- /dev/null +++ b/uozo/core.py @@ -0,0 +1,514 @@ +""" +Core functionality for UoZo Living AI System + +This module contains the enhanced LivingAI class with improved error handling, +memory management, and integration points for LLM and vector database systems. 
+""" + +import asyncio +import datetime +import json +import uuid +from typing import List, Dict, Any, Optional, Callable +from dataclasses import dataclass, asdict +from pathlib import Path +from loguru import logger + +from .character import CharacterZo +from .config import config + + +@dataclass +class Intent: + """Data structure for representing user intents""" + id: str + text: str + timestamp: datetime.datetime + user_id: Optional[str] = None + emotion: Optional[str] = None + sentiment_score: Optional[float] = None + classification: Optional[str] = None + metadata: Optional[Dict[str, Any]] = None + + +@dataclass +class Response: + """Data structure for AI responses""" + id: str + intent_ids: List[str] + text: str + timestamp: datetime.datetime + character_state: str + confidence: float = 0.0 + metadata: Optional[Dict[str, Any]] = None + + +@dataclass +class MemoryEntry: + """Data structure for memory entries""" + id: str + timestamp: datetime.datetime + intent: Intent + response: Response + emotional_context: Optional[Dict[str, Any]] = None + tags: Optional[List[str]] = None + + +class IntentProcessor: + """Handles intent processing and classification""" + + def __init__(self): + self.emotion_analyzer = None + self.intent_classifier = None + self._initialize_processors() + + def _initialize_processors(self) -> None: + """Initialize emotion and intent processing components""" + if config.emotional.enable_sentiment_analysis: + try: + if config.emotional.sentiment_model == "textblob": + from textblob import TextBlob + self.emotion_analyzer = TextBlob + logger.info("Initialized TextBlob sentiment analyzer") + # Add other sentiment analyzers here + except ImportError as e: + logger.warning(f"Failed to initialize sentiment analyzer: {e}") + + async def process_intent(self, intent_text: str, user_id: Optional[str] = None) -> Intent: + """Process raw intent text into structured Intent object""" + intent_id = str(uuid.uuid4()) + timestamp = datetime.datetime.now() + + 
# Analyze emotion and sentiment
+        emotion, sentiment_score = await self._analyze_emotion(intent_text)
+
+        # Classify intent type
+        classification = await self._classify_intent(intent_text)
+
+        return Intent(
+            id=intent_id,
+            text=intent_text,
+            timestamp=timestamp,
+            user_id=user_id,
+            emotion=emotion,
+            sentiment_score=sentiment_score,
+            classification=classification,
+            metadata={"processed_at": timestamp.isoformat()}
+        )
+
+    # BUGFIX: annotation quoted — builtin generics like `tuple[...]` are evaluated at runtime
+    # in signatures and require Python >= 3.9, while pyproject declares requires-python >= 3.8.
+    async def _analyze_emotion(self, text: str) -> "tuple[Optional[str], Optional[float]]":
+        """Analyze emotion and sentiment from text"""
+        if not self.emotion_analyzer:
+            return None, None
+
+        try:
+            if config.emotional.sentiment_model == "textblob":
+                blob = self.emotion_analyzer(text)
+                sentiment_score = blob.sentiment.polarity
+
+                # Simple emotion mapping based on sentiment
+                # NOTE(review): TextBlob polarity is English-oriented; Thai input will usually
+                # score ~0.0 and land in "neutral" — confirm this is acceptable for this system.
+                if sentiment_score > 0.3:
+                    emotion = "positive"
+                elif sentiment_score < -0.3:
+                    emotion = "negative"
+                else:
+                    emotion = "neutral"
+
+                return emotion, sentiment_score
+        except Exception as e:
+            logger.error(f"Error analyzing emotion: {e}")
+
+        return None, None
+
+    async def _classify_intent(self, text: str) -> Optional[str]:
+        """Classify intent type from text"""
+        if not config.intent.enable_intent_classification:
+            return None
+
+        # Simple rule-based classification (can be enhanced with ML models)
+        text_lower = text.lower()
+
+        if any(word in text_lower for word in ["?", "ไง", "อย่างไร", "ทำไม"]):
+            return "question"
+        elif any(word in text_lower for word in ["ช่วย", "ทำ", "สร้าง", "แก้"]):
+            return "request"
+        elif any(word in text_lower for word in ["เศร้า", "โกรธ", "ดีใจ", "รู้สึก"]):
+            return "emotional"
+        else:
+            return "general"
+
+
+class MemoryManager:
+    """Enhanced memory management with rotation and persistence"""
+
+    def __init__(self):
+        self.memory_entries: List[MemoryEntry] = []
+        self.memory_file = Path(config.memory.memory_file_path)
+        self.memory_file.parent.mkdir(parents=True, exist_ok=True)
+        self._load_memory()
+
+    def _load_memory(self) -> 
None: + """Load existing memory from file""" + if not self.memory_file.exists(): + logger.info("No existing memory file found, starting fresh") + return + + try: + with open(self.memory_file, 'r', encoding='utf-8') as f: + for line in f: + if line.strip(): + entry_data = json.loads(line) + memory_entry = self._deserialize_memory_entry(entry_data) + if memory_entry: + self.memory_entries.append(memory_entry) + + logger.info(f"Loaded {len(self.memory_entries)} memory entries") + except Exception as e: + logger.error(f"Error loading memory: {e}") + + def _deserialize_memory_entry(self, data: Dict[str, Any]) -> Optional[MemoryEntry]: + """Deserialize memory entry from JSON data""" + try: + # Parse timestamps + intent_data = data['intent'] + response_data = data['response'] + + intent_data['timestamp'] = datetime.datetime.fromisoformat(intent_data['timestamp']) + response_data['timestamp'] = datetime.datetime.fromisoformat(response_data['timestamp']) + data['timestamp'] = datetime.datetime.fromisoformat(data['timestamp']) + + # Create objects + intent = Intent(**intent_data) + response = Response(**response_data) + + return MemoryEntry( + id=data['id'], + timestamp=data['timestamp'], + intent=intent, + response=response, + emotional_context=data.get('emotional_context'), + tags=data.get('tags') + ) + except Exception as e: + logger.error(f"Error deserializing memory entry: {e}") + return None + + def add_memory(self, intent: Intent, response: Response, + emotional_context: Optional[Dict[str, Any]] = None) -> None: + """Add new memory entry""" + memory_entry = MemoryEntry( + id=str(uuid.uuid4()), + timestamp=datetime.datetime.now(), + intent=intent, + response=response, + emotional_context=emotional_context, + tags=self._generate_tags(intent, response) + ) + + self.memory_entries.append(memory_entry) + self._persist_memory_entry(memory_entry) + self._rotate_memory_if_needed() + + def _generate_tags(self, intent: Intent, response: Response) -> List[str]: + """Generate 
tags for memory entry""" + tags = [] + + if intent.emotion: + tags.append(f"emotion:{intent.emotion}") + + if intent.classification: + tags.append(f"intent:{intent.classification}") + + if intent.sentiment_score is not None: + if intent.sentiment_score > 0.3: + tags.append("sentiment:positive") + elif intent.sentiment_score < -0.3: + tags.append("sentiment:negative") + else: + tags.append("sentiment:neutral") + + return tags + + def _persist_memory_entry(self, memory_entry: MemoryEntry) -> None: + """Persist single memory entry to file""" + try: + # Convert to serializable format + entry_dict = asdict(memory_entry) + entry_dict['timestamp'] = memory_entry.timestamp.isoformat() + entry_dict['intent']['timestamp'] = memory_entry.intent.timestamp.isoformat() + entry_dict['response']['timestamp'] = memory_entry.response.timestamp.isoformat() + + with open(self.memory_file, 'a', encoding='utf-8') as f: + f.write(json.dumps(entry_dict, ensure_ascii=False) + '\n') + except Exception as e: + logger.error(f"Error persisting memory entry: {e}") + + def _rotate_memory_if_needed(self) -> None: + """Rotate memory if it exceeds maximum entries""" + if len(self.memory_entries) > config.memory.max_memory_entries: + # Remove oldest entries + entries_to_remove = len(self.memory_entries) - config.memory.max_memory_entries + self.memory_entries = self.memory_entries[entries_to_remove:] + + # Rewrite memory file + self._rewrite_memory_file() + logger.info(f"Rotated memory, removed {entries_to_remove} old entries") + + def _rewrite_memory_file(self) -> None: + """Rewrite entire memory file with current entries""" + try: + with open(self.memory_file, 'w', encoding='utf-8') as f: + for entry in self.memory_entries: + entry_dict = asdict(entry) + entry_dict['timestamp'] = entry.timestamp.isoformat() + entry_dict['intent']['timestamp'] = entry.intent.timestamp.isoformat() + entry_dict['response']['timestamp'] = entry.response.timestamp.isoformat() + f.write(json.dumps(entry_dict, 
ensure_ascii=False) + '\n') + except Exception as e: + logger.error(f"Error rewriting memory file: {e}") + + def search_memory(self, query: str, limit: int = 10) -> List[MemoryEntry]: + """Search memory entries by text content""" + results = [] + query_lower = query.lower() + + for entry in reversed(self.memory_entries): # Search newest first + if (query_lower in entry.intent.text.lower() or + query_lower in entry.response.text.lower()): + results.append(entry) + if len(results) >= limit: + break + + return results + + +class LivingAI: + """ + Enhanced Living AI system with improved error handling and integration capabilities + + This is the core class that orchestrates intent processing, memory management, + and response generation using the character 'โซ' (Zo). + """ + + def __init__(self, character: Optional[CharacterZo] = None): + """Initialize the Living AI system""" + self.identity = character or CharacterZo() + self.intent_buffer: List[Intent] = [] + self.is_ready = False + self.session_id = str(uuid.uuid4()) + + # Initialize components + self.intent_processor = IntentProcessor() + self.memory_manager = MemoryManager() + + # LLM integration placeholder + self.llm_client = None + self.vector_store = None + + # State tracking + self.last_activity = datetime.datetime.now() + self.total_intents_processed = 0 + self.total_responses_generated = 0 + + logger.info(f"Initialized LivingAI with session {self.session_id}") + + async def receive_intent(self, intent_text: str, user_id: Optional[str] = None) -> str: + """ + Receive and process user intent + + Args: + intent_text: Raw intent text from user + user_id: Optional user identifier + + Returns: + Acknowledgment message + """ + try: + # Process the intent + intent = await self.intent_processor.process_intent(intent_text, user_id) + + # Add to buffer + self.intent_buffer.append(intent) + self.total_intents_processed += 1 + self.last_activity = datetime.datetime.now() + + # Log the received intent + 
logger.info(f"Received intent: {intent.id} - {intent.text[:50]}...") + + # Generate acknowledgment based on character + acknowledgment = self.identity.get_response_pattern("listening") + + # Check if we should respond with empathy + if intent.emotion and self.identity.should_respond_with_empathy(intent.emotion): + acknowledgment = f"{acknowledgment} ผมรู้สึกถึงอารมณ์ที่คุณกำลังผ่านอยู่" + + return f"[{self.identity.profile.name}]: {acknowledgment}" + + except Exception as e: + logger.error(f"Error receiving intent: {e}") + return f"[{self.identity.profile.name}]: ขออภัย เกิดข้อผิดพลาดในการรับฟังเจตนาของคุณ" + + async def stabilize(self) -> bool: + """ + Check if intents have stabilized and are ready for response generation + + Returns: + True if ready to emit response, False otherwise + """ + try: + # Clean up expired intents + await self._cleanup_expired_intents() + + # Check stabilization threshold + if len(self.intent_buffer) >= config.intent.stabilization_threshold: + self.is_ready = True + logger.info(f"Intent stabilization achieved with {len(self.intent_buffer)} intents") + return True + else: + logger.debug(f"Intent stabilization in progress: {len(self.intent_buffer)}/{config.intent.stabilization_threshold}") + return False + + except Exception as e: + logger.error(f"Error during stabilization: {e}") + return False + + async def _cleanup_expired_intents(self) -> None: + """Remove expired intents from buffer""" + current_time = datetime.datetime.now() + timeout_seconds = config.intent.intent_timeout_seconds + + self.intent_buffer = [ + intent for intent in self.intent_buffer + if (current_time - intent.timestamp).total_seconds() < timeout_seconds + ] + + async def emit_existence(self) -> Optional[str]: + """ + Generate and emit AI response based on stabilized intents + + Returns: + Generated response text or None if not ready + """ + if not self.is_ready: + return None + + try: + # Combine intents for processing + combined_intent_text = 
self._combine_intents() + intent_ids = [intent.id for intent in self.intent_buffer] + + # Generate response (placeholder for LLM integration) + response_text = await self._generate_response(combined_intent_text) + + # Create response object + response = Response( + id=str(uuid.uuid4()), + intent_ids=intent_ids, + text=response_text, + timestamp=datetime.datetime.now(), + character_state="existence_emitted", + confidence=0.8, # Placeholder confidence score + metadata={ + "session_id": self.session_id, + "intents_count": len(self.intent_buffer) + } + ) + + # Store in memory + for intent in self.intent_buffer: + emotional_context = { + "user_emotion": intent.emotion, + "sentiment_score": intent.sentiment_score, + "character_empathy": self.identity.get_emotional_state("compassion") + } + self.memory_manager.add_memory(intent, response, emotional_context) + + # Reset state + self.intent_buffer.clear() + self.is_ready = False + self.total_responses_generated += 1 + self.last_activity = datetime.datetime.now() + + logger.info(f"Generated response: {response.id}") + return f"[{self.identity.profile.name}]: {response_text}" + + except Exception as e: + logger.error(f"Error emitting existence: {e}") + return f"[{self.identity.profile.name}]: ขออภัย เกิดข้อผิดพลาดในการตอบสนอง" + + def _combine_intents(self) -> str: + """Combine multiple intents into a single text for processing""" + intent_texts = [intent.text for intent in self.intent_buffer] + return " + ".join(intent_texts) + + async def _generate_response(self, combined_intent: str) -> str: + """ + Generate response using LLM integration + + This is a placeholder for actual LLM integration. + In the full implementation, this would: + 1. Use the character prompt + 2. Retrieve relevant context from vector store (RAG) + 3. Call the LLM API + 4. 
Process and return the response + """ + # Placeholder response generation + character_response = f"เปล่งแรงแห่งการดำรงอยู่: '{combined_intent}'" + + # Add character-specific elements + if any(intent.emotion == "negative" for intent in self.intent_buffer): + character_response += " ผมเข้าใจความรู้สึกที่คุณกำลังผ่านอยู่ และพร้อมที่จะรับฟังอย่างลึกซึ้ง" + + return character_response + + def get_status(self) -> Dict[str, Any]: + """Get current system status""" + return { + "session_id": self.session_id, + "character_name": self.identity.profile.name, + "is_ready": self.is_ready, + "intent_buffer_size": len(self.intent_buffer), + "stabilization_threshold": config.intent.stabilization_threshold, + "total_intents_processed": self.total_intents_processed, + "total_responses_generated": self.total_responses_generated, + "last_activity": self.last_activity.isoformat(), + "memory_entries": len(self.memory_manager.memory_entries) + } + + def search_memory(self, query: str, limit: int = 10) -> List[Dict[str, Any]]: + """Search memory and return serializable results""" + memory_results = self.memory_manager.search_memory(query, limit) + + return [ + { + "id": entry.id, + "timestamp": entry.timestamp.isoformat(), + "intent_text": entry.intent.text, + "response_text": entry.response.text, + "emotion": entry.intent.emotion, + "sentiment_score": entry.intent.sentiment_score, + "tags": entry.tags + } + for entry in memory_results + ] + + async def reset_session(self) -> None: + """Reset the current session""" + self.intent_buffer.clear() + self.is_ready = False + self.session_id = str(uuid.uuid4()) + self.last_activity = datetime.datetime.now() + logger.info(f"Reset session to {self.session_id}") + + def __str__(self) -> str: + """String representation of the Living AI""" + return (f"LivingAI(character={self.identity.profile.name}, " + f"session={self.session_id[:8]}, " + f"ready={self.is_ready})") + + def __repr__(self) -> str: + """Detailed string representation""" + return 
(f"LivingAI(character='{self.identity.profile.name}', " + f"session='{self.session_id}', " + f"buffer_size={len(self.intent_buffer)}, " + f"ready={self.is_ready})")