Deploy TextAI v2 - Clean architecture
Files changed:

- api/__init__.py +42 -0
- api/endpoints.py +372 -0
- app.py +201 -0
- core/__init__.py +15 -0
- core/config.py +29 -0
- core/logger.py +84 -0
- core/models.py +494 -0
- core/sessions.py +234 -0
- core/state.py +305 -0
- requirements.txt +4 -0
- ui/__init__.py +9 -0
- ui/chat.py +386 -0
- ui/models.py +430 -0
- ui/theme.py +607 -0

api/__init__.py  (ADDED, +42 lines)

"""
TextAI v2 API Module
RESTful API endpoints for both Gradio and local app usage
"""
from .endpoints import (
    # Model APIs
    api_list_models,
    api_get_model,
    api_load_model,
    api_unload_model,
    api_delete_model,
    api_search_models,
    api_get_model_files,
    api_download_model,

    # Session APIs
    api_list_sessions,
    api_get_session,
    api_create_session,
    api_delete_session,
    api_rename_session,
    api_clear_session,

    # Chat APIs
    api_chat,
    api_inference,
    api_chat_with_config,

    # System APIs
    api_get_status,
    api_get_backends,
    api_health,
)

__all__ = [
    'api_list_models', 'api_get_model', 'api_load_model', 'api_unload_model',
    'api_delete_model', 'api_search_models', 'api_get_model_files', 'api_download_model',
    'api_list_sessions', 'api_get_session', 'api_create_session',
    'api_delete_session', 'api_rename_session', 'api_clear_session',
    'api_chat', 'api_inference', 'api_chat_with_config',
    'api_get_status', 'api_get_backends', 'api_health',
]

api/endpoints.py  (ADDED, +372 lines)

"""
API Endpoints
All return JSON strings for consistency.
Can be used by Gradio UI or external apps.
"""
import json
from typing import Optional

# Absolute imports: `api` and `core` are sibling top-level packages (app.py
# imports both directly), so relative `..core` imports would not resolve here.
from core.models import get_model_service
from core.sessions import get_session_service
from core.state import get_state
from core.logger import logger
from core.config import VERSION


def _json_response(success: bool, data: Optional[dict] = None, error: Optional[str] = None) -> str:
    """Standard JSON response format"""
    response = {"success": success}
    if data:
        response.update(data)
    if error:
        response["error"] = error
    return json.dumps(response, indent=2)


# ──────────────────────────────────────────────────────────────────────
# MODEL APIs
# ──────────────────────────────────────────────────────────────────────

def api_list_models() -> str:
    """List all installed models"""
    service = get_model_service()
    models = service.get_installed_models()
    loaded_id = get_state().get_loaded_model_id()

    return _json_response(True, {
        "models": models,
        "count": len(models),
        "loaded_model_id": loaded_id
    })


def api_get_model(model_id: str) -> str:
    """Get specific model details"""
    model = get_state().get_model_by_id(model_id)
    if model:
        return _json_response(True, {"model": model})
    return _json_response(False, error="Model not found")


def api_load_model(model_id: str) -> str:
    """Load a model"""
    service = get_model_service()
    result = service.load_model(model_id)
    return json.dumps(result)


def api_unload_model() -> str:
    """Unload current model"""
    service = get_model_service()
    service.unload_model()
    return _json_response(True, {"message": "Model unloaded"})


def api_delete_model(model_id: str) -> str:
    """Delete an installed model"""
    service = get_model_service()
    result = service.delete_model(model_id)
    return json.dumps(result)


def api_search_models(query: str = "", max_params: float = 7.0, limit: int = 20) -> str:
    """Search HuggingFace for models"""
    service = get_model_service()
    results, status = service.search_hf_models(query, max_params, limit)
    return _json_response(True, {
        "results": results,
        "count": len(results),
        "status": status
    })


def api_get_model_files(repo_id: str) -> str:
    """Get available files for a HF model"""
    service = get_model_service()
    files = service.get_hf_model_files(repo_id)
    return _json_response(True, {
        "repo_id": repo_id,
        "files": files,
        "count": len(files)
    })


def api_download_model(repo_id: str, filename: str) -> str:
    """Download a model from HuggingFace"""
    service = get_model_service()
    result = service.download_model(repo_id, filename)
    return json.dumps(result)


# ──────────────────────────────────────────────────────────────────────
# SESSION APIs
# ──────────────────────────────────────────────────────────────────────

def api_list_sessions() -> str:
    """List all sessions"""
    service = get_session_service()
    sessions = service.get_all_sessions()
    return _json_response(True, {
        "sessions": sessions,
        "count": len(sessions),
        "active_session_id": get_state().get_active_session_id()
    })


def api_get_session(session_id: str) -> str:
    """Get session with messages"""
    service = get_session_service()
    session = service.get_session(session_id)
    if session:
        return _json_response(True, {"session": session})
    return _json_response(False, error="Session not found")


def api_create_session(
    title: str = "",
    session_type: str = "chat",
    system_prompt: str = ""
) -> str:
    """Create new session"""
    service = get_session_service()
    session = service.create_session(title, session_type, system_prompt)
    return _json_response(True, {
        "session_id": session["id"],
        "title": session["title"]
    })


def api_delete_session(session_id: str) -> str:
    """Delete a session"""
    service = get_session_service()
    result = service.delete_session(session_id)
    return _json_response(result, {"message": "Deleted" if result else "Not found"})


def api_rename_session(session_id: str, new_title: str) -> str:
    """Rename a session"""
    service = get_session_service()
    result = service.rename_session(session_id, new_title)
    return _json_response(result, {"title": new_title})


def api_clear_session(session_id: str) -> str:
    """Clear session messages"""
    service = get_session_service()
    result = service.clear_session(session_id)
    return _json_response(result, {"message": "Cleared"})


# ──────────────────────────────────────────────────────────────────────
# CHAT / INFERENCE APIs
# ──────────────────────────────────────────────────────────────────────

def api_chat(
    session_id: str,
    message: str,
    max_tokens: int = 512,
    temperature: float = 0.7
) -> str:
    """Send chat message and get response"""
    model_service = get_model_service()
    session_service = get_session_service()

    # Check model loaded
    if not model_service.is_model_loaded():
        return _json_response(False, error="No model loaded")

    # Get session
    session = session_service.get_session(session_id)
    if not session:
        return _json_response(False, error="Session not found")

    # Add user message
    session_service.add_message(session_id, "user", message)

    # Build messages for model
    messages = []
    if session.get("system_prompt"):
        messages.append({"role": "system", "content": session["system_prompt"]})

    for msg in session_service.get_messages(session_id):
        messages.append({"role": msg["role"], "content": msg["content"]})

    # Generate response
    response = model_service.generate(messages, max_tokens, temperature)

    # Add assistant response
    session_service.add_message(session_id, "assistant", response)

    return _json_response(True, {
        "response": response,
        "session_id": session_id
    })


def api_inference(
    prompt: str = "",
    messages: str = "[]",
    system_prompt: str = "",
    max_tokens: int = 512,
    temperature: float = 0.7,
    top_p: float = 0.9,
    context: str = "",
    lora_scale: float = 1.0
) -> str:
    """
    Universal inference endpoint.
    Can use a direct prompt or a message list.
    Supports system prompt, context injection, LoRA scaling (future).
    """
    model_service = get_model_service()

    if not model_service.is_model_loaded():
        return _json_response(False, error="No model loaded")

    try:
        # Parse messages if provided
        msg_list = json.loads(messages) if messages and messages != "[]" else []

        # Build full message list
        full_messages = []

        # System prompt
        if system_prompt:
            full_messages.append({"role": "system", "content": system_prompt})

        # Context injection
        if context:
            full_messages.append({"role": "system", "content": f"Context:\n{context}"})

        # Conversation messages
        full_messages.extend(msg_list)

        # Direct prompt
        if prompt:
            full_messages.append({"role": "user", "content": prompt})

        if not full_messages:
            return _json_response(False, error="No prompt or messages provided")

        # Generate
        response = model_service.generate(full_messages, max_tokens, temperature, top_p)

        loaded_model = model_service.get_loaded_model()

        return _json_response(True, {
            "response": response,
            "model_id": loaded_model["id"] if loaded_model else None,
            "config": {
                "max_tokens": max_tokens,
                "temperature": temperature,
                "top_p": top_p,
                "lora_scale": lora_scale
            }
        })

    except Exception as e:
        logger.error("API", f"Inference error: {e}")
        return _json_response(False, error=str(e))


def api_chat_with_config(
    session_id: str,
    message: str,
    max_tokens: int = 512,
    temperature: float = 0.7,
    top_p: float = 0.9,
    system_prompt_override: str = "",
    context: str = "",
    lora_scale: float = 1.0
) -> str:
    """
    Chat with full configuration options.
    Supports: custom inference params, system prompt override, context injection.
    """
    model_service = get_model_service()
    session_service = get_session_service()

    if not model_service.is_model_loaded():
        return _json_response(False, error="No model loaded")

    session = session_service.get_session(session_id)
    if not session:
        return _json_response(False, error="Session not found")

    # Add user message
    session_service.add_message(session_id, "user", message)

    # Build messages
    messages = []

    # System prompt (override or session default)
    sys_prompt = system_prompt_override or session.get("system_prompt", "")
    if sys_prompt:
        messages.append({"role": "system", "content": sys_prompt})

    # Context injection
    if context:
        messages.append({"role": "system", "content": f"Context:\n{context}"})

    # Conversation history
    for msg in session_service.get_messages(session_id):
        messages.append({"role": msg["role"], "content": msg["content"]})

    # Generate
    response = model_service.generate(messages, max_tokens, temperature, top_p)

    # Add response
    session_service.add_message(session_id, "assistant", response)

    return _json_response(True, {
        "response": response,
        "session_id": session_id,
        "config_used": {
            "max_tokens": max_tokens,
            "temperature": temperature,
            "top_p": top_p,
            "lora_scale": lora_scale,
            "context_provided": bool(context)
        }
    })


# ──────────────────────────────────────────────────────────────────────
# SYSTEM APIs
# ──────────────────────────────────────────────────────────────────────

def api_get_status() -> str:
    """Get current system status"""
    model_service = get_model_service()
    state = get_state()

    loaded = model_service.get_loaded_model()

    return _json_response(True, {
        "version": VERSION,
        "model_loaded": model_service.is_model_loaded(),
        "loaded_model": loaded["name"] if loaded else None,
        "installed_models_count": len(state.get_installed_models()),
        "sessions_count": len(state.get_sessions()),
        "active_session_id": state.get_active_session_id()
    })


def api_get_backends() -> str:
    """Check which inference backends are available"""
    from core.models import _get_llama_cpp, _get_transformers

    return _json_response(True, {
        "backends": {
            "gguf": _get_llama_cpp() is not None,
            "transformers": _get_transformers() is not None
        }
    })


def api_health() -> str:
    """Health check endpoint"""
    return _json_response(True, {
        "status": "healthy",
        "version": VERSION
    })
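
Every endpoint above returns a JSON string rather than a dict, so a caller parses the result before branching on "success". A minimal usage sketch, assuming the script runs at the repo root with at least one model already installed (the sketch itself is not part of the commit):

import json

from api import api_list_models, api_load_model, api_create_session, api_chat

models = json.loads(api_list_models())
if models["success"] and models["count"] > 0:
    model_id = models["models"][0]["id"]              # first installed model
    loaded = json.loads(api_load_model(model_id))
    if loaded.get("success"):
        session = json.loads(api_create_session(title="API demo"))
        reply = json.loads(api_chat(session["session_id"], "Hello!", max_tokens=128))
        print(reply["response"] if reply["success"] else reply["error"])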

app.py  (ADDED, +201 lines)

"""
TextAI v2.0
Clean, Professional AI Chat Interface
"""
import json  # endpoints return JSON strings

import gradio as gr

from core.config import VERSION, APP_TITLE
from core.logger import logger
from core.state import get_state
from ui.theme import THEME_CSS, get_theme
from ui.chat import build_chat_ui
from ui.models import build_model_manager_ui
from api import api_health, api_get_status


def create_app():
    """Create the Gradio application"""

    # Initialize state on startup
    state = get_state()
    state.sync_with_filesystem()
    logger.info("App", f"TextAI v{VERSION} starting")

    with gr.Blocks(
        title=APP_TITLE,
        theme=get_theme(),
        css=THEME_CSS
    ) as app:

        with gr.Tabs() as main_tabs:

            # ──────────────────────────────────────────────────────────
            # TAB 1: CHAT
            # ──────────────────────────────────────────────────────────
            with gr.Tab("Chat", id=0):
                chat_components = build_chat_ui()

            # ──────────────────────────────────────────────────────────
            # TAB 2: MODELS
            # ──────────────────────────────────────────────────────────
            with gr.Tab("Models", id=1):
                model_components = build_model_manager_ui()

            # ──────────────────────────────────────────────────────────
            # TAB 3: TOOLS
            # ──────────────────────────────────────────────────────────
            with gr.Tab("Tools", id=2):
                with gr.Tabs():

                    # System Info
                    with gr.Tab("System"):
                        gr.Markdown("### System Status")
                        btn_status = gr.Button("Get Status", variant="primary")
                        status_output = gr.JSON(label="Status")

                        btn_status.click(
                            lambda: json.loads(api_get_status()),
                            outputs=[status_output]
                        )

                    # Logs
                    with gr.Tab("Logs"):
                        gr.Markdown("### Application Logs")
                        with gr.Row():
                            log_level = gr.Dropdown(
                                choices=["all", "INFO", "WARN", "ERROR", "EVENT"],
                                value="all",
                                label="Level"
                            )
                            log_limit = gr.Number(value=50, label="Limit")
                            btn_refresh_logs = gr.Button("Refresh")
                            btn_clear_logs = gr.Button("Clear", variant="stop")

                        logs_display = gr.TextArea(
                            label="",
                            lines=20,
                            interactive=False
                        )

                        def get_logs(level, limit):
                            from core.logger import logger as log
                            logs = log.get_logs(
                                level=level if level != "all" else None,
                                limit=int(limit)
                            )
                            lines = []
                            for l in reversed(logs):
                                ts = l["timestamp"].split("T")[1][:8]
                                lines.append(f"[{ts}] [{l['level']}] [{l['module']}] {l['message']}")
                            return "\n".join(lines)

                        def clear_logs(level):
                            from core.logger import logger as log
                            log.clear(level if level != "all" else None)
                            return "Logs cleared"

                        btn_refresh_logs.click(
                            get_logs,
                            inputs=[log_level, log_limit],
                            outputs=[logs_display]
                        )

                        btn_clear_logs.click(
                            clear_logs,
                            inputs=[log_level],
                            outputs=[logs_display]
                        )

                    # API Info
                    with gr.Tab("API"):
                        gr.Markdown("### API Endpoints")
                        gr.Markdown("""
                        All endpoints return JSON. Use for integration with local apps.

                        **Models:**
                        - `api_list_models()` - List installed models
                        - `api_load_model(model_id)` - Load a model
                        - `api_unload_model()` - Unload current model
                        - `api_search_models(query, max_params, limit)` - Search HF
                        - `api_download_model(repo_id, filename)` - Download model

                        **Sessions:**
                        - `api_list_sessions()` - List all sessions
                        - `api_create_session(title, type, system_prompt)` - New session
                        - `api_delete_session(session_id)` - Delete session

                        **Chat:**
                        - `api_chat(session_id, message, max_tokens, temperature)` - Chat
                        - `api_inference(prompt, messages, system_prompt, ...)` - Direct inference

                        **System:**
                        - `api_health()` - Health check
                        - `api_get_status()` - Full status
                        - `api_get_backends()` - Available backends
                        """)

                        gr.Markdown("### Test")
                        btn_health = gr.Button("Health Check")
                        health_output = gr.JSON()

                        btn_health.click(
                            lambda: json.loads(api_health()),
                            outputs=[health_output]
                        )

            # ──────────────────────────────────────────────────────────
            # TAB 4: ABOUT
            # ──────────────────────────────────────────────────────────
            with gr.Tab("About", id=3):
                gr.Markdown(f"""
                # TextAI v{VERSION}

                **Local AI Chat Assistant**

                A clean, professional chat interface for running local LLM models.
                Inspired by ChatGPT & Grok.

                ---

                ### Features

                - 🤖 **Chat** - Natural conversation with AI
                - 📁 **Session Management** - Auto-save, rename, delete chats
                - 🔧 **Model Manager** - Download, load, configure models
                - 🔍 **HuggingFace Search** - Find and download GGUF models
                - 🎛️ **Customization** - System prompts, temperature, tokens
                - 📡 **API** - Full API for integration with other apps

                ---

                ### Supported Models

                - **GGUF** - Via llama-cpp-python (recommended)
                - **Transformers** - Via HuggingFace transformers

                ---

                ### Keyboard Shortcuts

                - `Enter` - Send message
                - `Shift+Enter` - New line

                ---

                Built with ❤️ using Gradio
                """)

    return app


# ──────────────────────────────────────────────────────────────────────
# MAIN
# ──────────────────────────────────────────────────────────────────────

if __name__ == "__main__":
    app = create_app()
    app.launch(
        server_name="0.0.0.0",
        server_port=7860,
        share=False
    )

core/__init__.py  (ADDED, +15 lines)

"""
TextAI v2 Core Module
Single source of truth for all state management
"""
from .state import StateManager, get_state
from .models import ModelService
from .sessions import SessionService
from .logger import logger

__all__ = [
    'StateManager', 'get_state',
    'ModelService',
    'SessionService',
    'logger'
]

core/config.py  (ADDED, +29 lines)

"""
Configuration - Single place for all paths and constants
"""
from pathlib import Path
import os

# Base directories
BASE_DIR = Path("/tmp/textai_storage") if os.environ.get("SPACE_ID") else Path(__file__).parent.parent / "storage"
MODELS_DIR = BASE_DIR / "models"
SESSIONS_DIR = BASE_DIR / "sessions"
STATE_FILE = BASE_DIR / "state.json"
LOGS_DIR = BASE_DIR / "logs"

# Ensure directories exist
for d in [BASE_DIR, MODELS_DIR, SESSIONS_DIR, LOGS_DIR]:
    d.mkdir(parents=True, exist_ok=True)

# HuggingFace API
HF_API_URL = "https://huggingface.co/api"
HF_TOKEN = os.environ.get("HF_TOKEN", "")

# Model constraints (free tier)
MAX_MODEL_SIZE_GB = 5.0
MAX_PARAMS_BILLION = 7.0
RECOMMENDED_QUANTS = ["Q4_K_M", "Q4_K_S", "Q5_K_M"]

# UI Constants
VERSION = "2.0.0"
APP_TITLE = "TextAI"
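
The SPACE_ID branch is what routes storage to /tmp on Hugging Face Spaces (which set SPACE_ID in the environment) while local runs write to a storage/ directory next to the code. A quick way to see the effect, assuming the variable is set before the module is first imported (importing also creates the directories):

import os
os.environ["SPACE_ID"] = "user/textai"   # hypothetical Space ID, set before first import
from core.config import BASE_DIR

print(BASE_DIR)   # /tmp/textai_storage here; <repo>/storage when SPACE_ID is unset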

core/logger.py  (ADDED, +84 lines)

"""
Centralized Logger
"""
import json
from datetime import datetime
from pathlib import Path
from typing import Optional, Dict, Any, List

from .config import LOGS_DIR


class Logger:
    """Thread-safe logger with file persistence"""

    def __init__(self, max_logs: int = 1000):
        self.log_file = LOGS_DIR / "app.log"
        self.max_logs = max_logs
        self._logs: List[Dict] = []
        self._load_logs()

    def _load_logs(self):
        """Load existing logs from file"""
        if self.log_file.exists():
            try:
                self._logs = json.loads(self.log_file.read_text())[-self.max_logs:]
            except Exception:
                self._logs = []

    def _save_logs(self):
        """Save logs to file"""
        try:
            self.log_file.write_text(json.dumps(self._logs[-self.max_logs:], indent=2))
        except Exception:
            pass

    def _log(self, level: str, module: str, message: str, data: Optional[Dict] = None):
        """Internal log method"""
        entry = {
            "timestamp": datetime.now().isoformat(),
            "level": level,
            "module": module,
            "message": message,
            "data": data
        }
        self._logs.append(entry)

        # Console output
        icons = {"INFO": "ℹ️", "WARN": "⚠️", "ERROR": "❌", "EVENT": "📝"}
        icon = icons.get(level, "•")
        timestamp = datetime.now().strftime("%H:%M:%S")
        print(f"[{timestamp}] {icon} [{module}] {message}")

        # Persist periodically
        if len(self._logs) % 10 == 0:
            self._save_logs()

    def info(self, module: str, message: str, data: Optional[Dict] = None):
        self._log("INFO", module, message, data)

    def warn(self, module: str, message: str, data: Optional[Dict] = None):
        self._log("WARN", module, message, data)

    def error(self, module: str, message: str, data: Optional[Dict] = None):
        self._log("ERROR", module, message, data)

    def event(self, module: str, message: str, data: Optional[Dict] = None):
        self._log("EVENT", module, message, data)

    def get_logs(self, level: Optional[str] = None, limit: int = 50) -> List[Dict]:
        """Get recent logs, optionally filtered by level"""
        logs = self._logs
        if level:
            logs = [l for l in logs if l["level"] == level]
        return logs[-limit:]

    def clear(self, level: Optional[str] = None):
        """Clear logs"""
        if level:
            self._logs = [l for l in self._logs if l["level"] != level]
        else:
            self._logs = []
        self._save_logs()


# Singleton instance
logger = Logger()
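
For reference, the calling convention used throughout the codebase is module tag first, then message, with an optional structured-data dict; a short sketch:

from core.logger import logger

logger.info("Demo", "Starting up")                            # prints: [HH:MM:SS] ℹ️ [Demo] Starting up
logger.event("Demo", "Model picked", {"model_id": "tiny"})    # the data dict is stored with the entry
recent_errors = logger.get_logs(level="ERROR", limit=10)      # newest entries last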

core/models.py  (ADDED, +494 lines)

"""
Model Service - All model operations
Handles: loading, inference, downloading, management
"""
import re
import json
import requests
import threading
from pathlib import Path
from datetime import datetime
from typing import Dict, List, Optional, Tuple, Any

from .config import MODELS_DIR, HF_API_URL, HF_TOKEN, RECOMMENDED_QUANTS, MAX_PARAMS_BILLION
from .state import get_state, InstalledModel
from .logger import logger

# Lazy imports for heavy libraries
_llama_cpp = None
_transformers = None
_torch = None


def _get_llama_cpp():
    """Lazy load llama-cpp-python"""
    global _llama_cpp
    if _llama_cpp is None:
        try:
            from llama_cpp import Llama
            _llama_cpp = Llama
            logger.info("Models", "llama-cpp-python loaded")
        except ImportError as e:
            logger.warn("Models", f"llama-cpp-python not available: {e}")
            _llama_cpp = False
    return _llama_cpp if _llama_cpp else None


def _get_transformers():
    """Lazy load transformers"""
    global _transformers, _torch
    if _transformers is None:
        try:
            from transformers import AutoModelForCausalLM, AutoTokenizer
            import torch
            _transformers = {"model": AutoModelForCausalLM, "tokenizer": AutoTokenizer}
            _torch = torch
            logger.info("Models", "transformers loaded")
        except ImportError as e:
            logger.warn("Models", f"transformers not available: {e}")
            _transformers = False
    return _transformers if _transformers else None


class ModelService:
    """
    Service for all model operations.
    Uses StateManager for persistence.
    """

    def __init__(self):
        self._current_model = None
        self._current_tokenizer = None
        # RLock, not Lock: load_model() holds the lock while calling
        # unload_model(), which acquires it again; a plain Lock would deadlock.
        self._lock = threading.RLock()
        self._state = get_state()

    # ──────────────────────────────────────────────────────────────────
    # MODEL LISTING & INFO
    # ──────────────────────────────────────────────────────────────────

    def get_installed_models(self) -> List[Dict]:
        """Get all installed models from state"""
        return self._state.get_installed_models()

    def get_loaded_model(self) -> Optional[Dict]:
        """Get currently loaded model info"""
        model_id = self._state.get_loaded_model_id()
        if model_id:
            return self._state.get_model_by_id(model_id)
        return None

    def is_model_loaded(self) -> bool:
        """Check if any model is loaded"""
        return self._current_model is not None

    # ──────────────────────────────────────────────────────────────────
    # MODEL LOADING
    # ──────────────────────────────────────────────────────────────────

    def load_model(self, model_id: str) -> Dict[str, Any]:
        """Load a model by ID"""
        with self._lock:
            logger.info("Models", f"Loading model: {model_id}")

            # Get model info from state
            model_info = self._state.get_model_by_id(model_id)
            if not model_info:
                return {"success": False, "error": f"Model not found: {model_id}"}

            # Unload current model first
            if self._current_model is not None:
                self.unload_model()

            # Load based on type
            model_path = MODELS_DIR / model_info["filename"]
            if not model_path.exists():
                return {"success": False, "error": f"Model file not found: {model_path}"}

            try:
                if model_info["model_type"] == "gguf":
                    result = self._load_gguf(model_path)
                else:
                    result = self._load_transformers(model_path)

                if result["success"]:
                    self._state.set_loaded_model(model_id)

                return result

            except Exception as e:
                logger.error("Models", f"Load failed: {e}")
                return {"success": False, "error": str(e)}

    def _load_gguf(self, model_path: Path) -> Dict:
        """Load GGUF model"""
        Llama = _get_llama_cpp()
        if Llama is None:
            return {"success": False, "error": "llama-cpp-python not installed"}

        try:
            self._current_model = Llama(
                model_path=str(model_path),
                n_ctx=4096,
                n_threads=4,
                n_gpu_layers=0,
                verbose=False
            )
            logger.info("Models", f"GGUF loaded: {model_path.name}")
            return {"success": True, "type": "gguf", "name": model_path.stem}
        except Exception as e:
            return {"success": False, "error": str(e)}

    def _load_transformers(self, model_path: Path) -> Dict:
        """Load transformers model"""
        tf = _get_transformers()
        if tf is None:
            return {"success": False, "error": "transformers not installed"}

        try:
            self._current_tokenizer = tf["tokenizer"].from_pretrained(str(model_path))
            self._current_model = tf["model"].from_pretrained(
                str(model_path),
                torch_dtype=_torch.float32,
                device_map="cpu",
                low_cpu_mem_usage=True
            )
            logger.info("Models", f"Transformers loaded: {model_path.name}")
            return {"success": True, "type": "transformers", "name": model_path.name}
        except Exception as e:
            return {"success": False, "error": str(e)}

    def unload_model(self):
        """Unload current model"""
        with self._lock:
            if self._current_model:
                del self._current_model
                self._current_model = None
            if self._current_tokenizer:
                del self._current_tokenizer
                self._current_tokenizer = None

            self._state.set_loaded_model(None)

            import gc
            gc.collect()
            logger.info("Models", "Model unloaded")

    # ──────────────────────────────────────────────────────────────────
    # INFERENCE
    # ──────────────────────────────────────────────────────────────────

    def generate(
        self,
        messages: List[Dict],
        max_tokens: int = 512,
        temperature: float = 0.7,
        top_p: float = 0.9
    ) -> str:
        """Generate response from loaded model"""
        if self._current_model is None:
            return "[Error: No model loaded]"

        model_info = self.get_loaded_model()
        if not model_info:
            return "[Error: Model info not found]"

        try:
            if model_info["model_type"] == "gguf":
                return self._generate_gguf(messages, max_tokens, temperature, top_p)
            else:
                return self._generate_transformers(messages, max_tokens, temperature, top_p)
        except Exception as e:
            logger.error("Models", f"Generation error: {e}")
            return f"[Error: {e}]"

    def _generate_gguf(self, messages: List[Dict], max_tokens: int, temperature: float, top_p: float) -> str:
        """Generate with GGUF model"""
        response = self._current_model.create_chat_completion(
            messages=messages,
            max_tokens=max_tokens,
            temperature=temperature,
            top_p=top_p,
            stream=False
        )
        return response["choices"][0]["message"]["content"]

    def _generate_transformers(self, messages: List[Dict], max_tokens: int, temperature: float, top_p: float) -> str:
        """Generate with transformers model"""
        # Build prompt from messages
        prompt = ""
        for msg in messages:
            role = msg["role"].capitalize()
            prompt += f"{role}: {msg['content']}\n\n"
        prompt += "Assistant: "

        inputs = self._current_tokenizer(prompt, return_tensors="pt")

        with _torch.no_grad():
            outputs = self._current_model.generate(
                inputs.input_ids,
                max_new_tokens=max_tokens,
                temperature=temperature,
                top_p=top_p,
                do_sample=True,
                pad_token_id=self._current_tokenizer.eos_token_id
            )

        response = self._current_tokenizer.decode(
            outputs[0][inputs.input_ids.shape[1]:],
            skip_special_tokens=True
        )
        return response.strip()

    # ──────────────────────────────────────────────────────────────────
    # HUGGINGFACE SEARCH & DOWNLOAD
    # ──────────────────────────────────────────────────────────────────

    def search_hf_models(
        self,
        query: str = "",
        max_params: float = MAX_PARAMS_BILLION,
        limit: int = 20
    ) -> Tuple[List[Dict], str]:
        """
        Search HuggingFace for GGUF models.
        Returns: (results, status_message)
        """
        logger.info("Models", f"HF search: {query}")

        try:
            params = {
                "search": query,
                "library": "gguf",
                "pipeline_tag": "text-generation",
                "sort": "downloads",
                "direction": -1,
                "limit": limit + 20  # Extra for filtering
            }

            headers = {"Authorization": f"Bearer {HF_TOKEN}"} if HF_TOKEN else {}
            resp = requests.get(f"{HF_API_URL}/models", params=params, headers=headers, timeout=30)
            resp.raise_for_status()

            results = []
            for m in resp.json():
                model_id = m.get("id", "")
                params_b = self._estimate_params(model_id)

                # Filter by params
                if max_params and params_b and params_b > max_params:
                    continue

                # Check compatibility
                compat = self._check_compatibility(params_b)

                results.append({
                    "id": model_id,
                    "downloads": m.get("downloads", 0),
                    "params_b": params_b,
                    "est_size_gb": round(params_b * 0.55, 1) if params_b else None,
                    "compatibility": compat,
                    "is_installed": self._is_repo_installed(model_id)
                })

                if len(results) >= limit:
                    break

            logger.info("Models", f"HF search found {len(results)} models")
            return results, f"Found {len(results)} models"

        except Exception as e:
            logger.error("Models", f"HF search error: {e}")
            return [], f"Search failed: {e}"

    def get_hf_model_files(self, repo_id: str) -> List[Dict]:
        """Get GGUF files available for a HF model"""
        logger.info("Models", f"Getting files for: {repo_id}")

        try:
            headers = {"Authorization": f"Bearer {HF_TOKEN}"} if HF_TOKEN else {}
            resp = requests.get(f"{HF_API_URL}/models/{repo_id}", headers=headers, timeout=30)
            resp.raise_for_status()

            files = []
            for s in resp.json().get("siblings", []):
                filename = s.get("rfilename", "")
                if filename.endswith(".gguf"):
                    quant = self._extract_quant(filename)
                    files.append({
                        "filename": filename,
                        "quant": quant,
                        "recommended": quant in RECOMMENDED_QUANTS,
                        "is_installed": self._state.is_model_installed(repo_id, filename)
                    })

            # Sort: recommended first, then by name
            files.sort(key=lambda x: (not x["recommended"], x["filename"]))
            return files

        except Exception as e:
            logger.error("Models", f"Get files error: {e}")
            return []

    def download_model(self, repo_id: str, filename: str) -> Dict[str, Any]:
        """
        Download a model from HuggingFace.
        Returns: {success, message, model_id}
        """
        logger.info("Models", f"Downloading: {repo_id}/{filename}")

        # Check for duplicate
        if self._state.is_model_installed(repo_id, filename):
            return {
                "success": False,
                "error": f"Model already installed: {filename}",
                "duplicate": True
            }

        # Resolve destination before the try block so cleanup can reference it
        dest_path = MODELS_DIR / filename

        try:
            # Download
            url = f"https://huggingface.co/{repo_id}/resolve/main/{filename}"
            headers = {"Authorization": f"Bearer {HF_TOKEN}"} if HF_TOKEN else {}

            resp = requests.get(url, headers=headers, stream=True, timeout=600)
            resp.raise_for_status()

            total_size = int(resp.headers.get('content-length', 0))
            downloaded = 0

            with open(dest_path, 'wb') as f:
                for chunk in resp.iter_content(chunk_size=8192):
                    f.write(chunk)
                    downloaded += len(chunk)

            # Create model entry
            params_b = self._estimate_params(repo_id)
            model = InstalledModel(
                id=Path(filename).stem,
                name=self._make_display_name(repo_id, filename),
                hf_repo=repo_id,
                filename=filename,
                model_type="gguf" if filename.endswith(".gguf") else "transformers",
                size_bytes=dest_path.stat().st_size,
                quant=self._extract_quant(filename),
                installed_at=datetime.now().isoformat(),
                params_b=params_b or 0.0
            )

            # Add to state
            self._state.add_model(model)

            size_mb = dest_path.stat().st_size / (1024 * 1024)
            logger.info("Models", f"Downloaded: {filename} ({size_mb:.1f} MB)")

            return {
                "success": True,
                "message": f"Downloaded: {filename} ({size_mb:.1f} MB)",
                "model_id": model.id
            }

        except Exception as e:
            logger.error("Models", f"Download failed: {e}")
            # Clean up partial download
            if dest_path.exists():
                dest_path.unlink()
            return {"success": False, "error": str(e)}

    def delete_model(self, model_id: str) -> Dict[str, Any]:
        """Delete an installed model"""
        logger.info("Models", f"Deleting: {model_id}")

        model_info = self._state.get_model_by_id(model_id)
        if not model_info:
            return {"success": False, "error": "Model not found"}

        # Unload if currently loaded
        if self._state.get_loaded_model_id() == model_id:
            self.unload_model()

        # Delete file
        try:
            model_path = MODELS_DIR / model_info["filename"]
            if model_path.exists():
                model_path.unlink()
        except Exception as e:
            logger.error("Models", f"File delete error: {e}")

        # Remove from state
        self._state.remove_model(model_id)

        return {"success": True, "message": f"Deleted: {model_info['name']}"}

    # ──────────────────────────────────────────────────────────────────
    # UTILITY METHODS
    # ──────────────────────────────────────────────────────────────────

    def _estimate_params(self, model_id: str) -> Optional[float]:
        """Extract parameter count from model name"""
        name = model_id.lower()
        patterns = [
            r'(\d+\.?\d*)b(?:illion)?',
            r'(\d+\.?\d*)-?b(?:illion)?',
        ]
        for pattern in patterns:
            match = re.search(pattern, name)
            if match:
                try:
                    return float(match.group(1))
                except ValueError:
                    pass
        return None

    def _extract_quant(self, filename: str) -> str:
        """Extract quantization type from filename"""
        quants = ["Q2_K", "Q3_K_S", "Q3_K_M", "Q3_K_L", "Q4_0", "Q4_K_S", "Q4_K_M",
                  "Q5_0", "Q5_K_S", "Q5_K_M", "Q6_K", "Q8_0", "F16", "F32"]
        upper = filename.upper()
        for q in quants:
            if q in upper:
                return q
        return "unknown"

    def _check_compatibility(self, params_b: Optional[float]) -> Dict:
        """Check if model is compatible with free tier"""
        if params_b is None:
            return {"status": "unknown", "label": "❓ Unknown", "ok": True}

        if params_b <= 1.5:
            return {"status": "best", "label": "✅ Best", "ok": True}
        elif params_b <= 3:
            return {"status": "good", "label": "✅ Good", "ok": True}
        elif params_b <= 7:
            return {"status": "ok", "label": "⚠️ OK", "ok": True}
        elif params_b <= 13:
            return {"status": "slow", "label": "⚠️ Slow", "ok": False}
        else:
            return {"status": "too_large", "label": "❌ Too Large", "ok": False}

    def _make_display_name(self, repo_id: str, filename: str) -> str:
        """Create a nice display name"""
        # Extract meaningful part from repo or filename
        name = Path(filename).stem
        # Clean up common patterns
        name = re.sub(r'[-_]gguf$', '', name, flags=re.IGNORECASE)
        name = re.sub(r'[-_]q\d.*$', '', name, flags=re.IGNORECASE)
        return name.replace('-', ' ').replace('_', ' ').title()

    def _is_repo_installed(self, repo_id: str) -> bool:
        """Check if any model from this repo is installed"""
        for m in self._state.get_installed_models():
            if m["hf_repo"] == repo_id:
                return True
        return False


# Singleton
_model_service: Optional[ModelService] = None


def get_model_service() -> ModelService:
    """Get singleton model service"""
    global _model_service
    if _model_service is None:
        _model_service = ModelService()
    return _model_service
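
Putting the service together, the intended search, download, load, generate flow looks roughly like this; the query and any names it returns are placeholders, and network access plus llama-cpp-python are assumed:

from core.models import get_model_service

service = get_model_service()

results, status = service.search_hf_models("tinyllama", max_params=3.0, limit=5)
if results:
    repo_id = results[0]["id"]
    files = service.get_hf_model_files(repo_id)
    # Prefer a recommended quant (e.g. Q4_K_M) when one exists
    choice = next((f for f in files if f["recommended"]), files[0] if files else None)
    if choice:
        dl = service.download_model(repo_id, choice["filename"])
        if dl.get("success"):
            service.load_model(dl["model_id"])
            print(service.generate([{"role": "user", "content": "Hi!"}], max_tokens=64))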

core/sessions.py  (ADDED, +234 lines)

"""
Session Service - Chat session management
"""
import json
import uuid
from datetime import datetime
from pathlib import Path
from typing import Dict, List, Optional

from .config import SESSIONS_DIR
from .state import get_state
from .logger import logger


class SessionService:
    """
    Manages chat sessions.
    Session metadata lives in state; each session's messages live in their own JSON file.
    """

    def __init__(self):
        self._state = get_state()

    # ──────────────────────────────────────────────────────────────────
    # SESSION CRUD
    # ──────────────────────────────────────────────────────────────────

    def create_session(
        self,
        title: str = "",
        session_type: str = "chat",
        system_prompt: str = ""
    ) -> Dict:
        """Create a new chat session"""
        session_id = str(uuid.uuid4())[:8]

        if not title:
            now = datetime.now()
            prefix = "Chat" if session_type == "chat" else "Roleplay"
            title = f"{prefix} {now.strftime('%m/%d %H:%M')}"

        session = {
            "id": session_id,
            "title": title,
            "type": session_type,
            "system_prompt": system_prompt,
            "created_at": datetime.now().isoformat(),
            "updated_at": datetime.now().isoformat(),
            "message_count": 0,
            "model_id": self._state.get_loaded_model_id()
        }

        # Save session metadata to state
        self._state.add_session(session)

        # Create empty messages file
        self._save_messages(session_id, [])

        logger.event("Sessions", f"Created: {session_id}", {"title": title})
        return session

    def get_session(self, session_id: str) -> Optional[Dict]:
        """Get session by ID with messages"""
        sessions = self._state.get_sessions()
        for s in sessions:
            if s["id"] == session_id:
                session = s.copy()
                session["messages"] = self._load_messages(session_id)
                return session
        return None

    def get_all_sessions(self) -> List[Dict]:
        """Get all sessions (without messages)"""
        return self._state.get_sessions()

    def update_session(self, session_id: str, updates: Dict) -> bool:
        """Update session metadata"""
        return self._state.update_session(session_id, updates)

    def delete_session(self, session_id: str) -> bool:
        """Delete session and its messages"""
        # Delete messages file
        msg_file = SESSIONS_DIR / f"{session_id}.json"
        if msg_file.exists():
            msg_file.unlink()

        # Remove from state
        result = self._state.delete_session(session_id)
        if result:
            logger.event("Sessions", f"Deleted: {session_id}")
        return result

    def rename_session(self, session_id: str, new_title: str) -> bool:
        """Rename a session"""
        return self._state.update_session(session_id, {"title": new_title})

    def clear_session(self, session_id: str) -> bool:
        """Clear all messages from a session"""
        self._save_messages(session_id, [])
        self._state.update_session(session_id, {"message_count": 0})
        return True

    # ──────────────────────────────────────────────────────────────────
    # MESSAGE HANDLING
    # ──────────────────────────────────────────────────────────────────

    def add_message(self, session_id: str, role: str, content: str) -> bool:
        """Add a message to a session"""
        messages = self._load_messages(session_id)

        message = {
            "role": role,
            "content": content,
            "timestamp": datetime.now().isoformat()
        }
        messages.append(message)

        self._save_messages(session_id, messages)

        # Update session metadata
        self._state.update_session(session_id, {
            "message_count": len(messages)
        })

        # Auto-generate title from first user message
        if role == "user" and len(messages) == 1:
            title = content[:40] + "..." if len(content) > 40 else content
            self._state.update_session(session_id, {"title": title})

        return True

    def get_messages(self, session_id: str) -> List[Dict]:
        """Get all messages for a session"""
        return self._load_messages(session_id)

    def _load_messages(self, session_id: str) -> List[Dict]:
        """Load messages from file"""
        msg_file = SESSIONS_DIR / f"{session_id}.json"
        if msg_file.exists():
            try:
                return json.loads(msg_file.read_text())
            except Exception as e:
                # Corrupt or unreadable file: fall back to an empty history
                logger.error("Sessions", f"Failed to load messages for {session_id}: {e}")
        return []

    def _save_messages(self, session_id: str, messages: List[Dict]):
        """Save messages to file"""
        msg_file = SESSIONS_DIR / f"{session_id}.json"
        msg_file.write_text(json.dumps(messages, indent=2))

    # ──────────────────────────────────────────────────────────────────
    # UTILITY
    # ──────────────────────────────────────────────────────────────────

    def get_active_session(self) -> Optional[Dict]:
        """Get the currently active session"""
        session_id = self._state.get_active_session_id()
        if session_id:
            return self.get_session(session_id)
        return None

    def set_active_session(self, session_id: str):
        """Set active session"""
        self._state.set_active_session(session_id)

    def get_session_for_display(self, session_id: str) -> Optional[Dict]:
        """Get session formatted for UI display"""
        session = self.get_session(session_id)
        if not session:
            return None

        # Format messages for Gradio chatbot (tuple format)
        history = []
        user_msg = None
        for msg in session.get("messages", []):
            if msg["role"] == "user":
                user_msg = msg["content"]
            elif msg["role"] == "assistant" and user_msg:
                history.append((user_msg, msg["content"]))
                user_msg = None
        if user_msg:
            history.append((user_msg, None))

        return {
            "id": session["id"],
            "title": session["title"],
            "type": session["type"],
            "system_prompt": session["system_prompt"],
            "history": history,
            "message_count": session["message_count"]
        }

    def group_sessions_by_date(self) -> Dict[str, List[Dict]]:
        """Group sessions by date for sidebar display"""
        sessions = self.get_all_sessions()
        today = datetime.now().date()

        groups = {
            "Today": [],
            "Yesterday": [],
            "This Week": [],
            "Older": []
        }

        for s in sessions:
            try:
                updated = datetime.fromisoformat(s["updated_at"]).date()
                diff = (today - updated).days

                if diff == 0:
                    groups["Today"].append(s)
                elif diff == 1:
                    groups["Yesterday"].append(s)
                elif diff < 7:
                    groups["This Week"].append(s)
                else:
                    groups["Older"].append(s)
            except (KeyError, ValueError):
                # Missing or malformed timestamp: treat as old
                groups["Older"].append(s)

        # Remove empty groups
        return {k: v for k, v in groups.items() if v}


# Singleton
_session_service: Optional[SessionService] = None


def get_session_service() -> SessionService:
    """Get singleton session service"""
    global _session_service
    if _session_service is None:
        _session_service = SessionService()
    return _session_service
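A minimal end-to-end sketch of the service (import path assumes core/ is importable from the repo root; the session contents are illustrative):

from core.sessions import get_session_service

svc = get_session_service()

# Create a session and exchange one turn
session = svc.create_session(session_type="chat")
svc.add_message(session["id"], "user", "What is a GGUF file?")
svc.add_message(session["id"], "assistant", "A quantized model container format.")

# The first user message becomes the title automatically
print(svc.get_session(session["id"])["title"])   # "What is a GGUF file?"

# Sidebar grouping drops empty buckets
for group, items in svc.group_sessions_by_date().items():
    print(group, [s["title"] for s in items])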
core/state.py
ADDED
@@ -0,0 +1,305 @@
"""
State Management - Single Source of Truth
All state changes go through here
"""
import json
import threading
from datetime import datetime
from pathlib import Path
from typing import Dict, List, Optional, Any, Callable
from dataclasses import dataclass, asdict, field

from .config import STATE_FILE, MODELS_DIR
from .logger import logger


@dataclass
class InstalledModel:
    """Installed model metadata"""
    id: str                  # Unique ID (filename stem)
    name: str                # Display name
    hf_repo: str             # HuggingFace repo ID
    filename: str            # Actual filename
    model_type: str          # "gguf" or "transformers"
    size_bytes: int          # File size
    quant: str               # Quantization type (Q4_K_M, etc.)
    installed_at: str        # ISO timestamp
    system_prompt: str = ""  # Custom system prompt
    params_b: float = 0.0    # Estimated params in billions


@dataclass
class AppState:
    """Application state - single source of truth"""
    installed_models: List[Dict] = field(default_factory=list)
    loaded_model_id: Optional[str] = None
    default_model_id: Optional[str] = None
    sessions: List[Dict] = field(default_factory=list)
    active_session_id: Optional[str] = None
    settings: Dict = field(default_factory=dict)
    version: str = "2.0.0"


class StateManager:
    """
    Manages all application state with:
    - File persistence
    - Thread safety
    - Change notifications
    """

    def __init__(self):
        self._state: AppState = AppState()
        self._lock = threading.RLock()
        self._subscribers: List[Callable] = []
        self._load_state()

    def _load_state(self):
        """Load state from file"""
        with self._lock:
            if STATE_FILE.exists():
                try:
                    data = json.loads(STATE_FILE.read_text())
                    self._state = AppState(**data)
                    logger.info("State", "State loaded from file")
                except Exception as e:
                    logger.error("State", f"Failed to load state: {e}")
                    self._state = AppState()
            else:
                self._state = AppState()
                self._save_state()

    def _save_state(self):
        """Save state to file"""
        try:
            STATE_FILE.write_text(json.dumps(asdict(self._state), indent=2))
        except Exception as e:
            logger.error("State", f"Failed to save state: {e}")

    def _notify_subscribers(self):
        """Notify all subscribers of state change"""
        for callback in self._subscribers:
            try:
                callback(self._state)
            except Exception as e:
                logger.error("State", f"Subscriber error: {e}")

    def subscribe(self, callback: Callable):
        """Subscribe to state changes"""
        self._subscribers.append(callback)

    def unsubscribe(self, callback: Callable):
        """Unsubscribe from state changes"""
        if callback in self._subscribers:
            self._subscribers.remove(callback)

    # ──────────────────────────────────────────────────────────────────
    # MODEL STATE OPERATIONS
    # ──────────────────────────────────────────────────────────────────

    def get_installed_models(self) -> List[Dict]:
        """Get all installed models"""
        with self._lock:
            return self._state.installed_models.copy()

    def get_model_by_id(self, model_id: str) -> Optional[Dict]:
        """Get specific model by ID"""
        with self._lock:
            for m in self._state.installed_models:
                if m["id"] == model_id:
                    return m.copy()
            return None

    def is_model_installed(self, hf_repo: str, filename: str) -> bool:
        """Check if model is already installed (duplicate check)"""
        with self._lock:
            for m in self._state.installed_models:
                if m["hf_repo"] == hf_repo and m["filename"] == filename:
                    return True
            return False

    def add_model(self, model: InstalledModel) -> bool:
        """Add a new installed model"""
        with self._lock:
            # Duplicate check
            if self.is_model_installed(model.hf_repo, model.filename):
                logger.warn("State", f"Model already installed: {model.filename}")
                return False

            self._state.installed_models.append(asdict(model))

            # Set as default if first model
            if len(self._state.installed_models) == 1:
                self._state.default_model_id = model.id

            self._save_state()
            self._notify_subscribers()
            logger.event("State", f"Model added: {model.name}")
            return True

    def remove_model(self, model_id: str) -> bool:
        """Remove an installed model"""
        with self._lock:
            for i, m in enumerate(self._state.installed_models):
                if m["id"] == model_id:
                    # Unload if currently loaded
                    if self._state.loaded_model_id == model_id:
                        self._state.loaded_model_id = None

                    # Remove from list
                    removed = self._state.installed_models.pop(i)

                    # Update default if needed
                    if self._state.default_model_id == model_id:
                        self._state.default_model_id = (
                            self._state.installed_models[0]["id"]
                            if self._state.installed_models else None
                        )

                    self._save_state()
                    self._notify_subscribers()
                    logger.event("State", f"Model removed: {removed['name']}")
                    return True
            return False

    def update_model(self, model_id: str, updates: Dict) -> bool:
        """Update model properties"""
        with self._lock:
            for m in self._state.installed_models:
                if m["id"] == model_id:
                    m.update(updates)
                    self._save_state()
                    self._notify_subscribers()
                    return True
            return False

    def set_loaded_model(self, model_id: Optional[str]):
        """Set the currently loaded model"""
        with self._lock:
            self._state.loaded_model_id = model_id
            self._save_state()
            self._notify_subscribers()
            logger.event("State", f"Model loaded: {model_id}")

    def get_loaded_model_id(self) -> Optional[str]:
        """Get currently loaded model ID"""
        with self._lock:
            return self._state.loaded_model_id

    def set_default_model(self, model_id: str):
        """Set default model"""
        with self._lock:
            self._state.default_model_id = model_id
            self._save_state()

    # ──────────────────────────────────────────────────────────────────
    # SESSION STATE OPERATIONS
    # ──────────────────────────────────────────────────────────────────

    def get_sessions(self) -> List[Dict]:
        """Get all sessions"""
        with self._lock:
            return self._state.sessions.copy()

    def add_session(self, session: Dict) -> str:
        """Add new session, return session ID"""
        with self._lock:
            self._state.sessions.insert(0, session)  # Most recent first
            self._state.active_session_id = session["id"]
            self._save_state()
            self._notify_subscribers()
            return session["id"]

    def update_session(self, session_id: str, updates: Dict) -> bool:
        """Update session"""
        with self._lock:
            for s in self._state.sessions:
                if s["id"] == session_id:
                    s.update(updates)
                    s["updated_at"] = datetime.now().isoformat()
                    self._save_state()
                    return True
            return False

    def delete_session(self, session_id: str) -> bool:
        """Delete session"""
        with self._lock:
            for i, s in enumerate(self._state.sessions):
                if s["id"] == session_id:
                    self._state.sessions.pop(i)
                    if self._state.active_session_id == session_id:
                        self._state.active_session_id = (
                            self._state.sessions[0]["id"]
                            if self._state.sessions else None
                        )
                    self._save_state()
                    self._notify_subscribers()
                    return True
            return False

    def set_active_session(self, session_id: str):
        """Set active session"""
        with self._lock:
            self._state.active_session_id = session_id
            self._save_state()

    def get_active_session_id(self) -> Optional[str]:
        """Get active session ID"""
        with self._lock:
            return self._state.active_session_id

    # ──────────────────────────────────────────────────────────────────
    # SETTINGS
    # ──────────────────────────────────────────────────────────────────

    def get_settings(self) -> Dict:
        """Get all settings"""
        with self._lock:
            return self._state.settings.copy()

    def update_settings(self, updates: Dict):
        """Update settings"""
        with self._lock:
            self._state.settings.update(updates)
            self._save_state()

    # ──────────────────────────────────────────────────────────────────
    # UTILITY
    # ──────────────────────────────────────────────────────────────────

    def get_full_state(self) -> Dict:
        """Get complete state (for debugging)"""
        with self._lock:
            return asdict(self._state)

    def sync_with_filesystem(self):
        """
        Sync state with actual files on disk.
        Call this on startup to handle manual file changes.
        """
        with self._lock:
            # Check each installed model still exists
            valid_models = []
            for m in self._state.installed_models:
                model_path = MODELS_DIR / m["filename"]
                if model_path.exists():
                    valid_models.append(m)
                else:
                    logger.warn("State", f"Model file missing, removing: {m['filename']}")

            if len(valid_models) != len(self._state.installed_models):
                self._state.installed_models = valid_models
                self._save_state()
            logger.info("State", "State synced with filesystem")


# Singleton instance
_state_manager: Optional[StateManager] = None


def get_state() -> StateManager:
    """Get the singleton state manager"""
    global _state_manager
    if _state_manager is None:
        _state_manager = StateManager()
    return _state_manager
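The subscribe/notify path is what the UI layer leans on. A small sketch of registering a listener and triggering a notifying change (import path and all field values are illustrative; size and repo are made up):

from datetime import datetime
from core.state import get_state, InstalledModel

state = get_state()

# Called synchronously, on the mutating thread, after every notifying change
def on_change(app_state):
    print("models:", len(app_state.installed_models),
          "loaded:", app_state.loaded_model_id)

state.subscribe(on_change)

state.add_model(InstalledModel(
    id="tinyllama-1.1b-chat-v1.0",
    name="Tinyllama 1.1B Chat V1.0",
    hf_repo="example/tinyllama-gguf",            # illustrative repo id
    filename="tinyllama-1.1b-chat-v1.0-Q4_K_M.gguf",
    model_type="gguf",
    size_bytes=668_000_000,                      # made-up size
    quant="Q4_K_M",
    installed_at=datetime.now().isoformat(),
))  # fires on_change, persists STATE_FILE, and sets the default model

Because the lock is an RLock, add_model can safely call is_model_installed while already holding it.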
requirements.txt
ADDED
@@ -0,0 +1,4 @@
gradio>=4.0.0
requests>=2.28.0
huggingface_hub>=0.16.0
llama-cpp-python>=0.2.0
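These pin minimum versions only; `pip install -r requirements.txt` pulls them in. Note that llama-cpp-python compiles a native extension when no prebuilt wheel matches the platform, so a cold install can take several minutes.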
ui/__init__.py
ADDED
@@ -0,0 +1,9 @@
"""
TextAI v2 UI Module
Gradio-based interface components
"""
from .theme import THEME_CSS, get_theme
from .chat import build_chat_ui
from .models import build_model_manager_ui

__all__ = ['THEME_CSS', 'get_theme', 'build_chat_ui', 'build_model_manager_ui']
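A sketch of how these exports might be wired together in an entrypoint. The two-tab layout is an assumption (the repo's actual app.py is not shown in this section), and the imports assume ui and core resolve under one parent package, as the relative `..core` imports in ui/chat.py imply:

import gradio as gr
from ui import THEME_CSS, build_chat_ui, build_model_manager_ui

with gr.Blocks(css=THEME_CSS) as demo:
    with gr.Tab("Chat"):
        chat_components = build_chat_ui()        # returns a dict of components
    with gr.Tab("Models"):
        model_components = build_model_manager_ui()

demo.launch()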
ui/chat.py
ADDED
@@ -0,0 +1,386 @@
"""
Chat UI Component
Clean ChatGPT/Grok style interface
"""
import gradio as gr
from typing import List, Tuple, Optional, Generator

from ..core.models import get_model_service
from ..core.sessions import get_session_service
from ..core.state import get_state
from ..core.logger import logger

# Default prompts
DEFAULT_SYSTEM_PROMPT = """You are a helpful AI assistant. Be concise, accurate, and helpful."""

ROLEPLAY_SYSTEM_PROMPT = """You are an expert roleplay AI. Stay in character. Be creative and engaging."""


def build_chat_ui():
    """Build the chat interface - returns components dict"""

    # ──────────────────────────────────────────────────────────────────
    # HELPER FUNCTIONS
    # ──────────────────────────────────────────────────────────────────

    def get_sessions_for_sidebar() -> List[List]:
        """Get sessions formatted for sidebar display"""
        service = get_session_service()
        sessions = service.get_all_sessions()
        return [[s["id"], s["title"][:35] + "..." if len(s["title"]) > 35 else s["title"]]
                for s in sessions[:30]]

    def get_model_choices() -> List[str]:
        """Get models for dropdown"""
        service = get_model_service()
        models = service.get_installed_models()
        loaded_id = get_state().get_loaded_model_id()

        choices = []
        for m in models:
            prefix = "● " if m["id"] == loaded_id else ""
            choices.append(f"{prefix}{m['name']}")
        return choices if choices else ["No models installed"]

    def get_current_model_display() -> str:
        """Get current model for header display"""
        service = get_model_service()
        model = service.get_loaded_model()
        if model:
            return f"● {model['name']}"
        return "No model loaded"

    def format_history(session_id: str) -> List[Tuple[str, str]]:
        """Format session messages for Gradio chatbot"""
        service = get_session_service()
        display = service.get_session_for_display(session_id)
        return display["history"] if display else []

    # ──────────────────────────────────────────────────────────────────
    # EVENT HANDLERS
    # ──────────────────────────────────────────────────────────────────

    def on_new_chat(system_prompt: str = ""):
        """Create new chat session"""
        service = get_session_service()
        session = service.create_session(
            system_prompt=system_prompt or DEFAULT_SYSTEM_PROMPT
        )
        logger.info("Chat", f"New session: {session['id']}")
        return (
            session["id"],
            [],
            get_sessions_for_sidebar(),
            "",
            gr.update(visible=True),   # Show chat
            gr.update(visible=False)   # Hide welcome
        )

    def on_session_select(evt: gr.SelectData, sessions_data):
        """Load selected session"""
        try:
            if evt.index is not None:
                row_idx = evt.index[0] if isinstance(evt.index, (list, tuple)) else evt.index
                if sessions_data and row_idx < len(sessions_data):
                    session_id = sessions_data[row_idx][0]
                    service = get_session_service()
                    service.set_active_session(session_id)
                    history = format_history(session_id)
                    logger.info("Chat", f"Loaded session: {session_id}")
                    return (
                        session_id,
                        history,
                        gr.update(visible=True),
                        gr.update(visible=False)
                    )
        except Exception as e:
            logger.error("Chat", f"Session select error: {e}")
        # Fallback: nothing selected or selection failed
        return "", [], gr.update(), gr.update()

    def on_send_message(
        session_id: str,
        message: str,
        history: List,
        max_tokens: int,
        temperature: float,
        system_prompt: str
    ) -> Generator:
        """Send a message and yield chat updates"""
        if not message.strip():
            yield history, "", session_id, get_sessions_for_sidebar()
            return

        model_service = get_model_service()
        session_service = get_session_service()

        # Check model
        if not model_service.is_model_loaded():
            history = history + [(message, "⚠️ Please load a model first. Go to Model Manager.")]
            yield history, "", session_id, get_sessions_for_sidebar()
            return

        # Create session if needed
        if not session_id:
            session = session_service.create_session(system_prompt=system_prompt)
            session_id = session["id"]

        # Add user message to UI
        history = history + [(message, None)]
        yield history, "", session_id, get_sessions_for_sidebar()

        # Add to session
        session_service.add_message(session_id, "user", message)

        # Build messages for model
        session = session_service.get_session(session_id)
        messages = []
        if session.get("system_prompt"):
            messages.append({"role": "system", "content": session["system_prompt"]})
        for msg in session.get("messages", []):
            messages.append({"role": msg["role"], "content": msg["content"]})

        # Generate response
        try:
            response = model_service.generate(messages, max_tokens, temperature)
            session_service.add_message(session_id, "assistant", response)
            history[-1] = (message, response)
        except Exception as e:
            logger.error("Chat", f"Generation error: {e}")
            history[-1] = (message, f"⚠️ Error: {e}")

        yield history, "", session_id, get_sessions_for_sidebar()

    def on_load_model(choice: str):
        """Load selected model"""
        if not choice or choice == "No models installed":
            return get_current_model_display(), "Select a model"

        # Extract model name (remove ● prefix if present)
        name = choice.replace("● ", "")

        model_service = get_model_service()
        models = model_service.get_installed_models()

        for m in models:
            if m["name"] == name:
                result = model_service.load_model(m["id"])
                if result["success"]:
                    logger.info("Chat", f"Loaded model: {m['name']}")
                    return get_current_model_display(), f"✅ Loaded: {m['name']}"
                else:
                    return get_current_model_display(), f"❌ Error: {result.get('error')}"

        return get_current_model_display(), "Model not found"

    def on_delete_session(session_id: str):
        """Delete current session"""
        if session_id:
            get_session_service().delete_session(session_id)
        return "", [], get_sessions_for_sidebar()

    def on_clear_session(session_id: str):
        """Clear current session messages"""
        if session_id:
            get_session_service().clear_session(session_id)
        return []

    # ──────────────────────────────────────────────────────────────────
    # BUILD UI
    # ──────────────────────────────────────────────────────────────────

    with gr.Row(elem_classes="chat-layout"):
        # ─────────────────────────────────────────────────────────────
        # SIDEBAR
        # ─────────────────────────────────────────────────────────────
        with gr.Column(scale=1, min_width=260, elem_classes="sidebar"):
            # New Chat Button
            btn_new_chat = gr.Button(
                "＋ New Chat",
                elem_classes="new-chat-btn",
                size="lg"
            )

            # Sessions List
            gr.Markdown("### Chats", elem_classes="session-group-title")
            sessions_list = gr.Dataframe(
                headers=["id", "Title"],
                value=get_sessions_for_sidebar(),
                interactive=False,
                show_label=False,
                row_count=20,
                col_count=(2, "fixed"),
                elem_classes="session-list"
            )

            # Sidebar Footer - Model & Settings
            with gr.Column(elem_classes="sidebar-footer"):
                gr.Markdown("**Model**", elem_classes="text-sm text-muted")
                model_dropdown = gr.Dropdown(
                    choices=get_model_choices(),
                    value=None,
                    label="",
                    show_label=False,
                    elem_classes="model-selector"
                )
                btn_load_model = gr.Button("Load", size="sm", elem_classes="btn-sm")
                model_status = gr.Textbox(
                    value="",
                    show_label=False,
                    interactive=False,
                    elem_classes="text-sm"
                )

        # ─────────────────────────────────────────────────────────────
        # MAIN CHAT AREA
        # ─────────────────────────────────────────────────────────────
        with gr.Column(scale=4, elem_classes="chat-container"):
            # Header
            with gr.Row(elem_classes="chat-header"):
                current_model = gr.Textbox(
                    value=get_current_model_display(),
                    show_label=False,
                    interactive=False,
                    elem_classes="model-badge"
                )

                with gr.Row():
                    btn_settings = gr.Button("⚙️", size="sm", elem_classes="input-btn")
                    btn_delete = gr.Button("🗑️", size="sm", elem_classes="input-btn")

            # Hidden state
            current_session_id = gr.State("")

            # Welcome Screen (shown when no chat)
            with gr.Column(visible=True, elem_classes="welcome-container") as welcome_screen:
                gr.Markdown("# TextAI", elem_classes="welcome-title")
                gr.Markdown("Your local AI assistant", elem_classes="welcome-subtitle")

                with gr.Row(elem_classes="suggestion-cards"):
                    btn_suggest1 = gr.Button("💡 Explain a concept", elem_classes="suggestion-card")
                    btn_suggest2 = gr.Button("✍️ Help me write", elem_classes="suggestion-card")
                    btn_suggest3 = gr.Button("💻 Code assistance", elem_classes="suggestion-card")
                    btn_suggest4 = gr.Button("🎭 Roleplay", elem_classes="suggestion-card")

            # Chat Messages
            with gr.Column(visible=False, elem_classes="chat-messages-container") as chat_screen:
                chatbot = gr.Chatbot(
                    value=[],
                    show_label=False,
                    height=500,
                    elem_classes="chat-messages",
                    layout="bubble"
                )

            # Input Area
            with gr.Row(elem_classes="input-container"):
                with gr.Row(elem_classes="input-wrapper"):
                    btn_attach = gr.Button("📎", size="sm", elem_classes="input-btn")
                    btn_voice = gr.Button("🎤", size="sm", elem_classes="input-btn")

                    chat_input = gr.Textbox(
                        placeholder="Message TextAI...",
                        show_label=False,
                        lines=1,
                        max_lines=5,
                        elem_classes="chat-input",
                        scale=10
                    )

                    btn_send = gr.Button("➤", variant="primary", elem_classes="send-btn")

            # Settings Panel (hidden by default)
            with gr.Column(visible=False, elem_classes="settings-panel") as settings_panel:
                gr.Markdown("### Settings")

                max_tokens = gr.Slider(
                    minimum=64, maximum=2048, value=512, step=64,
                    label="Max Tokens"
                )
                temperature = gr.Slider(
                    minimum=0.1, maximum=2.0, value=0.7, step=0.1,
                    label="Temperature"
                )
                system_prompt = gr.TextArea(
                    value=DEFAULT_SYSTEM_PROMPT,
                    label="System Prompt",
                    lines=3
                )
                btn_close_settings = gr.Button("Close", size="sm")

    # ──────────────────────────────────────────────────────────────────
    # WIRE UP EVENTS
    # ──────────────────────────────────────────────────────────────────

    # New Chat
    btn_new_chat.click(
        on_new_chat,
        inputs=[system_prompt],
        outputs=[current_session_id, chatbot, sessions_list, chat_input, chat_screen, welcome_screen]
    )

    # Session select
    sessions_list.select(
        on_session_select,
        inputs=[sessions_list],
        outputs=[current_session_id, chatbot, chat_screen, welcome_screen]
    )

    # Send message
    btn_send.click(
        on_send_message,
        inputs=[current_session_id, chat_input, chatbot, max_tokens, temperature, system_prompt],
        outputs=[chatbot, chat_input, current_session_id, sessions_list]
    )
    chat_input.submit(
        on_send_message,
        inputs=[current_session_id, chat_input, chatbot, max_tokens, temperature, system_prompt],
        outputs=[chatbot, chat_input, current_session_id, sessions_list]
    )

    # Load model
    btn_load_model.click(
        on_load_model,
        inputs=[model_dropdown],
        outputs=[current_model, model_status]
    )

    # Settings toggle
    btn_settings.click(
        lambda: gr.update(visible=True),
        outputs=[settings_panel]
    )
    btn_close_settings.click(
        lambda: gr.update(visible=False),
        outputs=[settings_panel]
    )

    # Delete session
    btn_delete.click(
        on_delete_session,
        inputs=[current_session_id],
        outputs=[current_session_id, chatbot, sessions_list]
    )

    # Suggestion buttons: start a new chat with the input pre-filled
    for btn, prompt in [
        (btn_suggest1, "Explain the concept of machine learning in simple terms."),
        (btn_suggest2, "Help me write a professional email."),
        (btn_suggest3, "Help me debug this code."),
        (btn_suggest4, "Let's do a creative roleplay.")
    ]:
        def _on_suggest(p=prompt):
            # Create the chat, then swap the empty input value for the prompt
            # so the six return values match the six outputs below
            sid, history, sessions, _, show_chat, hide_welcome = on_new_chat()
            return sid, history, sessions, p, show_chat, hide_welcome

        btn.click(
            _on_suggest,
            outputs=[current_session_id, chatbot, sessions_list, chat_input, chat_screen, welcome_screen]
        )

    # Return components for external access
    return {
        "chatbot": chatbot,
        "input": chat_input,
        "sessions": sessions_list,
        "model_dropdown": model_dropdown,
        "current_model": current_model,
        "session_id": current_session_id,
        "refresh_sessions": lambda: get_sessions_for_sidebar(),
        "refresh_models": lambda: get_model_choices()
    }
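One detail worth calling out: on_send_message is a generator, so Gradio re-renders its outputs at every yield - first the user bubble with a pending reply, then the completed turn. A stripped-down, standalone illustration of the same pattern (not tied to the services above; the echo reply stands in for model generation):

import time
import gradio as gr

def respond(message, history):
    history = history + [(message, None)]   # show the user turn immediately
    yield history, ""                       # first render: pending assistant reply
    time.sleep(0.5)                         # stand-in for a blocking generate() call
    history[-1] = (message, f"Echo: {message}")
    yield history, ""                       # second render: reply filled in

with gr.Blocks() as demo:
    chatbot = gr.Chatbot()
    box = gr.Textbox()
    box.submit(respond, inputs=[box, chatbot], outputs=[chatbot, box])

demo.launch()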
ui/models.py
ADDED
@@ -0,0 +1,430 @@
| 1 |
+
"""
|
| 2 |
+
Model Manager UI Component
|
| 3 |
+
Unified model management with HF search modal
|
| 4 |
+
"""
|
| 5 |
+
import gradio as gr
|
| 6 |
+
from typing import List, Dict, Optional
|
| 7 |
+
|
| 8 |
+
from ..core.models import get_model_service
|
| 9 |
+
from ..core.state import get_state
|
| 10 |
+
from ..core.logger import logger
|
| 11 |
+
from ..core.config import RECOMMENDED_QUANTS
|
| 12 |
+
|
| 13 |
+
|
| 14 |
+
def build_model_manager_ui():
|
| 15 |
+
"""Build the model manager interface with HF search modal"""
|
| 16 |
+
|
| 17 |
+
# ββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββ
|
| 18 |
+
# HELPER FUNCTIONS
|
| 19 |
+
# ββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββ
|
| 20 |
+
|
| 21 |
+
def get_installed_models_table() -> List[List]:
|
| 22 |
+
"""Get installed models for table display"""
|
| 23 |
+
service = get_model_service()
|
| 24 |
+
models = service.get_installed_models()
|
| 25 |
+
loaded_id = get_state().get_loaded_model_id()
|
| 26 |
+
|
| 27 |
+
rows = []
|
| 28 |
+
for m in models:
|
| 29 |
+
status = "β Loaded" if m["id"] == loaded_id else "Ready"
|
| 30 |
+
size_mb = m.get("size_bytes", 0) / (1024 * 1024)
|
| 31 |
+
rows.append([
|
| 32 |
+
m["id"],
|
| 33 |
+
m["name"],
|
| 34 |
+
m.get("model_type", "gguf").upper(),
|
| 35 |
+
f"{size_mb:.1f} MB",
|
| 36 |
+
m.get("quant", "-"),
|
| 37 |
+
status
|
| 38 |
+
])
|
| 39 |
+
return rows
|
| 40 |
+
|
| 41 |
+
def get_loaded_model_display() -> str:
|
| 42 |
+
"""Get currently loaded model name"""
|
| 43 |
+
service = get_model_service()
|
| 44 |
+
model = service.get_loaded_model()
|
| 45 |
+
return f"β {model['name']}" if model else "No model loaded"
|
| 46 |
+
|
| 47 |
+
# ββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββ
|
| 48 |
+
# EVENT HANDLERS - INSTALLED MODELS
|
| 49 |
+
# ββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββ
|
| 50 |
+
|
| 51 |
+
def on_select_model(evt: gr.SelectData, table_data) -> str:
|
| 52 |
+
"""Select model from table"""
|
| 53 |
+
try:
|
| 54 |
+
row_idx = evt.index[0] if isinstance(evt.index, (list, tuple)) else evt.index
|
| 55 |
+
if table_data and row_idx < len(table_data):
|
| 56 |
+
model_id = table_data[row_idx][0]
|
| 57 |
+
logger.info("ModelMgr", f"Selected: {model_id}")
|
| 58 |
+
return model_id
|
| 59 |
+
except Exception as e:
|
| 60 |
+
logger.error("ModelMgr", f"Select error: {e}")
|
| 61 |
+
return ""
|
| 62 |
+
|
| 63 |
+
def on_load_model(model_id: str):
|
| 64 |
+
"""Load selected model"""
|
| 65 |
+
if not model_id:
|
| 66 |
+
return get_installed_models_table(), get_loaded_model_display(), "Select a model first"
|
| 67 |
+
|
| 68 |
+
service = get_model_service()
|
| 69 |
+
result = service.load_model(model_id)
|
| 70 |
+
|
| 71 |
+
if result["success"]:
|
| 72 |
+
return get_installed_models_table(), get_loaded_model_display(), f"β Loaded: {result.get('name', model_id)}"
|
| 73 |
+
else:
|
| 74 |
+
return get_installed_models_table(), get_loaded_model_display(), f"β Error: {result.get('error')}"
|
| 75 |
+
|
| 76 |
+
def on_unload_model():
|
| 77 |
+
"""Unload current model"""
|
| 78 |
+
service = get_model_service()
|
| 79 |
+
service.unload_model()
|
| 80 |
+
return get_installed_models_table(), get_loaded_model_display(), "Model unloaded"
|
| 81 |
+
|
| 82 |
+
def on_delete_model(model_id: str):
|
| 83 |
+
"""Delete selected model"""
|
| 84 |
+
if not model_id:
|
| 85 |
+
return get_installed_models_table(), "", "Select a model first"
|
| 86 |
+
|
| 87 |
+
service = get_model_service()
|
| 88 |
+
result = service.delete_model(model_id)
|
| 89 |
+
|
| 90 |
+
if result["success"]:
|
| 91 |
+
return get_installed_models_table(), "", f"β {result.get('message')}"
|
| 92 |
+
else:
|
| 93 |
+
return get_installed_models_table(), model_id, f"β Error: {result.get('error')}"
|
| 94 |
+
|
| 95 |
+
def on_refresh():
|
| 96 |
+
"""Refresh models table"""
|
| 97 |
+
return get_installed_models_table(), get_loaded_model_display()
|
| 98 |
+
|
| 99 |
+
# ββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββ
|
| 100 |
+
# EVENT HANDLERS - HF SEARCH MODAL
|
| 101 |
+
# ββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββ
|
| 102 |
+
|
| 103 |
+
def on_search_hf(query: str, max_params: str):
|
| 104 |
+
"""Search HuggingFace for models"""
|
| 105 |
+
logger.info("ModelMgr", f"HF Search: {query}, max_params={max_params}")
|
| 106 |
+
|
| 107 |
+
service = get_model_service()
|
| 108 |
+
max_p = float(max_params) if max_params != "any" else 100.0
|
| 109 |
+
results, status = service.search_hf_models(query, max_p, limit=15)
|
| 110 |
+
|
| 111 |
+
# Format for table
|
| 112 |
+
rows = []
|
| 113 |
+
for r in results:
|
| 114 |
+
params = f"{r['params_b']}B" if r.get('params_b') else "?"
|
| 115 |
+
size = f"~{r['est_size_gb']}GB" if r.get('est_size_gb') else "?"
|
| 116 |
+
compat = r.get('compatibility', {}).get('label', '?')
|
| 117 |
+
downloads = f"{r.get('downloads', 0):,}"
|
| 118 |
+
installed = "β" if r.get('is_installed') else ""
|
| 119 |
+
|
| 120 |
+
rows.append([r['id'], params, size, compat, downloads, installed])
|
| 121 |
+
|
| 122 |
+
return rows, status
|
| 123 |
+
|
| 124 |
+
def on_select_hf_model(evt: gr.SelectData, table_data) -> tuple:
|
| 125 |
+
"""Select model from HF search results"""
|
| 126 |
+
try:
|
| 127 |
+
row_idx = evt.index[0] if isinstance(evt.index, (list, tuple)) else evt.index
|
| 128 |
+
if table_data and row_idx < len(table_data):
|
| 129 |
+
repo_id = table_data[row_idx][0]
|
| 130 |
+
logger.info("ModelMgr", f"Selected HF model: {repo_id}")
|
| 131 |
+
return repo_id, gr.update(visible=True)
|
| 132 |
+
except Exception as e:
|
| 133 |
+
logger.error("ModelMgr", f"HF select error: {e}")
|
| 134 |
+
return "", gr.update(visible=False)
|
| 135 |
+
|
| 136 |
+
def on_get_files(repo_id: str):
|
| 137 |
+
"""Get GGUF files for selected model"""
|
| 138 |
+
if not repo_id:
|
| 139 |
+
return [], "Select a model first"
|
| 140 |
+
|
| 141 |
+
logger.info("ModelMgr", f"Getting files for: {repo_id}")
|
| 142 |
+
service = get_model_service()
|
| 143 |
+
files = service.get_hf_model_files(repo_id)
|
| 144 |
+
|
| 145 |
+
if not files:
|
| 146 |
+
return [], "No GGUF files found"
|
| 147 |
+
|
| 148 |
+
# Format for display with radio selection
|
| 149 |
+
rows = []
|
| 150 |
+
for f in files:
|
| 151 |
+
rec = "β
Recommended" if f["recommended"] else ""
|
| 152 |
+
installed = "β Installed" if f["is_installed"] else ""
|
| 153 |
+
rows.append([f["filename"], f["quant"], rec, installed])
|
| 154 |
+
|
| 155 |
+
return rows, f"Found {len(files)} files"
|
| 156 |
+
|
| 157 |
+
def on_select_file(evt: gr.SelectData, table_data) -> str:
|
| 158 |
+
"""Select file from files list"""
|
| 159 |
+
try:
|
| 160 |
+
row_idx = evt.index[0] if isinstance(evt.index, (list, tuple)) else evt.index
|
| 161 |
+
if table_data and row_idx < len(table_data):
|
| 162 |
+
filename = table_data[row_idx][0]
|
| 163 |
+
return filename
|
| 164 |
+
except:
|
| 165 |
+
pass
|
| 166 |
+
return ""
|
| 167 |
+
|
| 168 |
+
def on_download_model(repo_id: str, filename: str):
|
| 169 |
+
"""Download selected model"""
|
| 170 |
+
if not repo_id or not filename:
|
| 171 |
+
return "Select a model and file first", get_installed_models_table()
|
| 172 |
+
|
| 173 |
+
logger.info("ModelMgr", f"Downloading: {repo_id}/{filename}")
|
| 174 |
+
service = get_model_service()
|
| 175 |
+
result = service.download_model(repo_id, filename)
|
| 176 |
+
|
| 177 |
+
if result["success"]:
|
| 178 |
+
return f"β {result.get('message')}", get_installed_models_table()
|
| 179 |
+
elif result.get("duplicate"):
|
| 180 |
+
return f"β οΈ {result.get('error')} - Choose a different quantization.", get_installed_models_table()
|
| 181 |
+
else:
|
| 182 |
+
return f"β Error: {result.get('error')}", get_installed_models_table()
|
| 183 |
+
|
| 184 |
+
def on_auto_download(repo_id: str):
|
| 185 |
+
"""Auto-download best quantization"""
|
| 186 |
+
if not repo_id:
|
| 187 |
+
return "Select a model first", get_installed_models_table()
|
| 188 |
+
|
| 189 |
+
service = get_model_service()
|
| 190 |
+
files = service.get_hf_model_files(repo_id)
|
| 191 |
+
|
| 192 |
+
if not files:
|
| 193 |
+
return "No GGUF files found", get_installed_models_table()
|
| 194 |
+
|
| 195 |
+
# Find best file (Q4_K_M preferred)
|
| 196 |
+
best_file = None
|
| 197 |
+
for quant in RECOMMENDED_QUANTS + ["Q4_0", "Q5_0"]:
|
| 198 |
+
for f in files:
|
| 199 |
+
if f["quant"] == quant and not f["is_installed"]:
|
| 200 |
+
best_file = f["filename"]
|
| 201 |
+
break
|
| 202 |
+
if best_file:
|
| 203 |
+
break
|
| 204 |
+
|
| 205 |
+
if not best_file:
|
| 206 |
+
# Try first non-installed file
|
| 207 |
+
for f in files:
|
| 208 |
+
if not f["is_installed"]:
|
| 209 |
+
best_file = f["filename"]
|
| 210 |
+
break
|
| 211 |
+
|
| 212 |
+
if not best_file:
|
| 213 |
+
return "β οΈ All quantizations already installed", get_installed_models_table()
|
| 214 |
+
|
| 215 |
+
return on_download_model(repo_id, best_file)
|
| 216 |
+
|
| 217 |
+
# ββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββ
|
| 218 |
+
# BUILD UI
|
| 219 |
+
# ββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββ
|
| 220 |
+
|
| 221 |
+
with gr.Column(elem_classes="model-manager"):
|
| 222 |
+
gr.Markdown("# Model Manager")
|
| 223 |
+
|
| 224 |
+
# Current Model Status
|
| 225 |
+
with gr.Row():
|
| 226 |
+
loaded_model_display = gr.Textbox(
|
| 227 |
+
value=get_loaded_model_display(),
|
| 228 |
+
label="Currently Loaded",
|
| 229 |
+
interactive=False,
|
| 230 |
+
scale=3
|
| 231 |
+
)
|
| 232 |
+
btn_unload = gr.Button("Unload", size="sm", variant="stop")
|
| 233 |
+
btn_refresh = gr.Button("π Refresh", size="sm")
|
| 234 |
+
|
| 235 |
+
gr.Markdown("---")
|
| 236 |
+
|
| 237 |
+
# ββββοΏ½οΏ½ββββββββββββββββββββββββββββββββββββββββββββββββββββββββ
|
| 238 |
+
# INSTALLED MODELS TABLE
|
| 239 |
+
# βββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββ
|
| 240 |
+
gr.Markdown("### Installed Models")
|
| 241 |
+
gr.Markdown("*Click a row to select, then use actions below*")
|
| 242 |
+
|
| 243 |
+
installed_table = gr.Dataframe(
|
| 244 |
+
headers=["ID", "Name", "Type", "Size", "Quant", "Status"],
|
| 245 |
+
value=get_installed_models_table(),
|
| 246 |
+
interactive=False,
|
| 247 |
+
row_count=6,
|
| 248 |
+
elem_classes="model-table"
|
| 249 |
+
)
|
| 250 |
+
|
| 251 |
+
selected_model_id = gr.Textbox(
|
| 252 |
+
label="Selected",
|
| 253 |
+
interactive=False,
|
| 254 |
+
visible=True
|
| 255 |
+
)
|
| 256 |
+
|
| 257 |
+
with gr.Row():
|
| 258 |
+
btn_load = gr.Button("βΆ Load Selected", variant="primary")
|
| 259 |
+
btn_delete = gr.Button("ποΈ Delete Selected", variant="stop")
|
| 260 |
+
    btn_configure = gr.Button("⚙️ Configure")

    action_status = gr.Textbox(
        label="",
        show_label=False,
        interactive=False
    )

    gr.Markdown("---")

    # ─────────────────────────────────────────────────────────────
    # ADD FROM HUGGINGFACE
    # ─────────────────────────────────────────────────────────────
    gr.Markdown("### Add from HuggingFace")

    with gr.Row():
        hf_search_input = gr.Textbox(
            placeholder="Search models (tinyllama, phi, mistral...)",
            show_label=False,
            scale=4
        )
        hf_max_params = gr.Dropdown(
            choices=[("< 3B (Fast)", "3"), ("< 7B (OK)", "7"), ("Any", "any")],
            value="7",
            label="Size",
            scale=1
        )
        btn_search = gr.Button("🔍 Search", variant="primary")

    hf_status = gr.Textbox(
        label="",
        show_label=False,
        interactive=False
    )

    # Search results
    hf_results_table = gr.Dataframe(
        headers=["Model ID", "Params", "Est. Size", "Compat", "Downloads", "Installed"],
        value=[],
        interactive=False,
        row_count=8,
        elem_classes="model-table"
    )

    selected_hf_repo = gr.Textbox(
        label="Selected Model",
        interactive=False
    )

    # File selection panel (shown after selecting a model)
    with gr.Column(visible=False) as file_panel:
        gr.Markdown("#### Select Quantization")

        files_table = gr.Dataframe(
            headers=["Filename", "Quant", "Recommended", "Status"],
            value=[],
            interactive=False,
            row_count=6
        )

        selected_file = gr.Textbox(
            label="Selected File",
            interactive=False
        )

        with gr.Row():
            btn_download = gr.Button("⬇️ Download Selected", variant="primary")
            btn_auto_download = gr.Button("⚡ Auto Download (Best Q4)")
            btn_close_files = gr.Button("Close")

        download_status = gr.Textbox(
            label="",
            show_label=False,
            interactive=False
        )

    gr.Markdown("---")
    gr.Markdown("**Legend:** ✅ Best (<1.5B) | ✅ Good (<3B) | ⚠️ OK (<7B) | ❌ Too Large (>7B)")

    # ──────────────────────────────────────────────────────────────────
    # WIRE UP EVENTS
    # ──────────────────────────────────────────────────────────────────

    # Installed models table
    installed_table.select(
        on_select_model,
        inputs=[installed_table],
        outputs=[selected_model_id]
    )

    btn_load.click(
        on_load_model,
        inputs=[selected_model_id],
        outputs=[installed_table, loaded_model_display, action_status]
    )

    btn_unload.click(
        on_unload_model,
        outputs=[installed_table, loaded_model_display, action_status]
    )

    btn_delete.click(
        on_delete_model,
        inputs=[selected_model_id],
        outputs=[installed_table, selected_model_id, action_status]
    )

    btn_refresh.click(
        on_refresh,
        outputs=[installed_table, loaded_model_display]
    )

    # HF search
    btn_search.click(
        on_search_hf,
        inputs=[hf_search_input, hf_max_params],
        outputs=[hf_results_table, hf_status]
    )

    hf_search_input.submit(
        on_search_hf,
        inputs=[hf_search_input, hf_max_params],
        outputs=[hf_results_table, hf_status]
    )

    # HF model selection
    hf_results_table.select(
        on_select_hf_model,
        inputs=[hf_results_table],
        outputs=[selected_hf_repo, file_panel]
    )

    # When an HF model is selected, load its files
    selected_hf_repo.change(
        on_get_files,
        inputs=[selected_hf_repo],
        outputs=[files_table, download_status]
    )

    # File selection
    files_table.select(
        on_select_file,
        inputs=[files_table],
        outputs=[selected_file]
    )

    # Download
    btn_download.click(
        on_download_model,
        inputs=[selected_hf_repo, selected_file],
        outputs=[download_status, installed_table]
    )

    btn_auto_download.click(
        on_auto_download,
        inputs=[selected_hf_repo],
        outputs=[download_status, installed_table]
    )

    btn_close_files.click(
        lambda: gr.update(visible=False),
        outputs=[file_panel]
    )

    # Return components for external access
    return {
        "installed_table": installed_table,
        "loaded_display": loaded_model_display,
        "selected_id": selected_model_id,
        "refresh": on_refresh
    }
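Usage note (not part of this commit): the dict returned above is the tab's public surface — a parent layout reaches these components through it instead of module-level globals. A minimal sketch of a caller, assuming a factory named create_models_tab and the surrounding Blocks/Tab scaffold (both names are illustrative, not confirmed by this repo):

    import gradio as gr
    from ui.models import create_models_tab  # assumed factory name

    with gr.Blocks() as demo:
        with gr.Tab("Models") as models_tab:
            components = create_models_tab()

        # Re-run the refresh handler whenever the tab is opened, mirroring
        # what the Refresh button is wired to above.
        models_tab.select(
            components["refresh"],
            outputs=[components["installed_table"], components["loaded_display"]],
        )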
ui/theme.py
ADDED
@@ -0,0 +1,607 @@
"""
Theme and CSS - ChatGPT/Grok inspired dark theme
"""
import gradio as gr

THEME_CSS = """
/* ───────────────────────────────────────────────────────────────────────────
   ROOT VARIABLES
   ─────────────────────────────────────────────────────────────────────────── */
:root {
    --bg-primary: #0d0d0d;
    --bg-secondary: #171717;
    --bg-tertiary: #212121;
    --bg-hover: #2a2a2a;
    --bg-active: #333333;

    --text-primary: #ececec;
    --text-secondary: #9a9a9a;
    --text-muted: #666666;

    --border-color: #333333;
    --border-light: #444444;

    --accent-primary: #10a37f;
    --accent-hover: #0d8c6d;
    --accent-light: rgba(16, 163, 127, 0.1);

    --error: #ef4444;
    --warning: #f59e0b;
    --success: #10b981;

    --sidebar-width: 260px;
    --header-height: 56px;
    --input-height: 52px;

    --radius-sm: 6px;
    --radius-md: 12px;
    --radius-lg: 16px;
    --radius-full: 9999px;

    --shadow-sm: 0 1px 2px rgba(0,0,0,0.3);
    --shadow-md: 0 4px 6px rgba(0,0,0,0.4);
    --shadow-lg: 0 10px 15px rgba(0,0,0,0.5);
}

/* ───────────────────────────────────────────────────────────────────────────
   GLOBAL STYLES
   ─────────────────────────────────────────────────────────────────────────── */
body, .gradio-container {
    background: var(--bg-primary) !important;
    color: var(--text-primary) !important;
    font-family: 'Inter', -apple-system, BlinkMacSystemFont, sans-serif !important;
}

.gradio-container {
    max-width: 100% !important;
    padding: 0 !important;
    margin: 0 !important;
}

/* Hide Gradio footer */
footer { display: none !important; }

/* ───────────────────────────────────────────────────────────────────────────
   SIDEBAR
   ─────────────────────────────────────────────────────────────────────────── */
.sidebar {
    background: var(--bg-secondary) !important;
    border-right: 1px solid var(--border-color) !important;
    height: 100vh;
    display: flex;
    flex-direction: column;
}

.sidebar-header {
    padding: 12px 16px;
    border-bottom: 1px solid var(--border-color);
}

.new-chat-btn {
    width: 100%;
    background: var(--bg-tertiary) !important;
    border: 1px solid var(--border-color) !important;
    color: var(--text-primary) !important;
    padding: 12px 16px !important;
    border-radius: var(--radius-md) !important;
    font-weight: 500 !important;
    transition: all 0.2s ease !important;
}

.new-chat-btn:hover {
    background: var(--bg-hover) !important;
}

.session-list {
    flex: 1;
    overflow-y: auto;
    padding: 8px;
}

.session-item {
    padding: 10px 14px;
    margin: 2px 0;
    border-radius: var(--radius-md);
    cursor: pointer;
    color: var(--text-secondary);
    font-size: 14px;
    transition: all 0.15s ease;
    white-space: nowrap;
    overflow: hidden;
    text-overflow: ellipsis;
}

.session-item:hover {
    background: var(--bg-hover);
    color: var(--text-primary);
}

.session-item.active {
    background: var(--bg-active);
    color: var(--text-primary);
}

.session-group-title {
    font-size: 11px;
    text-transform: uppercase;
    color: var(--text-muted);
    padding: 16px 14px 6px;
    font-weight: 600;
    letter-spacing: 0.5px;
}

.sidebar-footer {
    padding: 12px;
    border-top: 1px solid var(--border-color);
}

/* ───────────────────────────────────────────────────────────────────────────
   CHAT AREA
   ─────────────────────────────────────────────────────────────────────────── */
.chat-container {
    display: flex;
    flex-direction: column;
    height: 100vh;
    background: var(--bg-primary);
}

.chat-header {
    height: var(--header-height);
    padding: 0 20px;
    display: flex;
    align-items: center;
    justify-content: space-between;
    border-bottom: 1px solid var(--border-color);
}

.model-selector {
    background: var(--bg-tertiary) !important;
    border: 1px solid var(--border-color) !important;
    border-radius: var(--radius-full) !important;
    padding: 8px 16px !important;
    font-size: 13px !important;
    color: var(--text-primary) !important;
}

.chat-messages {
    flex: 1;
    overflow-y: auto;
    padding: 20px;
    max-width: 800px;
    margin: 0 auto;
    width: 100%;
}

.message {
    margin-bottom: 24px;
    line-height: 1.6;
}

.message.user {
    color: var(--text-primary);
}

.message.assistant {
    color: var(--text-secondary);
}

.message-content {
    white-space: pre-wrap;
    word-break: break-word;
}

/* Welcome screen */
.welcome-container {
    flex: 1;
    display: flex;
    flex-direction: column;
    align-items: center;
    justify-content: center;
    padding: 40px;
    text-align: center;
}

.welcome-title {
    font-size: 28px;
    font-weight: 600;
    margin-bottom: 8px;
    color: var(--text-primary);
}

.welcome-subtitle {
    font-size: 16px;
    color: var(--text-muted);
    margin-bottom: 32px;
}

.suggestion-cards {
    display: grid;
    grid-template-columns: repeat(2, 1fr);
    gap: 12px;
    max-width: 600px;
}

.suggestion-card {
    background: var(--bg-secondary);
    border: 1px solid var(--border-color);
    border-radius: var(--radius-md);
    padding: 16px;
    cursor: pointer;
    text-align: left;
    transition: all 0.2s ease;
}

.suggestion-card:hover {
    background: var(--bg-tertiary);
    border-color: var(--border-light);
}

/* ───────────────────────────────────────────────────────────────────────────
   INPUT AREA
   ─────────────────────────────────────────────────────────────────────────── */
.input-container {
    padding: 16px 20px 24px;
    max-width: 800px;
    margin: 0 auto;
    width: 100%;
}

.input-wrapper {
    background: var(--bg-secondary) !important;
    border: 1px solid var(--border-color) !important;
    border-radius: var(--radius-lg) !important;
    padding: 4px !important;
    display: flex;
    align-items: flex-end;
    gap: 8px;
}

.input-wrapper:focus-within {
    border-color: var(--accent-primary) !important;
    box-shadow: 0 0 0 2px var(--accent-light) !important;
}

.chat-input textarea {
    background: transparent !important;
    border: none !important;
    color: var(--text-primary) !important;
    font-size: 15px !important;
    padding: 12px 16px !important;
    resize: none !important;
    min-height: 24px !important;
    max-height: 200px !important;
}

.chat-input textarea::placeholder {
    color: var(--text-muted) !important;
}

.input-actions {
    display: flex;
    gap: 4px;
    padding: 8px;
}

.input-btn {
    width: 36px !important;
    height: 36px !important;
    min-width: 36px !important;
    padding: 0 !important;
    border-radius: var(--radius-md) !important;
    background: transparent !important;
    border: none !important;
    color: var(--text-muted) !important;
    cursor: pointer;
    transition: all 0.15s ease;
}

.input-btn:hover {
    background: var(--bg-hover) !important;
    color: var(--text-primary) !important;
}

.send-btn {
    background: var(--accent-primary) !important;
    color: white !important;
}

.send-btn:hover {
    background: var(--accent-hover) !important;
}

.send-btn:disabled {
    background: var(--bg-tertiary) !important;
    color: var(--text-muted) !important;
}

/* ───────────────────────────────────────────────────────────────────────────
   SETTINGS PANEL (Dropdown)
   ─────────────────────────────────────────────────────────────────────────── */
.settings-panel {
    position: absolute;
    top: var(--header-height);
    right: 20px;
    background: var(--bg-secondary);
    border: 1px solid var(--border-color);
    border-radius: var(--radius-md);
    padding: 16px;
    min-width: 280px;
    box-shadow: var(--shadow-lg);
    z-index: 100;
}

.settings-section {
    margin-bottom: 16px;
}

.settings-label {
    font-size: 12px;
    color: var(--text-muted);
    margin-bottom: 6px;
    text-transform: uppercase;
    font-weight: 600;
}

/* ───────────────────────────────────────────────────────────────────────────
   MODEL MANAGER
   ─────────────────────────────────────────────────────────────────────────── */
.model-manager {
    padding: 24px;
    max-width: 1000px;
    margin: 0 auto;
}

.model-table {
    background: var(--bg-secondary) !important;
    border: 1px solid var(--border-color) !important;
    border-radius: var(--radius-md) !important;
}

.model-table th {
    background: var(--bg-tertiary) !important;
    color: var(--text-secondary) !important;
    font-weight: 600 !important;
    text-transform: uppercase !important;
    font-size: 11px !important;
    letter-spacing: 0.5px !important;
}

.model-table td {
    border-bottom: 1px solid var(--border-color) !important;
}

.model-table tr:hover td {
    background: var(--bg-hover) !important;
}

/* Radio selection in table */
.model-radio {
    accent-color: var(--accent-primary);
}

/* ───────────────────────────────────────────────────────────────────────────
   MODAL
   ─────────────────────────────────────────────────────────────────────────── */
.modal-overlay {
    position: fixed;
    inset: 0;
    background: rgba(0, 0, 0, 0.7);
    display: flex;
    align-items: center;
    justify-content: center;
    z-index: 1000;
}

.modal {
    background: var(--bg-secondary);
    border: 1px solid var(--border-color);
    border-radius: var(--radius-lg);
    max-width: 600px;
    width: 90%;
    max-height: 80vh;
    overflow: hidden;
    box-shadow: var(--shadow-lg);
}

.modal-header {
    padding: 16px 20px;
    border-bottom: 1px solid var(--border-color);
    display: flex;
    justify-content: space-between;
    align-items: center;
}

.modal-title {
    font-size: 18px;
    font-weight: 600;
}

.modal-close {
    background: none;
    border: none;
    color: var(--text-muted);
    cursor: pointer;
    font-size: 20px;
}

.modal-body {
    padding: 20px;
    overflow-y: auto;
    max-height: 60vh;
}

.modal-footer {
    padding: 16px 20px;
    border-top: 1px solid var(--border-color);
    display: flex;
    justify-content: flex-end;
    gap: 12px;
}

/* ───────────────────────────────────────────────────────────────────────────
   BUTTONS
   ─────────────────────────────────────────────────────────────────────────── */
.btn {
    padding: 10px 20px;
    border-radius: var(--radius-md);
    font-weight: 500;
    font-size: 14px;
    cursor: pointer;
    transition: all 0.2s ease;
    border: none;
}

.btn-primary {
    background: var(--accent-primary) !important;
    color: white !important;
}

.btn-primary:hover {
    background: var(--accent-hover) !important;
}

.btn-secondary {
    background: var(--bg-tertiary) !important;
    color: var(--text-primary) !important;
    border: 1px solid var(--border-color) !important;
}

.btn-secondary:hover {
    background: var(--bg-hover) !important;
}

.btn-danger {
    background: var(--error) !important;
    color: white !important;
}

.btn-sm {
    padding: 6px 12px !important;
    font-size: 13px !important;
}

/* ───────────────────────────────────────────────────────────────────────────
   FORM ELEMENTS
   ─────────────────────────────────────────────────────────────────────────── */
input[type="text"],
input[type="search"],
textarea,
select {
    background: var(--bg-tertiary) !important;
    border: 1px solid var(--border-color) !important;
    border-radius: var(--radius-md) !important;
    color: var(--text-primary) !important;
    padding: 10px 14px !important;
    font-size: 14px !important;
}

input:focus,
textarea:focus,
select:focus {
    border-color: var(--accent-primary) !important;
    outline: none !important;
    box-shadow: 0 0 0 2px var(--accent-light) !important;
}

/* Slider */
input[type="range"] {
    accent-color: var(--accent-primary);
}

/* Checkbox */
input[type="checkbox"] {
    accent-color: var(--accent-primary);
}

/* ───────────────────────────────────────────────────────────────────────────
   STATUS BADGES
   ─────────────────────────────────────────────────────────────────────────── */
.badge {
    display: inline-block;
    padding: 4px 10px;
    border-radius: var(--radius-full);
    font-size: 12px;
    font-weight: 500;
}

.badge-success {
    background: rgba(16, 185, 129, 0.15);
    color: var(--success);
}

.badge-warning {
    background: rgba(245, 158, 11, 0.15);
    color: var(--warning);
}

.badge-error {
    background: rgba(239, 68, 68, 0.15);
    color: var(--error);
}

.badge-info {
    background: var(--accent-light);
    color: var(--accent-primary);
}

/* ───────────────────────────────────────────────────────────────────────────
   UTILITIES
   ─────────────────────────────────────────────────────────────────────────── */
.hidden { display: none !important; }
.flex { display: flex !important; }
.flex-1 { flex: 1 !important; }
.gap-2 { gap: 8px !important; }
.gap-4 { gap: 16px !important; }
.items-center { align-items: center !important; }
.justify-between { justify-content: space-between !important; }
.text-center { text-align: center !important; }
.text-sm { font-size: 13px !important; }
.text-muted { color: var(--text-muted) !important; }
.mb-4 { margin-bottom: 16px !important; }
.mt-4 { margin-top: 16px !important; }
.p-4 { padding: 16px !important; }
.w-full { width: 100% !important; }

/* Scrollbar */
::-webkit-scrollbar {
    width: 8px;
    height: 8px;
}

::-webkit-scrollbar-track {
    background: transparent;
}

::-webkit-scrollbar-thumb {
    background: var(--border-color);
    border-radius: 4px;
}

::-webkit-scrollbar-thumb:hover {
    background: var(--border-light);
}
"""


def get_theme():
    """Get Gradio theme (Soft dark)"""
    return gr.themes.Soft(
        primary_hue="emerald",
        secondary_hue="gray",
        neutral_hue="gray",
        font=gr.themes.GoogleFont("Inter"),
    ).set(
        body_background_fill="#0d0d0d",
        body_background_fill_dark="#0d0d0d",
        block_background_fill="#171717",
        block_background_fill_dark="#171717",
        block_border_width="0px",
        block_label_background_fill="#171717",
        block_label_background_fill_dark="#171717",
        input_background_fill="#212121",
        input_background_fill_dark="#212121",
        button_primary_background_fill="#10a37f",
        button_primary_background_fill_dark="#10a37f",
        button_primary_background_fill_hover="#0d8c6d",
        button_primary_background_fill_hover_dark="#0d8c6d",
    )
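Usage note (not part of this commit): THEME_CSS and get_theme() are meant to be applied together — the Theme object restyles Gradio's built-in components, while the CSS string covers the custom classes (.sidebar, .chat-container, .model-table, ...) referenced by the layout modules. A minimal entry-point sketch, assuming app.py wires them in roughly like this (the placeholder layout is illustrative):

    import gradio as gr
    from ui.theme import THEME_CSS, get_theme

    # theme= restyles built-in widgets; css= injects the custom classes.
    with gr.Blocks(theme=get_theme(), css=THEME_CSS, title="TextAI v2") as demo:
        gr.Markdown("## TextAI v2")  # placeholder for the real layout

    if __name__ == "__main__":
        demo.launch()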