rbt2025 committed on
Commit 287fd15 · verified · 1 Parent(s): ba3cc2b

Deploy TextAI v2 - Clean architecture

Files changed (3)
  1. app.py +1559 -158
  2. requirements.txt +0 -3
  3. ui/theme.py +27 -21
app.py CHANGED
@@ -1,199 +1,1600 @@
  """
- TextAI v2.0
- Clean, Professional AI Chat Interface
  """
  import gradio as gr

- from core.config import VERSION, APP_TITLE
- from core.logger import logger
- from core.state import get_state
- from ui.theme import THEME_CSS, get_theme
- from ui.chat import build_chat_ui
- from ui.models import build_model_manager_ui
- from api import api_health, api_get_status
-
-
- def create_app():
-     """Create the Gradio application"""
-
-     # Initialize state on startup
-     state = get_state()
-     state.sync_with_filesystem()
-     logger.info("App", f"TextAI v{VERSION} starting")
-
-     # Gradio 6.0: theme/css moved to launch(), but we use Blocks() for structure
-     with gr.Blocks(title=APP_TITLE) as app:
-
-         with gr.Tabs() as main_tabs:
-
-             # ══════════════════════════════════════════════════════════
-             # TAB 1: CHAT
-             # ══════════════════════════════════════════════════════════
-             with gr.Tab("Chat", id="chat"):
-                 chat_components = build_chat_ui()
-
-             # ══════════════════════════════════════════════════════════
-             # TAB 2: MODELS
-             # ══════════════════════════════════════════════════════════
-             with gr.Tab("Models", id="models"):
-                 model_components = build_model_manager_ui()
-
-             # ══════════════════════════════════════════════════════════
-             # TAB 3: TOOLS
-             # ══════════════════════════════════════════════════════════
-             with gr.Tab("Tools", id="tools"):
-                 with gr.Tabs():
-
-                     # System Info
-                     with gr.Tab("System"):
-                         gr.Markdown("### System Status")
-                         btn_status = gr.Button("Get Status", variant="primary")
-                         status_output = gr.JSON(label="Status")
-
-                         btn_status.click(
-                             lambda: __import__('json').loads(api_get_status()),
-                             outputs=[status_output]
-                         )

-                     # Logs
-                     with gr.Tab("Logs"):
-                         gr.Markdown("### Application Logs")
-                         with gr.Row():
-                             log_level = gr.Dropdown(
-                                 choices=["all", "INFO", "WARN", "ERROR", "EVENT"],
-                                 value="all",
-                                 label="Level"
-                             )
-                             log_limit = gr.Number(value=50, label="Limit")
-                             btn_refresh_logs = gr.Button("Refresh")
-                             btn_clear_logs = gr.Button("Clear", variant="stop")
-
-                         logs_display = gr.TextArea(
-                             label="",
-                             lines=20,
-                             interactive=False
-                         )

-                         def get_logs(level, limit):
-                             from core.logger import logger as log
-                             logs = log.get_logs(
-                                 level=level if level != "all" else None,
-                                 limit=int(limit)
-                             )
-                             lines = []
-                             for l in reversed(logs):
-                                 ts = l["timestamp"].split("T")[1][:8]
-                                 lines.append(f"[{ts}] [{l['level']}] [{l['module']}] {l['message']}")
-                             return "\n".join(lines)
-
-                         def clear_logs(level):
-                             from core.logger import logger as log
-                             log.clear(level if level != "all" else None)
-                             return "Logs cleared"
-
-                         btn_refresh_logs.click(
-                             get_logs,
-                             inputs=[log_level, log_limit],
-                             outputs=[logs_display]
-                         )

-                         btn_clear_logs.click(
-                             clear_logs,
-                             inputs=[log_level],
-                             outputs=[logs_display]
                          )
 
-                     # API Info
-                     with gr.Tab("API"):
-                         gr.Markdown("### API Endpoints")
-                         gr.Markdown("""
- All endpoints return JSON. Use for integration with local apps.
-
- **Models:**
- - `api_list_models()` - List installed models
- - `api_load_model(model_id)` - Load a model
- - `api_unload_model()` - Unload current model
- - `api_search_models(query, max_params, limit)` - Search HF
- - `api_download_model(repo_id, filename)` - Download model
-
- **Sessions:**
- - `api_list_sessions()` - List all sessions
- - `api_create_session(title, type, system_prompt)` - New session
- - `api_delete_session(session_id)` - Delete session
-
- **Chat:**
- - `api_chat(session_id, message, max_tokens, temperature)` - Chat
- - `api_inference(prompt, messages, system_prompt, ...)` - Direct inference
-
- **System:**
- - `api_health()` - Health check
- - `api_get_status()` - Full status
- - `api_get_backends()` - Available backends
- """)
-
-                         gr.Markdown("### Test")
-                         btn_health = gr.Button("Health Check")
-                         health_output = gr.JSON()
-
-                         btn_health.click(
-                             lambda: __import__('json').loads(api_health()),
-                             outputs=[health_output]
                          )
 

-             # ══════════════════════════════════════════════════════════
-             # TAB 4: ABOUT
-             # ══════════════════════════════════════════════════════════
-             with gr.Tab("About", id="about"):
-                 gr.Markdown(f"""
- # TextAI v{VERSION}

- **Local AI Chat Assistant**

- A clean, professional chat interface for running local LLM models.

  ---

  ### Features

- - Chat - Natural conversation with AI
- - Session Management - Auto-save, rename, delete chats
- - Model Manager - Download, load, configure models
- - HuggingFace Search - Find and download GGUF models
- - Customization - System prompts, temperature, tokens
- - API - Full API for integration with other apps

  ---

- ### Supported Models

- - **GGUF** - Via llama-cpp-python (recommended)
- - **Transformers** - Via HuggingFace transformers

  ---

  ### Keyboard Shortcuts

  - `Enter` - Send message
- - `Shift+Enter` - New line

  ---

- Built with Gradio
  """)

-     return app


- # ══════════════════════════════════════════════════════════════════════════════
- # MAIN
- # ══════════════════════════════════════════════════════════════════════════════

  if __name__ == "__main__":
-     app = create_app()
-     app.launch(
          server_name="0.0.0.0",
          server_port=7860,
          share=False,
-         theme=get_theme(),
-         css=THEME_CSS
      )
 
  """
+ TextAI v2.3 - Full-Featured ChatGPT-Clone Interface
+ Clean UI with dropdown navigation, Model Manager, Logs, File Manager, Settings
  """
+ import os
+ import sys
+ import json
+ import time
+ import uuid
+ import glob
+ import logging
+ from datetime import datetime
+ from typing import Any, Dict, List, Optional, Tuple
+ from collections import deque
+
  import gradio as gr
+ import psutil
+ import requests

+ # Optional backends (handled gracefully if missing)
+ try:
+     from llama_cpp import Llama
+ except ImportError:
+     Llama = None

+ try:
+     from huggingface_hub import InferenceClient, HfApi, list_models
+ except ImportError:
+     InferenceClient = None
+     HfApi = None
+     list_models = None


+ # ════════════════════════════════════════════════════════════════════════════════
+ # CONFIGURATION
+ # ════════════════════════════════════════════════════════════════════════════════
+ APP_TITLE = "TextAI"
+ VERSION = "2.3.0"
+
+ DEFAULT_SYSTEM_PROMPT = "You are a helpful AI assistant. Be concise, accurate, and helpful."
+
+ HF_TOKEN = os.getenv("HF_TOKEN", "")
+ HF_MODEL = os.getenv("HF_MODEL", "meta-llama/Llama-3.1-8B-Instruct")
+
+ OPENAI_BASE_URL = os.getenv("OPENAI_BASE_URL", "https://api.openai.com/v1")
+ OPENAI_API_KEY = os.getenv("OPENAI_API_KEY", "")
+ OPENAI_MODEL = os.getenv("OPENAI_MODEL", "gpt-4o-mini")
+
+ LLAMA_GGUF_PATH = os.getenv("LLAMA_GGUF_PATH", "")
+
+ EXPORT_FILENAME = "chat_export.json"
+ LOG_DIR = "/tmp/textai_logs" if os.environ.get("SPACE_ID") else "./logs"
+ DATA_DIR = "/tmp/textai_data" if os.environ.get("SPACE_ID") else "./data"
+
+ # Ensure directories exist
+ os.makedirs(LOG_DIR, exist_ok=True)
+ os.makedirs(DATA_DIR, exist_ok=True)
+
+
+ # ════════════════════════════════════════════════════════════════════════════════
+ # LOGGING SYSTEM
+ # ════════════════════════════════════════════════════════════════════════════════
+ class LogCapture:
+     """Capture logs in memory for display"""
+     def __init__(self, max_lines: int = 500):
+         self.logs = deque(maxlen=max_lines)
+         self.log_file = os.path.join(LOG_DIR, f"textai_{datetime.now().strftime('%Y%m%d')}.log")
+
+     def add(self, level: str, message: str):
+         timestamp = datetime.now().strftime("%H:%M:%S")
+         entry = f"[{timestamp}] [{level}] {message}"
+         self.logs.append(entry)
+         # Also write to file
+         try:
+             with open(self.log_file, "a", encoding="utf-8") as f:
+                 f.write(entry + "\n")
+         except Exception:
+             pass
+
+     def info(self, msg: str):
+         self.add("INFO", msg)
+
+     def error(self, msg: str):
+         self.add("ERROR", msg)
+
+     def warning(self, msg: str):
+         self.add("WARN", msg)
+
+     def get_logs(self, count: int = 100) -> str:
+         recent = list(self.logs)[-count:]
+         return "\n".join(recent) if recent else "No logs yet."
+
+     def clear(self):
+         self.logs.clear()
+
+
+ LOG = LogCapture()
+ LOG.info(f"TextAI v{VERSION} starting...")
+
+
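Worth noting about `LogCapture`: `deque(maxlen=...)` is a ring buffer, so the in-memory view stays bounded at `max_lines` entries while the log file keeps the full history. A tiny illustration (not part of the commit):

```python
# Illustration only: deque(maxlen=N) silently evicts the oldest entries.
from collections import deque

buf = deque(maxlen=3)
for i in range(5):
    buf.append(f"line {i}")         # once full, each append drops the oldest line

print(list(buf))                    # ['line 2', 'line 3', 'line 4']
# LogCapture.get_logs() slices the newest `count` entries the same way:
print("\n".join(list(buf)[-2:]))    # last two lines, like get_logs(2)
```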
+ # ════════════════════════════════════════════════════════════════════════════════
+ # STATE MANAGEMENT
+ # ════════════════════════════════════════════════════════════════════════════════
+ def _new_conv(title: str = "New chat") -> Dict[str, Any]:
+     return {
+         "id": str(uuid.uuid4()),
+         "title": title,
+         "messages": [],
+         "created": time.time(),
+         "updated": time.time(),
+     }
+
+
+ def _default_state() -> Dict[str, Any]:
+     conv = _new_conv("New chat")
+     return {"active_id": conv["id"], "conversations": [conv]}
+
+
+ def _get_conv(state: Dict[str, Any], conv_id: str) -> Dict[str, Any]:
+     for c in state["conversations"]:
+         if c["id"] == conv_id:
+             return c
+     conv = _new_conv("New chat")
+     state["conversations"].append(conv)
+     state["active_id"] = conv["id"]
+     return conv
+
+
+ def _active_conv(state: Dict[str, Any]) -> Dict[str, Any]:
+     return _get_conv(state, state["active_id"])
+
+
+ def _conv_choices(state: Dict[str, Any]) -> List[Tuple[str, str]]:
+     return [(c["title"].strip() or "Untitled", c["id"]) for c in state["conversations"]]
+
+
+ def _touch(conv: Dict[str, Any]) -> None:
+     conv["updated"] = time.time()
+
+
+ def _estimate_title(conv: Dict[str, Any]) -> str:
+     for m in conv["messages"]:
+         if m.get("role") == "user":
+             txt = str(m.get("content", "")).strip().splitlines()[0]
+             return (txt[:40] + "…") if len(txt) > 40 else (txt or "New chat")
+     return "New chat"
+
+
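For reference, the shape of the `gr.State` payload these helpers maintain; `ui_export` later serializes exactly this dict to `chat_export.json`, and `ui_import` expects it back. Values below are illustrative, not from a real session:

```python
# Shape of the state dict built by _default_state()/_new_conv().
# All field values here are made up for illustration.
example_state = {
    "active_id": "3f2a0c1e-illustrative-uuid",   # id of the currently open conversation
    "conversations": [
        {
            "id": "3f2a0c1e-illustrative-uuid",
            "title": "New chat",                  # replaced by _estimate_title() on first user message
            "messages": [                         # role/content dicts, as gr.Chatbot(type="messages") expects
                {"role": "user", "content": "Hello"},
                {"role": "assistant", "content": "Hi!"},
            ],
            "created": 1730000000.0,              # time.time() floats
            "updated": 1730000000.0,
        }
    ],
}
```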
+ # ════════════════════════════════════════════════════════════════════════════════
+ # SYSTEM STATS & INFO
+ # ════════════════════════════════════════════════════════════════════════════════
+ def system_stats_md() -> str:
+     try:
+         cpu = psutil.cpu_percent(interval=0.05)
+         mem = psutil.virtual_memory()
+         return f"CPU {cpu:.0f}% · RAM {mem.percent:.0f}%"
+     except Exception:
+         return ""
+
+
+ def get_system_info() -> Dict[str, Any]:
+     try:
+         mem = psutil.virtual_memory()
+         disk = psutil.disk_usage("/")
+         return {
+             "cpu_percent": psutil.cpu_percent(interval=0.1),
+             "ram_total_gb": round(mem.total / (1024**3), 2),
+             "ram_used_gb": round(mem.used / (1024**3), 2),
+             "ram_percent": mem.percent,
+             "disk_total_gb": round(disk.total / (1024**3), 2),
+             "disk_used_gb": round(disk.used / (1024**3), 2),
+             "disk_percent": disk.percent,
+             "platform": sys.platform,
+             "python_version": sys.version.split()[0],
+             "space_id": os.environ.get("SPACE_ID", "local"),
+             "backends": {
+                 "llama_cpp": Llama is not None,
+                 "huggingface": InferenceClient is not None,
+                 "hf_api": HfApi is not None,
+             },
+             "env_vars": {
+                 "HF_TOKEN": "set" if HF_TOKEN else "not set",
+                 "OPENAI_API_KEY": "set" if OPENAI_API_KEY else "not set",
+                 "LLAMA_GGUF_PATH": LLAMA_GGUF_PATH or "not set",
+             },
+             "directories": {
+                 "log_dir": LOG_DIR,
+                 "data_dir": DATA_DIR,
+             }
+         }
+     except Exception as e:
+         return {"error": str(e)}
+
+
+ # ════════════════════════════════════════════════════════════════════════════════
+ # MODEL MANAGER - HuggingFace Hub Search
+ # ════════════════════════════════════════════════════════════════════════════════
+ def search_hf_models(query: str, task: str = "text-generation", limit: int = 20) -> List[Dict]:
+     """Search HuggingFace Hub for models"""
+     if not list_models:
+         LOG.warning("huggingface_hub not installed - model search unavailable")
+         return []
+
+     try:
+         LOG.info(f"Searching HF Hub: query='{query}', task='{task}'")
+         models = list_models(
+             search=query,
+             task=task if task != "all" else None,
+             sort="downloads",
+             direction=-1,
+             limit=limit,
+         )
+         results = []
+         for m in models:
+             results.append({
+                 "id": m.id,
+                 "downloads": getattr(m, "downloads", 0),
+                 "likes": getattr(m, "likes", 0),
+                 "task": getattr(m, "pipeline_tag", "unknown"),
+                 "updated": str(getattr(m, "lastModified", ""))[:10],
+             })
+         LOG.info(f"Found {len(results)} models")
+         return results
+     except Exception as e:
+         LOG.error(f"Model search failed: {e}")
+         return []
+
+
+ def format_model_results(results: List[Dict]) -> str:
+     """Format model results as markdown table"""
+     if not results:
+         return "No models found. Try a different search query."
+
+     lines = ["| Model ID | Downloads | Likes | Task |", "|----------|-----------|-------|------|"]
+     for r in results:
+         downloads = f"{r['downloads']:,}" if r['downloads'] else "N/A"
+         likes = f"{r['likes']:,}" if r['likes'] else "0"
+         lines.append(f"| `{r['id']}` | {downloads} | {likes} | {r['task']} |")
+     return "\n".join(lines)
+
+
+ def search_models_ui(query: str, task: str) -> str:
+     """UI handler for model search"""
+     if not query.strip():
+         return "Enter a search query (e.g., 'llama', 'mistral', 'phi')"
+     results = search_hf_models(query, task)
+     return format_model_results(results)
+
+
+ # ════════════════════════════════════════════════════════════════════════════════
+ # FILE MANAGER
+ # ════════════════════════════════════════════════════════════════════════════════
+ def list_files(directory: str = None) -> List[Dict]:
+     """List files in a directory"""
+     if directory is None:
+         directory = DATA_DIR
+
+     try:
+         files = []
+         for item in os.listdir(directory):
+             path = os.path.join(directory, item)
+             stat = os.stat(path)
+             files.append({
+                 "name": item,
+                 "type": "dir" if os.path.isdir(path) else "file",
+                 "size": stat.st_size if os.path.isfile(path) else 0,
+                 "modified": datetime.fromtimestamp(stat.st_mtime).strftime("%Y-%m-%d %H:%M"),
+             })
+         return sorted(files, key=lambda x: (x["type"] != "dir", x["name"].lower()))
+     except Exception as e:
+         LOG.error(f"Failed to list files: {e}")
+         return []
+
+
+ def format_file_list(files: List[Dict]) -> str:
+     """Format file list as markdown"""
+     if not files:
+         return "Directory is empty."
+
+     lines = ["| Name | Type | Size | Modified |", "|------|------|------|----------|"]
+     for f in files:
+         size = f"{f['size']:,} B" if f['type'] == 'file' else "-"
+         icon = "📁" if f['type'] == 'dir' else "📄"
+         lines.append(f"| {icon} {f['name']} | {f['type']} | {size} | {f['modified']} |")
+     return "\n".join(lines)
+
+
+ def get_file_manager_view(path: str = "") -> Tuple[str, str]:
+     """Get file manager view for a path"""
+     if not path:
+         path = DATA_DIR
+
+     # Security: restrict to allowed directories
+     allowed_roots = [DATA_DIR, LOG_DIR, "/tmp"]
+     if not any(os.path.abspath(path).startswith(os.path.abspath(r)) for r in allowed_roots):
+         return DATA_DIR, "Access denied. Restricted to data/logs directories."
+
+     if not os.path.exists(path):
+         return DATA_DIR, "Path does not exist."
+
+     files = list_files(path)
+     return path, format_file_list(files)
+
+
+ def read_file_content(filepath: str) -> str:
+     """Read file content (text files only)"""
+     # Security check
+     allowed_roots = [DATA_DIR, LOG_DIR, "/tmp"]
+     if not any(os.path.abspath(filepath).startswith(os.path.abspath(r)) for r in allowed_roots):
+         return "Access denied."
+
+     if not os.path.isfile(filepath):
+         return "Not a file."
+
+     # Check file size (limit to 100KB)
+     if os.path.getsize(filepath) > 100 * 1024:
+         return "File too large to display (>100KB)."
+
+     try:
+         with open(filepath, "r", encoding="utf-8", errors="replace") as f:
+             return f.read()
+     except Exception as e:
+         return f"Error reading file: {e}"
+
+
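A note on the allow-list check shared by `get_file_manager_view` and `read_file_content`: a plain `startswith` string test also accepts sibling paths that merely share the root as a prefix (`./data_private` passes a `./data` check). A stricter containment test, sketched here with `os.path.commonpath` (illustration only, not part of the commit):

```python
import os

def is_under(path: str, roots: list) -> bool:
    """Stricter than str.startswith: true only if `path` is one of the
    roots or a real descendant of one of them."""
    p = os.path.realpath(path)  # realpath also resolves symlinks
    for root in roots:
        r = os.path.realpath(root)
        if os.path.commonpath([p, r]) == r:
            return True
    return False

# is_under("./data/export.json", ["./data"])  -> True
# is_under("./data_private/x", ["./data"])    -> False (startswith would say True)
```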
+ # ════════════════════════════════════════════════════════════════════════════════
+ # BACKENDS
+ # ════════════════════════════════════════════════════════════════════════════════
+ def call_openai_compat(
+     messages: List[Dict[str, str]],
+     temperature: float,
+     max_tokens: int,
+     model: str,
+     base_url: str,
+     api_key: str,
+ ) -> str:
+     if not api_key:
+         raise RuntimeError("Missing API key")
+
+     LOG.info(f"Calling OpenAI-compatible API: model={model}")
+     url = base_url.rstrip("/") + "/chat/completions"
+     headers = {"Content-Type": "application/json", "Authorization": f"Bearer {api_key}"}
+     payload = {
+         "model": model,
+         "messages": messages,
+         "temperature": float(temperature),
+         "max_tokens": int(max_tokens),
+         "stream": False,
+     }
+     r = requests.post(url, headers=headers, json=payload, timeout=90)
+     r.raise_for_status()
+     LOG.info("OpenAI API call successful")
+     return r.json()["choices"][0]["message"]["content"]
+
+
+ def call_hf_inference(
+     messages: List[Dict[str, str]],
+     temperature: float,
+     max_tokens: int,
+     model: str,
+     token: str,
+ ) -> str:
+     if InferenceClient is None:
+         raise RuntimeError("huggingface_hub not installed")
+     if not token:
+         raise RuntimeError("Missing HF_TOKEN")
+
+     LOG.info(f"Calling HF Inference: model={model}")
+     client = InferenceClient(model=model, token=token)
+
+     sys_prompt = ""
+     turns = []
+     for m in messages:
+         if m["role"] == "system":
+             sys_prompt = m["content"]
+         elif m["role"] == "user":
+             turns.append(f"User: {m['content']}")
+         else:
+             turns.append(f"Assistant: {m['content']}")
+
+     prompt = ""
+     if sys_prompt:
+         prompt += f"System: {sys_prompt}\n"
+     prompt += "\n".join(turns) + "\nAssistant:"
+
+     out = client.text_generation(
+         prompt,
+         max_new_tokens=int(max_tokens),
+         temperature=float(temperature),
+         do_sample=True,
+         return_full_text=False,
+     )
+     LOG.info("HF Inference call successful")
+     return out
+
+
+ _LLAMA_INSTANCE = None
+
+ def call_llama_local(
+     messages: List[Dict[str, str]],
+     temperature: float,
+     max_tokens: int,
+ ) -> str:
+     global _LLAMA_INSTANCE
+
+     if Llama is None:
+         raise RuntimeError("llama-cpp-python not installed")
+     if not LLAMA_GGUF_PATH:
+         raise RuntimeError("Set LLAMA_GGUF_PATH environment variable")
+
+     LOG.info(f"Calling local llama.cpp: path={LLAMA_GGUF_PATH}")
+     if _LLAMA_INSTANCE is None:
+         LOG.info("Loading llama.cpp model...")
+         _LLAMA_INSTANCE = Llama(model_path=LLAMA_GGUF_PATH, n_ctx=4096, verbose=False)
+         LOG.info("Model loaded successfully")
+
+     sys_prompt = ""
+     turns = []
+     for m in messages:
+         if m["role"] == "system":
+             sys_prompt = m["content"]
+         elif m["role"] == "user":
+             turns.append(f"User: {m['content']}")
+         else:
+             turns.append(f"Assistant: {m['content']}")
+
+     prompt = (f"System: {sys_prompt}\n" if sys_prompt else "") + "\n".join(turns) + "\nAssistant:"
+
+     res = _LLAMA_INSTANCE(
+         prompt,
+         max_tokens=int(max_tokens),
+         temperature=float(temperature),
+         stop=["User:"],
+     )
+     LOG.info("llama.cpp call successful")
+     return res["choices"][0]["text"].strip()
+
+
+ def call_demo_backend(messages: List[Dict[str, str]], *args, **kwargs) -> str:
+     last_user = ""
+     for m in reversed(messages):
+         if m["role"] == "user":
+             last_user = m["content"]
+             break
+     LOG.info("Using demo backend (echo mode)")
+     return f"(Demo mode) You said: {last_user}\n\nTo use real AI, configure a backend in Settings."
+
+
+ def generate_response(
+     backend: str,
+     messages: List[Dict[str, str]],
+     temperature: float,
+     max_tokens: int,
+     openai_base_url: str,
+     openai_api_key: str,
+     openai_model: str,
+     hf_token: str,
+     hf_model: str,
+ ) -> str:
+     if backend == "OpenAI-compatible":
+         return call_openai_compat(messages, temperature, max_tokens, openai_model, openai_base_url, openai_api_key)
+     if backend == "HuggingFace Inference":
+         return call_hf_inference(messages, temperature, max_tokens, hf_model, hf_token)
+     if backend == "Local (llama.cpp)":
+         return call_llama_local(messages, temperature, max_tokens)
+     return call_demo_backend(messages)
+
+
+ def pseudo_stream(text: str, chunk: int = 8, delay: float = 0.01):
+     for i in range(0, len(text), chunk):
+         yield text[:i + chunk]
+         time.sleep(delay)
+
+
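The streaming here is cosmetic: each backend call blocks and returns the complete string, after which `pseudo_stream` yields growing prefixes that the generator handler `ui_generate_response` (next hunk) forwards to the Chatbot, one UI re-render per yield. A minimal, self-contained sketch of the same generator-to-Gradio pattern; names like `echo_stream` are illustrative, not from the commit:

```python
# Stand-alone sketch of pseudo-streaming in Gradio: a generator function
# wired to an event re-renders its output component on every yield.
import time
import gradio as gr

def echo_stream(message: str):
    text = f"You said: {message}"
    for i in range(0, len(text), 4):
        time.sleep(0.02)
        yield text[:i + 4]   # growing prefix, exactly like pseudo_stream above

with gr.Blocks() as sketch:
    box_in = gr.Textbox(label="Message")
    box_out = gr.Textbox(label="Reply")
    box_in.submit(echo_stream, inputs=box_in, outputs=box_out)

# sketch.launch()
```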
+ # ════════════════════════════════════════════════════════════════════════════════
+ # UI EVENT HANDLERS - CHAT
+ # ════════════════════════════════════════════════════════════════════════════════
+ def ui_new_chat(state):
+     conv = _new_conv("New chat")
+     state["conversations"].insert(0, conv)
+     state["active_id"] = conv["id"]
+     LOG.info(f"Created new chat: {conv['id'][:8]}")
+     return state, _conv_choices(state), conv["id"], [], ""
+
+
+ def ui_select_chat(state, conv_id):
+     if not conv_id:
+         return state, [], ""
+     state["active_id"] = conv_id
+     conv = _active_conv(state)
+     LOG.info(f"Switched to chat: {conv['title']}")
+     return state, conv["messages"], conv["title"]
+
+
+ def ui_rename_chat(state, new_title):
+     conv = _active_conv(state)
+     old_title = conv["title"]
+     conv["title"] = (new_title or "").strip() or "New chat"
+     _touch(conv)
+     LOG.info(f"Renamed chat: '{old_title}' -> '{conv['title']}'")
+     return state, _conv_choices(state)
+
+
+ def ui_delete_chat(state):
+     active = state["active_id"]
+     deleted_title = _active_conv(state)["title"]
+     state["conversations"] = [c for c in state["conversations"] if c["id"] != active]
+     if not state["conversations"]:
+         state = _default_state()
+     else:
+         state["active_id"] = state["conversations"][0]["id"]
+     conv = _active_conv(state)
+     LOG.info(f"Deleted chat: {deleted_title}")
+     return state, _conv_choices(state), conv["id"], conv["messages"], conv["title"]
+
+
+ def ui_clear_chat(state):
+     conv = _active_conv(state)
+     conv["messages"] = []
+     _touch(conv)
+     LOG.info(f"Cleared chat: {conv['title']}")
+     return state, []
+
+
+ def ui_add_user_message(state, user_input):
+     if not user_input or not user_input.strip():
+         return state, _active_conv(state)["messages"], ""
+
+     conv = _active_conv(state)
+     conv["messages"].append({"role": "user", "content": user_input.strip()})
+     _touch(conv)
+
+     if conv["title"] == "New chat":
+         conv["title"] = _estimate_title(conv)
+
+     LOG.info(f"User message added ({len(user_input)} chars)")
+     return state, conv["messages"], ""
+
+
+ def ui_generate_response(
+     state, backend, system_prompt, temperature, max_tokens,
+     openai_base_url, openai_api_key, openai_model, hf_token, hf_model,
+ ):
+     conv = _active_conv(state)
+
+     api_messages = [{"role": "system", "content": system_prompt.strip() or DEFAULT_SYSTEM_PROMPT}]
+     for m in conv["messages"]:
+         api_messages.append({"role": m["role"], "content": str(m["content"])})
+
+     conv["messages"].append({"role": "assistant", "content": ""})
+     _touch(conv)
+
+     try:
+         full_response = generate_response(
+             backend=backend,
+             messages=api_messages,
+             temperature=temperature,
+             max_tokens=max_tokens,
+             openai_base_url=openai_base_url,
+             openai_api_key=openai_api_key,
+             openai_model=openai_model,
+             hf_token=hf_token,
+             hf_model=hf_model,
+         )
+     except Exception as e:
+         LOG.error(f"Generation failed: {e}")
+         full_response = f"Error: {e}"
+
+     for partial in pseudo_stream(full_response):
+         conv["messages"][-1]["content"] = partial
+         yield state, conv["messages"]
+
+
+ def ui_regenerate(state):
+     conv = _active_conv(state)
+     if conv["messages"] and conv["messages"][-1].get("role") == "assistant":
+         conv["messages"].pop()
+     _touch(conv)
+     LOG.info("Regenerating last response")
+     return state, conv["messages"]
+
+
+ # ════════════════════════════════════════════════════════════════════════════════
+ # UI EVENT HANDLERS - EXPORT/IMPORT
+ # ════════════════════════════════════════════════════════════════════════════════
+ def ui_export(state):
+     path = os.path.join(DATA_DIR, EXPORT_FILENAME)
+     with open(path, "w", encoding="utf-8") as f:
+         json.dump(state, f, ensure_ascii=False, indent=2)
+     LOG.info(f"Exported chats to {path}")
+     return path
+
+
+ def ui_import(state, file_obj):
+     if file_obj is None:
+         return state, _conv_choices(state), state["active_id"], _active_conv(state)["messages"], _active_conv(state)["title"]
+
+     try:
+         with open(file_obj.name, "r", encoding="utf-8") as f:
+             loaded = json.load(f)
+         if isinstance(loaded, dict) and "conversations" in loaded:
+             state = loaded
+             if not state.get("conversations"):
+                 state = _default_state()
+             if not state.get("active_id"):
+                 state["active_id"] = state["conversations"][0]["id"]
+             LOG.info(f"Imported {len(state['conversations'])} conversations")
+     except Exception as e:
+         LOG.error(f"Import failed: {e}")
+
+     conv = _active_conv(state)
+     return state, _conv_choices(state), conv["id"], conv["messages"], conv["title"]
+
+
+ # ════════════════════════════════════════════════════════════════════════════════
+ # VIEW SWITCHING
+ # ════════════════════════════════════════════════════════════════════════════════
+ def switch_view(view_name):
+     """Returns visibility updates for each view"""
+     views = ["Chat", "Models", "Files", "Logs", "Settings", "About"]
+     return tuple(gr.update(visible=(view_name == v)) for v in views)
+
+
+ def toggle_sidebar(is_visible):
+     """Toggle sidebar visibility"""
+     return gr.update(visible=not is_visible), not is_visible
+
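`switch_view` is the whole navigation model: one handler returns one `gr.update(visible=...)` per view container, in the same order as the `outputs` list it is wired to (see the `nav_dropdown.change` call near the end of this diff). A compressed, runnable sketch of that dropdown-navigation pattern, with illustrative names:

```python
# Stand-alone sketch of dropdown navigation via visibility toggling.
import gradio as gr

VIEWS = ["Home", "Stats"]

def pick(view):
    # One visibility update per container, same order as `outputs` below.
    return tuple(gr.update(visible=(view == v)) for v in VIEWS)

with gr.Blocks() as sketch:
    nav = gr.Dropdown(choices=VIEWS, value="Home", label="View")
    with gr.Column(visible=True) as home:
        gr.Markdown("Home view")
    with gr.Column(visible=False) as stats:
        gr.Markdown("Stats view")
    nav.change(pick, inputs=nav, outputs=[home, stats])

# sketch.launch()
```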
+
+ # ════════════════════════════════════════════════════════════════════════════════
+ # CSS - ChatGPT Clone Style
+ # ════════════════════════════════════════════════════════════════════════════════
+ CSS = """
+ /* ═══════════════════════════════════════════════════════════════════════════════
+    ROOT VARIABLES
+ ═══════════════════════════════════════════════════════════════════════════════ */
+ :root {
+     --bg-primary: #0d0d0d;
+     --bg-secondary: #171717;
+     --bg-tertiary: #1a1a1a;
+     --bg-hover: #2a2a2a;
+     --border-color: #2f2f2f;
+     --text-primary: #ececec;
+     --text-secondary: #9b9b9b;
+     --text-muted: #666;
+     --accent: #10a37f;
+     --accent-hover: #0d8c6d;
+     --user-bubble: #2a2a2a;
+     --bot-bubble: transparent;
+ }
+
+ /* ═══════════════════════════════════════════════════════════════════════════════
+    BASE STYLES
+ ═══════════════════════════════════════════════════════════════════════════════ */
+ * { box-sizing: border-box; }
+
+ body, .gradio-container {
+     background: var(--bg-primary) !important;
+     color: var(--text-primary) !important;
+     font-family: 'Söhne', 'ui-sans-serif', system-ui, -apple-system, sans-serif !important;
+     margin: 0 !important;
+     padding: 0 !important;
+ }
+
+ .gradio-container {
+     max-width: 100% !important;
+     padding: 0 !important;
+ }
+
+ footer { display: none !important; }
+
+ /* ═══════════════════════════════════════════════════════════════════════════════
+    HEADER BAR - Top Navigation
+ ═══════════════════════════════════════════════════════════════════════════════ */
+ #header-bar {
+     display: flex !important;
+     align-items: center !important;
+     justify-content: space-between !important;
+     padding: 8px 16px !important;
+     background: var(--bg-secondary) !important;
+     border-bottom: 1px solid var(--border-color) !important;
+     height: 52px !important;
+     min-height: 52px !important;
+     position: sticky !important;
+     top: 0 !important;
+     z-index: 100 !important;
+ }
+
+ #header-left {
+     display: flex !important;
+     align-items: center !important;
+     gap: 12px !important;
+ }
+
+ #sidebar-toggle {
+     background: transparent !important;
+     border: none !important;
+     color: var(--text-primary) !important;
+     cursor: pointer !important;
+     padding: 8px !important;
+     border-radius: 6px !important;
+     width: 40px !important;
+     min-width: 40px !important;
+ }
+
+ #sidebar-toggle:hover {
+     background: var(--bg-hover) !important;
+ }
+
+ #app-title {
+     font-size: 1.1rem !important;
+     font-weight: 600 !important;
+     color: var(--text-primary) !important;
+     margin: 0 !important;
+ }
+
+ #header-right {
+     display: flex !important;
+     align-items: center !important;
+     gap: 8px !important;
+ }
+
+ #nav-dropdown {
+     min-width: 130px !important;
+     background: var(--bg-tertiary) !important;
+     border: 1px solid var(--border-color) !important;
+     border-radius: 8px !important;
+ }
+
+ #nav-dropdown select, #nav-dropdown input {
+     background: transparent !important;
+     color: var(--text-primary) !important;
+     border: none !important;
+     font-size: 14px !important;
+     padding: 8px 12px !important;
+ }
+
+ #new-chat-btn {
+     background: var(--accent) !important;
+     color: white !important;
+     border: none !important;
+     border-radius: 8px !important;
+     padding: 8px 16px !important;
+     font-size: 14px !important;
+     cursor: pointer !important;
+     min-width: auto !important;
+ }
+
+ #new-chat-btn:hover {
+     background: var(--accent-hover) !important;
+ }
+
+ /* ═══════════════════════════════════════════════════════════════════════════════
+    MAIN LAYOUT - Fixed pixel heights for HF iframe
+ ═══════════════════════════════════════════════════════════════════════════════ */
+ #main-layout {
+     display: flex !important;
+     height: 620px !important;
+     max-height: 620px !important;
+     overflow: hidden !important;
+ }
+
+ /* ═══════════════════════════════════════════════════════════════════════════════
+    SIDEBAR
+ ═══════════════════════════════════════════════════════════════════════════════ */
+ #sidebar {
+     width: 260px !important;
+     min-width: 260px !important;
+     max-width: 260px !important;
+     background: var(--bg-secondary) !important;
+     border-right: 1px solid var(--border-color) !important;
+     display: flex !important;
+     flex-direction: column !important;
+     height: 620px !important;
+     overflow: hidden !important;
+ }
+
+ #sidebar-content {
+     flex: 1 !important;
+     overflow-y: auto !important;
+     padding: 12px !important;
+ }
+
+ #sidebar-header {
+     padding: 12px !important;
+     border-bottom: 1px solid var(--border-color) !important;
+ }
+
+ #sidebar-header h4 {
+     margin: 0 !important;
+     font-size: 12px !important;
+     text-transform: uppercase !important;
+     color: var(--text-secondary) !important;
+     letter-spacing: 0.5px !important;
+ }
+
+ /* Conversation list */
+ #chat-list {
+     max-height: 300px !important;
+     overflow-y: auto !important;
+ }
+
+ /* Sidebar buttons */
+ #sidebar-actions {
+     padding: 12px !important;
+     border-top: 1px solid var(--border-color) !important;
+     display: flex !important;
+     flex-direction: column !important;
+     gap: 8px !important;
+ }
+
+ /* ═══════════════════════════════════════════════════════════════════════════════
+    CHAT AREA
+ ═══════════════════════════════════════════════════════════════════════════════ */
+ #chat-area {
+     flex: 1 !important;
+     display: flex !important;
+     flex-direction: column !important;
+     height: 620px !important;
+     max-height: 620px !important;
+     background: var(--bg-primary) !important;
+     overflow: hidden !important;
+ }
+
+ #chat-header {
+     padding: 8px 16px !important;
+     border-bottom: 1px solid var(--border-color) !important;
+     display: flex !important;
+     align-items: center !important;
+     justify-content: center !important;
+     min-height: 44px !important;
+ }
+
+ #model-selector {
+     max-width: 300px !important;
+ }
+
+ #model-selector select {
+     background: var(--bg-tertiary) !important;
+     border: 1px solid var(--border-color) !important;
+     border-radius: 8px !important;
+     color: var(--text-primary) !important;
+     padding: 6px 12px !important;
+     text-align: center !important;
+ }
+
+ /* Chatbot */
+ #chatbot {
+     height: 400px !important;
+     min-height: 400px !important;
+     max-height: 400px !important;
+     border: none !important;
+     background: transparent !important;
+ }
+
+ #chatbot > div {
+     height: 100% !important;
+     max-height: 100% !important;
+ }
+
+ /* Message bubbles */
+ #chatbot .user {
+     background: var(--user-bubble) !important;
+     border-radius: 16px !important;
+     padding: 12px 16px !important;
+ }
+
+ #chatbot .bot {
+     background: var(--bot-bubble) !important;
+ }
+
+ /* ═══════════════════════════════════════════════════════════════════════════════
+    INPUT AREA
+ ═══════════════════════════════════════════════════════════════════════════════ */
+ #input-area {
+     padding: 12px 16px !important;
+     background: var(--bg-primary) !important;
+ }
+
+ #input-row {
+     display: flex !important;
+     gap: 8px !important;
+     align-items: flex-end !important;
+     background: var(--bg-tertiary) !important;
+     border: 1px solid var(--border-color) !important;
+     border-radius: 16px !important;
+     padding: 8px 12px !important;
+ }
+
+ #user-input {
+     flex: 1 !important;
+ }
+
+ #user-input textarea {
+     background: transparent !important;
+     border: none !important;
+     color: var(--text-primary) !important;
+     resize: none !important;
+     padding: 8px !important;
+     font-size: 15px !important;
+     line-height: 1.5 !important;
+ }
+
+ #user-input textarea:focus {
+     outline: none !important;
+     box-shadow: none !important;
+ }
+
+ #send-btn {
+     background: var(--accent) !important;
+     color: white !important;
+     border: none !important;
+     border-radius: 10px !important;
+     padding: 10px 20px !important;
+     cursor: pointer !important;
+     font-size: 14px !important;
+     min-width: 70px !important;
+ }
+
+ #send-btn:hover {
+     background: var(--accent-hover) !important;
+ }
+
+ /* ═══════════════════════════════════════════════════════════════════════════════
+    SETTINGS PANEL (Accordion)
+ ═══════════════════════════════════════════════════════════════════════════════ */
+ .settings-accordion {
+     background: var(--bg-secondary) !important;
+     border: 1px solid var(--border-color) !important;
+     border-radius: 12px !important;
+     margin: 8px 16px !important;
+ }
+
+ /* ═══════════════════════════════════════════════════════════════════════════════
+    OTHER VIEWS
+ ═══════════════════════════════════════════════════════════════════════════════ */
+ #models-view, #files-view, #logs-view, #settings-view, #about-view {
+     height: 620px !important;
+     max-height: 620px !important;
+     overflow-y: auto !important;
+     padding: 24px !important;
+     background: var(--bg-primary) !important;
+ }
+
+ .view-container {
+     max-width: 900px !important;
+     margin: 0 auto !important;
+ }
+
+ .view-container h2 {
+     color: var(--text-primary) !important;
+     margin-bottom: 20px !important;
+     font-size: 1.5rem !important;
+ }
+
+ /* Cards in views */
+ .info-card {
+     background: var(--bg-secondary) !important;
+     border: 1px solid var(--border-color) !important;
+     border-radius: 12px !important;
+     padding: 20px !important;
+     margin-bottom: 16px !important;
+ }
+
+ .info-card h3 {
+     color: var(--text-primary) !important;
+     margin-top: 0 !important;
+     margin-bottom: 12px !important;
+     font-size: 1.1rem !important;
+ }
+
+ /* ═══════════════════════════════════════════════════════════════════════════════
+    FORM ELEMENTS
+ ═══════════════════════════════════════════════════════════════════════════════ */
+ input, textarea, select {
+     background: var(--bg-tertiary) !important;
+     border: 1px solid var(--border-color) !important;
+     color: var(--text-primary) !important;
+     border-radius: 8px !important;
+ }
+
+ input:focus, textarea:focus, select:focus {
+     border-color: var(--accent) !important;
+     outline: none !important;
+ }
+
+ button.primary {
+     background: var(--accent) !important;
+     color: white !important;
+ }
+
+ button.primary:hover {
+     background: var(--accent-hover) !important;
+ }
+
+ /* Code/Log output */
+ .log-output textarea {
+     font-family: 'Monaco', 'Menlo', monospace !important;
+     font-size: 12px !important;
+     background: #0a0a0a !important;
+     color: #0f0 !important;
+ }
+
+ /* ═══════════════════════════════════════════════════════════════════════════════
+    SCROLLBAR
+ ═══════════════════════════════════════════════════════════════════════════════ */
+ ::-webkit-scrollbar {
+     width: 6px;
+     height: 6px;
+ }
+
+ ::-webkit-scrollbar-track {
+     background: transparent;
+ }
+
+ ::-webkit-scrollbar-thumb {
+     background: var(--border-color);
+     border-radius: 3px;
+ }
+
+ ::-webkit-scrollbar-thumb:hover {
+     background: var(--text-muted);
+ }
+
+ /* Stats display */
+ .stats-display {
+     font-size: 11px !important;
+     color: var(--text-muted) !important;
+     padding: 8px 12px !important;
+ }
+
+ /* Version tag */
+ .version-tag {
+     font-size: 10px !important;
+     color: var(--text-muted) !important;
+     text-align: center !important;
+     padding: 8px !important;
+ }
+
+ /* Tables in markdown */
+ table {
+     width: 100% !important;
+     border-collapse: collapse !important;
+     margin: 12px 0 !important;
+ }
+
+ th, td {
+     padding: 8px 12px !important;
+     border: 1px solid var(--border-color) !important;
+     text-align: left !important;
+ }
+
+ th {
+     background: var(--bg-tertiary) !important;
+     color: var(--text-primary) !important;
+ }
+
+ td {
+     color: var(--text-secondary) !important;
+ }
+
+ code {
+     background: var(--bg-tertiary) !important;
+     padding: 2px 6px !important;
+     border-radius: 4px !important;
+     font-size: 13px !important;
+ }
+ """
+
+
+ # ════════════════════════════════════════════════════════════════════════════════
+ # GRADIO APP
+ # ════════════════════════════════════════════════════════════════════════════════
+ with gr.Blocks(css=CSS, title=APP_TITLE, theme=gr.themes.Soft(
+     primary_hue="emerald",
+     neutral_hue="gray",
+ )) as demo:
+
+     state = gr.State(_default_state())
+     sidebar_visible = gr.State(True)
+
+     # ════════════════════════════════════════════════════════════════════════════
+     # HEADER BAR
+     # ════════════════════════════════════════════════════════════════════════════
+     with gr.Row(elem_id="header-bar"):
+         with gr.Row(elem_id="header-left"):
+             sidebar_toggle = gr.Button("☰", elem_id="sidebar-toggle", size="sm")
+             gr.Markdown(f"**{APP_TITLE}**", elem_id="app-title")
+
+         with gr.Row(elem_id="header-right"):
+             nav_dropdown = gr.Dropdown(
+                 choices=["Chat", "Models", "Files", "Logs", "Settings", "About"],
+                 value="Chat",
+                 label=None,
+                 show_label=False,
+                 elem_id="nav-dropdown",
+                 scale=0,
+                 min_width=130,
+             )
+             new_chat_btn = gr.Button("+ New", elem_id="new-chat-btn", size="sm")
+
+     # ════════════════════════════════════════════════════════════════════════════
+     # MAIN LAYOUT
+     # ════════════════════════════════════════════════════════════════════════════
+     with gr.Row(elem_id="main-layout"):
+
+         # ════════════════════════════════════════════════════════════════════════
+         # SIDEBAR
+         # ════════════════════════════════════════════════════════════════════════
+         with gr.Column(elem_id="sidebar", scale=0, visible=True) as sidebar:
+             with gr.Column(elem_id="sidebar-header"):
+                 gr.Markdown("#### Conversations")
+
+             with gr.Column(elem_id="sidebar-content"):
+                 chat_selector = gr.Dropdown(
+                     choices=[],
+                     value=None,
+                     label=None,
+                     interactive=True,
+                     show_label=False,
+                     elem_id="chat-list",
+                 )
+
+                 with gr.Group():
+                     rename_box = gr.Textbox(
+                         value="",
+                         placeholder="Rename chat...",
+                         show_label=False,
+                         container=False,
+                     )
+                     with gr.Row():
+                         rename_btn = gr.Button("Rename", size="sm")
+                         delete_btn = gr.Button("Delete", size="sm", variant="stop")
+
+                 gr.Markdown("---")
+
+                 with gr.Accordion("Import/Export", open=False):
+                     export_btn = gr.Button("Export Chats", size="sm")
+                     export_file = gr.File(label=None, visible=False)
+                     import_file = gr.File(label="Import", file_types=[".json"])
+                     import_btn = gr.Button("Import", size="sm")
+
+             with gr.Column(elem_id="sidebar-actions"):
+                 stats_display = gr.Markdown(value=system_stats_md, every=3, elem_classes="stats-display")
+                 gr.Markdown(f"v{VERSION}", elem_classes="version-tag")
+
+         # ════════════════════════════════════════════════════════════════════════
+         # CHAT VIEW
+         # ════════════════════════════════════════════════════════════════════════
+         with gr.Column(elem_id="chat-area", visible=True) as chat_view:
+
+             with gr.Row(elem_id="chat-header"):
+                 backend = gr.Dropdown(
+                     choices=["Demo (offline)", "OpenAI-compatible", "HuggingFace Inference", "Local (llama.cpp)"],
+                     value="Demo (offline)",
+                     label=None,
+                     show_label=False,
+                     elem_id="model-selector",
+                     scale=0,
+                     min_width=200,
+                 )
+
+             with gr.Accordion("Settings", open=False, elem_classes="settings-accordion"):
+                 system_prompt = gr.Textbox(
+                     value=DEFAULT_SYSTEM_PROMPT,
+                     label="System Prompt",
+                     lines=2,
+                 )
+                 with gr.Row():
+                     temperature = gr.Slider(0, 2, value=0.7, step=0.1, label="Temperature")
+                     max_tokens = gr.Slider(64, 4096, value=1024, step=64, label="Max Tokens")
+
+                 gr.Markdown("**OpenAI-compatible**")
+                 with gr.Row():
+                     openai_base_url = gr.Textbox(value=OPENAI_BASE_URL, label="Base URL")
+                     openai_model = gr.Textbox(value=OPENAI_MODEL, label="Model")
+                     openai_api_key = gr.Textbox(value=OPENAI_API_KEY, label="API Key", type="password")
+
+                 gr.Markdown("**HuggingFace Inference**")
+                 with gr.Row():
+                     hf_model_box = gr.Textbox(value=HF_MODEL, label="Model")
+                     hf_token_box = gr.Textbox(value=HF_TOKEN, label="Token", type="password")
+
+                 if Llama is None:
+                     gr.Markdown("*llama-cpp-python not installed*")
+
+             chatbot = gr.Chatbot(
+                 label=None,
+                 elem_id="chatbot",
+                 type="messages",
+                 show_copy_button=True,
+                 show_label=False,
+             )
+
+             with gr.Column(elem_id="input-area"):
+                 with gr.Row(elem_id="input-row"):
+                     user_input = gr.Textbox(
+                         placeholder="Message TextAI...",
+                         show_label=False,
+                         elem_id="user-input",
+                         lines=1,
+                         max_lines=5,
+                         container=False,
+                     )
+                     send_btn = gr.Button("Send", elem_id="send-btn", variant="primary")
+
+                 with gr.Row():
+                     clear_btn = gr.Button("Clear Chat", size="sm")
+                     regen_btn = gr.Button("Regenerate", size="sm")
+
+         # ════════════════════════════════════════════════════════════════════════
+         # MODELS VIEW - HuggingFace Hub Search
+         # ════════════════════════════════════════════════════════════════════════
+         with gr.Column(elem_id="models-view", visible=False) as models_view:
+             with gr.Column(elem_classes="view-container"):
+                 gr.Markdown("## Model Manager")
+                 gr.Markdown("Search HuggingFace Hub for models to use with the HF Inference backend.")
+
+                 with gr.Group(elem_classes="info-card"):
+                     gr.Markdown("### Search Models")
+                     with gr.Row():
+                         model_search_query = gr.Textbox(
+                             placeholder="Search models (e.g., llama, mistral, phi)...",
+                             label=None,
+                             show_label=False,
+                             scale=3,
                          )
+                         model_search_task = gr.Dropdown(
+                             choices=["text-generation", "text2text-generation", "conversational", "all"],
+                             value="text-generation",
+                             label="Task",
+                             scale=1,
+                         )
+                         model_search_btn = gr.Button("Search", variant="primary", scale=0)
+
+                     model_results = gr.Markdown("Enter a search query and click Search.")
+
+                 model_search_btn.click(
+                     search_models_ui,
+                     inputs=[model_search_query, model_search_task],
+                     outputs=[model_results],
+                 )
+                 model_search_query.submit(
+                     search_models_ui,
+                     inputs=[model_search_query, model_search_task],
+                     outputs=[model_results],
+                 )
+
+                 with gr.Group(elem_classes="info-card"):
+                     gr.Markdown("### Quick Links")
+                     gr.Markdown("""
+ | Category | Popular Models |
+ |----------|----------------|
+ | **Chat/Instruct** | `meta-llama/Llama-3.1-8B-Instruct`, `mistralai/Mistral-7B-Instruct-v0.3` |
+ | **Small/Fast** | `microsoft/Phi-3-mini-4k-instruct`, `google/gemma-2-2b-it` |
+ | **Code** | `codellama/CodeLlama-7b-Instruct-hf`, `deepseek-ai/deepseek-coder-6.7b-instruct` |
+ | **Multilingual** | `CohereForAI/aya-23-8B`, `Qwen/Qwen2-7B-Instruct` |
+ """)

+         # ════════════════════════════════════════════════════════════════════════
+         # FILES VIEW - File Manager
+         # ════════════════════════════════════════════════════════════════════════
+         with gr.Column(elem_id="files-view", visible=False) as files_view:
+             with gr.Column(elem_classes="view-container"):
+                 gr.Markdown("## File Manager")
+                 gr.Markdown("Browse and manage files in the data directory.")
+
+                 with gr.Group(elem_classes="info-card"):
+                     gr.Markdown("### Browse Files")
+                     with gr.Row():
+                         file_path_input = gr.Textbox(
+                             value=DATA_DIR,
+                             label="Path",
+                             scale=4,
                          )
+                         file_browse_btn = gr.Button("Browse", variant="primary", scale=0)

+                     file_list_output = gr.Markdown("Click Browse to list files.")
+
+                     file_browse_btn.click(
+                         lambda p: get_file_manager_view(p)[1],
+                         inputs=[file_path_input],
+                         outputs=[file_list_output],
+                     )
+
+                 with gr.Group(elem_classes="info-card"):
+                     gr.Markdown("### View File")
+                     file_view_path = gr.Textbox(
+                         placeholder="Enter full file path to view...",
+                         label="File Path",
+                     )
+                     file_view_btn = gr.Button("View Content", size="sm")
+                     file_content_output = gr.Textbox(
+                         label="Content",
+                         lines=10,
+                         max_lines=20,
+                         interactive=False,
+                     )

+                     file_view_btn.click(
+                         read_file_content,
+                         inputs=[file_view_path],
+                         outputs=[file_content_output],
+                     )
+
+                 with gr.Group(elem_classes="info-card"):
+                     gr.Markdown("### Directories")
+                     gr.Markdown(f"""
+ | Directory | Path | Purpose |
+ |-----------|------|---------|
+ | **Data** | `{DATA_DIR}` | Chat exports, user files |
+ | **Logs** | `{LOG_DIR}` | Application logs |
+ """)
+
+         # ════════════════════════════════════════════════════════════════════════
+         # LOGS VIEW
+         # ════════════════════════════════════════════════════════════════════════
+         with gr.Column(elem_id="logs-view", visible=False) as logs_view:
+             with gr.Column(elem_classes="view-container"):
+                 gr.Markdown("## Application Logs")
+
+                 with gr.Group(elem_classes="info-card"):
+                     gr.Markdown("### Live Logs")
+                     with gr.Row():
+                         log_refresh_btn = gr.Button("Refresh", variant="primary", size="sm")
+                         log_clear_btn = gr.Button("Clear Logs", size="sm")
+
+                     log_output = gr.Textbox(
+                         value=LOG.get_logs,
+                         label=None,
+                         lines=20,
+                         max_lines=30,
+                         interactive=False,
+                         elem_classes="log-output",
+                         every=5,
+                     )
+
+                     log_refresh_btn.click(
+                         lambda: LOG.get_logs(200),
+                         outputs=[log_output],
+                     )
+                     log_clear_btn.click(
+                         lambda: (LOG.clear(), "Logs cleared.")[1],
+                         outputs=[log_output],
+                     )
+
+                 with gr.Group(elem_classes="info-card"):
+                     gr.Markdown("### Log Files")
+                     gr.Markdown(f"Log files are stored in: `{LOG_DIR}`")
+
+                     def list_log_files():
+                         try:
+                             files = [f for f in os.listdir(LOG_DIR) if f.endswith('.log')]
+                             if not files:
+                                 return "No log files found."
+                             return "\n".join([f"- {f}" for f in sorted(files, reverse=True)])
+                         except Exception:
+                             return "Could not list log files."
+
+                     log_files_output = gr.Markdown(list_log_files())
+
+         # ════════════════════════════════════════════════════════════════════════
+         # SETTINGS VIEW
+         # ════════════════════════════════════════════════════════════════════════
+         with gr.Column(elem_id="settings-view", visible=False) as settings_view:
+             with gr.Column(elem_classes="view-container"):
+                 gr.Markdown("## Settings & System")
+
+                 with gr.Group(elem_classes="info-card"):
+                     gr.Markdown("### System Status")
+                     system_status_btn = gr.Button("Refresh Status", variant="primary", size="sm")
+                     system_status_output = gr.JSON(label="System Info")
+
+                     system_status_btn.click(
+                         get_system_info,
+                         outputs=[system_status_output],
+                     )
+
+                 with gr.Group(elem_classes="info-card"):
+                     gr.Markdown("### Environment")
+                     gr.Markdown(f"""
+ | Variable | Status |
+ |----------|--------|
+ | `HF_TOKEN` | {"✅ Set" if HF_TOKEN else "❌ Not set"} |
+ | `OPENAI_API_KEY` | {"✅ Set" if OPENAI_API_KEY else "❌ Not set"} |
+ | `LLAMA_GGUF_PATH` | {"✅ " + LLAMA_GGUF_PATH if LLAMA_GGUF_PATH else "❌ Not set"} |
+ | `SPACE_ID` | {os.environ.get("SPACE_ID", "local")} |
+ """)
+
+                 with gr.Group(elem_classes="info-card"):
+                     gr.Markdown("### API Information")
+                     gr.Markdown("""
+ This app exposes Gradio's built-in API.
+
+ **Example with gradio_client:**
+ ```python
+ from gradio_client import Client
+ client = Client("your-space-url")
+ result = client.predict(...)
+ ```
+ """)
+
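The `client.predict(...)` in the snippet above leaves the endpoint unspecified, and the handlers in this app are wired without explicit `api_name`s, so the callable endpoints have to be discovered at runtime. A hedged sketch using `gradio_client`'s introspection; the Space id is a placeholder:

```python
# Sketch only: discover what the Space actually exposes before calling it.
# "user/space-name" is a placeholder, not this app's real Space id.
from gradio_client import Client

client = Client("user/space-name")
client.view_api()   # prints every endpoint with its parameters and return types
# Then call one of the printed endpoints, e.g.:
# result = client.predict("Hello", api_name="/some_endpoint_from_view_api")
```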
1402
+     # ════════════════════════════════════════════════════════════════════════
+     # ABOUT VIEW
+     # ════════════════════════════════════════════════════════════════════════
+     with gr.Column(elem_id="about-view", visible=False) as about_view:
+         with gr.Column(elem_classes="view-container"):
+             gr.Markdown(f"""
+ ## About TextAI
 
+ **Version:** {VERSION}
 
  ---
 
  ### Features
 
+ - **Multi-backend support**: Demo, OpenAI-compatible, HuggingFace Inference, local llama.cpp
+ - **Model Manager**: Search the HuggingFace Hub for models
+ - **File Manager**: Browse the data/logs directories
+ - **Live Logs**: View application logs in real time
+ - **Conversation management**: Create, rename, delete, and switch between chats
+ - **Auto-titling**: Conversations are automatically titled from the first message
+ - **Export/Import**: Save and load conversation history as JSON
 
  ---
 
+ ### Navigation
 
+ | View | Description |
+ |------|-------------|
+ | **Chat** | Main chat interface |
+ | **Models** | Search the HuggingFace Hub for models |
+ | **Files** | Browse and view files |
+ | **Logs** | View application logs |
+ | **Settings** | System status and configuration |
+ | **About** | This page |
+ 
+ ---
+ 
+ ### Backends
+ 
+ | Backend | Description |
+ |---------|-------------|
+ | Demo (offline) | Echo mode - no AI, simply echoes your input back |
+ | OpenAI-compatible | Works with OpenAI, Azure, or any compatible API |
+ | HuggingFace Inference | Uses the HF Inference API with your token |
+ | Local (llama.cpp) | Runs local GGUF models (requires llama-cpp-python) |
 
  ---
 
  ### Keyboard Shortcuts
 
  - `Enter` - Send message
+ - `Shift+Enter` - New line in the message box
 
  ---
 
+ Built with [Gradio](https://gradio.app) | [HuggingFace](https://huggingface.co)
  """)
 
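The backends table maps onto a dispatch step inside `ui_generate_response`, which is not part of this hunk. A minimal sketch of how such routing could look; the function name, backend labels, and parameters below are assumptions, not the app's actual code (and `chat_completion` needs a newer `huggingface_hub` than the `>=0.16.0` pin):

```python
def generate(backend, messages, system_prompt, temperature, max_tokens,
             openai_base_url=None, openai_api_key=None, openai_model=None,
             hf_token=None, hf_model=None):
    """Route a chat request to the selected backend (illustrative sketch only)."""
    if backend == "Demo (offline)":
        # echo mode: no model involved
        return f"Echo: {messages[-1]['content']}"
    if backend == "OpenAI-compatible":
        import requests
        resp = requests.post(
            f"{openai_base_url.rstrip('/')}/chat/completions",
            headers={"Authorization": f"Bearer {openai_api_key}"},
            json={"model": openai_model,
                  "messages": [{"role": "system", "content": system_prompt}] + messages,
                  "temperature": temperature, "max_tokens": max_tokens},
            timeout=120,
        )
        resp.raise_for_status()
        return resp.json()["choices"][0]["message"]["content"]
    if backend == "HuggingFace Inference":
        from huggingface_hub import InferenceClient
        client = InferenceClient(model=hf_model, token=hf_token)
        out = client.chat_completion(messages, max_tokens=max_tokens, temperature=temperature)
        return out.choices[0].message.content
    raise ValueError(f"Unsupported backend: {backend}")
```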
+     # ════════════════════════════════════════════════════════════════════════════
+     # EVENT WIRING
+     # ════════════════════════════════════════════════════════════════════════════
+ 
+     # Sidebar toggle
+     sidebar_toggle.click(
+         toggle_sidebar,
+         inputs=[sidebar_visible],
+         outputs=[sidebar, sidebar_visible],
+         queue=False,
+     )
+ 
+     # View switching via dropdown
+     nav_dropdown.change(
+         switch_view,
+         inputs=[nav_dropdown],
+         outputs=[chat_view, models_view, files_view, logs_view, settings_view, about_view],
+         queue=False,
+     )
+ 
+     # Initialize
+     def _init(s):
+         choices = _conv_choices(s)
+         conv = _active_conv(s)
+         return choices, s["active_id"], conv["title"], conv["messages"]
+ 
+     demo.load(
+         _init,
+         inputs=[state],
+         outputs=[chat_selector, chat_selector, rename_box, chatbot],
+         queue=False,
+     )
+ 
+     # Chat operations
+     new_chat_btn.click(
+         ui_new_chat,
+         inputs=[state],
+         outputs=[state, chat_selector, chat_selector, chatbot, rename_box],
+         queue=False,
+     )
+ 
+     chat_selector.change(
+         ui_select_chat,
+         inputs=[state, chat_selector],
+         outputs=[state, chatbot, rename_box],
+         queue=False,
+     )
+ 
+     rename_btn.click(
+         ui_rename_chat,
+         inputs=[state, rename_box],
+         outputs=[state, chat_selector],
+         queue=False,
+     )
+ 
+     delete_btn.click(
+         ui_delete_chat,
+         inputs=[state],
+         outputs=[state, chat_selector, chat_selector, chatbot, rename_box],
+         queue=False,
+     )
+ 
+     clear_btn.click(
+         ui_clear_chat,
+         inputs=[state],
+         outputs=[state, chatbot],
+         queue=False,
+     )
+ 
+     # Export/Import
+     export_btn.click(
+         ui_export,
+         inputs=[state],
+         outputs=[export_file],
+         queue=False,
+     ).then(
+         lambda p: gr.File(value=p, visible=True),
+         inputs=[export_file],
+         outputs=[export_file],
+         queue=False,
+     )
+ 
+     import_btn.click(
+         ui_import,
+         inputs=[state, import_file],
+         outputs=[state, chat_selector, chat_selector, chatbot, rename_box],
+         queue=False,
+     )
+ 
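The export chain above first writes the file, then returns a `gr.File(value=..., visible=True)` instance to flip the hidden component visible, which is Gradio's update-via-component pattern. A self-contained sketch of just that pattern (the handler and payload are placeholders):

```python
import json
import os
import tempfile

import gradio as gr

def export_history(history):
    # Write history to a temp JSON file and return its path (placeholder payload).
    fd, path = tempfile.mkstemp(suffix=".json")
    with os.fdopen(fd, "w") as f:
        json.dump(history, f, indent=2)
    return path

with gr.Blocks() as demo:
    history = gr.State([{"role": "user", "content": "hi"}])
    btn = gr.Button("Export")
    out = gr.File(visible=False)  # hidden until a file exists
    # Returning gr.File(value=..., visible=True) updates the component's props.
    btn.click(export_history, inputs=[history], outputs=[out]).then(
        lambda p: gr.File(value=p, visible=True), inputs=[out], outputs=[out]
    )

demo.launch()  # hypothetical standalone demo
```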
+     # Regenerate
+     regen_btn.click(
+         ui_regenerate,
+         inputs=[state],
+         outputs=[state, chatbot],
+         queue=False,
+     ).then(
+         ui_generate_response,
+         inputs=[state, backend, system_prompt, temperature, max_tokens,
+                 openai_base_url, openai_api_key, openai_model, hf_token_box, hf_model_box],
+         outputs=[state, chatbot],
+         queue=True,
+     )
+ 
+     # Send message
+     send_btn.click(
+         ui_add_user_message,
+         inputs=[state, user_input],
+         outputs=[state, chatbot, user_input],
+         queue=False,
+     ).then(
+         ui_generate_response,
+         inputs=[state, backend, system_prompt, temperature, max_tokens,
+                 openai_base_url, openai_api_key, openai_model, hf_token_box, hf_model_box],
+         outputs=[state, chatbot],
+         queue=True,
+     )
+ 
+     user_input.submit(
+         ui_add_user_message,
+         inputs=[state, user_input],
+         outputs=[state, chatbot, user_input],
+         queue=False,
+     ).then(
+         ui_generate_response,
+         inputs=[state, backend, system_prompt, temperature, max_tokens,
+                 openai_base_url, openai_api_key, openai_model, hf_token_box, hf_model_box],
+         outputs=[state, chatbot],
+         queue=True,
+     )
+ 
+ 
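Regenerate, the send button, and textbox submit all share one two-step chain: an unqueued event updates the UI instantly, then a queued `.then()` runs the slow generation. A minimal standalone sketch of the pattern (placeholder handlers; assumes a Gradio version with the messages-format `Chatbot`):

```python
import time

import gradio as gr

def add_user(history, text):
    history = history + [{"role": "user", "content": text}]
    return history, history, ""   # state, chatbot, cleared textbox

def respond(history):
    time.sleep(1)                 # stand-in for a slow model call
    history = history + [{"role": "assistant", "content": "placeholder reply"}]
    return history, history

with gr.Blocks() as demo:
    state = gr.State([])
    chatbot = gr.Chatbot(type="messages")
    box = gr.Textbox()
    # Fast, unqueued UI update first; queued generation second.
    box.submit(add_user, [state, box], [state, chatbot, box], queue=False).then(
        respond, [state], [state, chatbot], queue=True
    )

demo.launch()
```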
+ # ════════════════════════════════════════════════════════════════════════════════
+ # MAIN
+ # ════════════════════════════════════════════════════════════════════════════════
  if __name__ == "__main__":
+     LOG.info("Starting Gradio server...")
+     demo.launch(
          server_name="0.0.0.0",
          server_port=7860,
          share=False,
      )
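Since the generation steps run with `queue=True`, queue behavior can also be tuned before launching. A hedged variant; the limits below are illustrative, not values this commit sets:

```python
# Hypothetical tuning: cap concurrent generations before launching (Gradio 4 API).
demo.queue(default_concurrency_limit=2, max_size=16).launch(
    server_name="0.0.0.0",  # bind all interfaces so the HF Spaces proxy can reach the server
    server_port=7860,       # the port Spaces expects
    share=False,
)
```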
requirements.txt CHANGED
@@ -2,6 +2,3 @@ gradio>=4.0.0
  requests>=2.28.0
  huggingface_hub>=0.16.0
  psutil>=5.9.0
- # llama-cpp-python requires compilation - handled gracefully if missing
- # NOT included here due to build timeout on free HF Spaces
- # Users can install locally: pip install llama-cpp-python
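The removed comments describe an optional-dependency pattern that app.py presumably implements: import llama-cpp-python when available, otherwise disable the local backend. A sketch of such a guard (variable and function names are assumptions, not the app's actual code):

```python
# Degrade gracefully when llama-cpp-python isn't installed.
try:
    from llama_cpp import Llama
    LLAMA_AVAILABLE = True
except ImportError:
    Llama = None
    LLAMA_AVAILABLE = False

def load_local_model(gguf_path):
    """Load a local GGUF model, or explain how to enable the backend."""
    if not LLAMA_AVAILABLE:
        raise RuntimeError(
            "llama-cpp-python is not installed; run: pip install llama-cpp-python"
        )
    return Llama(model_path=gguf_path, n_ctx=4096)
```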
 
 
 
ui/theme.py CHANGED
@@ -5,7 +5,16 @@ import gradio as gr
  
  THEME_CSS = """
  /* ═══════════════════════════════════════════════════════════════════════════
-    RESET & BASE
+    HUGGINGFACE SPACES IFRAME FIX
+    ═══════════════════════════════════════════════════════════════════════════
+    CRITICAL: HuggingFace uses iframeResizer, which auto-sizes the iframe to its content.
+ 
+    Problem: using 100vh or flex-grow: 1 causes infinite height because:
+    - iframeResizer measures the content height
+    - 100vh refers to the iframe viewport, which grows with the content
+    - circular dependency = infinite growth (46569px observed!)
+ 
+    Solution: use FIXED PIXEL HEIGHTS for all containers.
     ═══════════════════════════════════════════════════════════════════════════ */
  * {
      box-sizing: border-box;
@@ -25,29 +34,30 @@ body, .gradio-container {
  footer { display: none !important; }
  
  /* ═══════════════════════════════════════════════════════════════════════════
-    MAIN LAYOUT
+    MAIN LAYOUT - FIXED PIXEL HEIGHT (NOT 100vh!)
     ═══════════════════════════════════════════════════════════════════════════ */
  #main-container {
      display: flex !important;
-     height: calc(100vh - 60px) !important;
-     max-height: calc(100vh - 60px) !important;
+     height: 600px !important;
+     max-height: 600px !important;
      overflow: hidden !important;
      gap: 0 !important;
  }
  
  /* ═══════════════════════════════════════════════════════════════════════════
-    SIDEBAR
+    SIDEBAR - FIXED HEIGHT
     ═══════════════════════════════════════════════════════════════════════════ */
  #sidebar {
      background: #171717 !important;
      border-right: 1px solid #333 !important;
      padding: 16px !important;
      overflow-y: auto !important;
-     max-height: calc(100vh - 60px) !important;
+     height: 600px !important;
+     max-height: 600px !important;
  }
  
  #sessions-table {
-     max-height: 300px !important;
+     max-height: 200px !important;
      overflow-y: auto !important;
  }
@@ -60,13 +70,13 @@ footer { display: none !important; }
  }
  
  /* ═══════════════════════════════════════════════════════════════════════════
-    CHAT AREA
+    CHAT AREA - FIXED HEIGHT
     ═══════════════════════════════════════════════════════════════════════════ */
  #chat-area {
      display: flex !important;
      flex-direction: column !important;
-     height: calc(100vh - 60px) !important;
-     max-height: calc(100vh - 60px) !important;
+     height: 600px !important;
+     max-height: 600px !important;
      overflow: hidden !important;
      background: #0d0d0d !important;
  }
@@ -93,28 +103,24 @@ footer { display: none !important; }
  }
  
  /* ═══════════════════════════════════════════════════════════════════════════
-    CHATBOT - CRITICAL FIXED HEIGHT
+    CHATBOT - FIXED PIXEL HEIGHT (Critical!)
     ═══════════════════════════════════════════════════════════════════════════ */
  #chatbot {
-     flex: 1 !important;
-     min-height: 0 !important;
-     max-height: calc(100vh - 280px) !important;
+     height: 400px !important;
+     min-height: 400px !important;
+     max-height: 400px !important;
      overflow: hidden !important;
  }
  
- #chatbot > div {
-     height: 100% !important;
-     max-height: 100% !important;
- }
- 
+ #chatbot > div,
  #chatbot .wrapper {
      height: 100% !important;
      max-height: 100% !important;
      overflow-y: auto !important;
  }
  
- #chatbot .messages-wrapper {
-     max-height: 100% !important;
+ #chatbot .bubble-wrap {
+     max-height: 360px !important;
      overflow-y: auto !important;
  }
  
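For reference, the same pixel constraint can also be enforced from Python: `gr.Chatbot` accepts a `height` parameter, which avoids fighting the component's default sizing. A minimal sketch mirroring the 400px rule above (the inline CSS stands in for THEME_CSS):

```python
import gradio as gr

# Pin the chatbot to 400px from Python as well, matching the CSS rule above.
with gr.Blocks(css="#chatbot { max-height: 400px !important; }") as demo:
    chatbot = gr.Chatbot(elem_id="chatbot", height=400)

demo.launch()
```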