rbt2025 committed
Commit 02daacc · verified · 1 Parent(s): d34fc35

Deploy TextAI v2 - Clean architecture

app.py CHANGED
@@ -1,746 +1,839 @@
  """
- TextAI - ChatGPT-Style Chat Interface
- Clean professional UI with Gradio 6.0 compatibility
  """
- import os
- import json
- import time
- import uuid
- from typing import Any, Dict, List, Optional, Tuple
-
  import gradio as gr
- import psutil
- import requests
-
- try:
-     from huggingface_hub import InferenceClient
- except ImportError:
-     InferenceClient = None
  try:
-     from llama_cpp import Llama
- except ImportError:
-     Llama = None
-
-
- # ═══════════════════════════════════════════════════════════════════════════════
- # Configuration
- # ═══════════════════════════════════════════════════════════════════════════════
- APP_TITLE = "TextAI"
- VERSION = "3.0.0"
-
- DEFAULT_SYSTEM_PROMPT = "You are a helpful AI assistant. Provide clear, accurate, and helpful responses."
-
- OPENAI_BASE_URL = os.getenv("OPENAI_BASE_URL", "https://api.openai.com/v1")
- OPENAI_API_KEY = os.getenv("OPENAI_API_KEY", "")
- OPENAI_MODEL = os.getenv("OPENAI_MODEL", "gpt-4o-mini")
-
- HF_TOKEN = os.getenv("HF_TOKEN", "")
- HF_MODEL = os.getenv("HF_MODEL", "meta-llama/Llama-3.1-8B-Instruct")
-
- LLAMA_GGUF_PATH = os.getenv("LLAMA_GGUF_PATH", "")
-
-
- # ═══════════════════════════════════════════════════════════════════════════════
- # State Management
- # ═══════════════════════════════════════════════════════════════════════════════
- def _new_conv(title: str = "New chat") -> Dict[str, Any]:
-     return {
-         "id": str(uuid.uuid4()),
-         "title": title,
-         "messages": [],
-         "created": time.time(),
-         "updated": time.time(),
-     }
-
-
- def _default_state() -> Dict[str, Any]:
-     conv = _new_conv("New chat")
-     return {"active_id": conv["id"], "conversations": [conv]}
-
-
- def _get_conv(state: Dict[str, Any], conv_id: str) -> Dict[str, Any]:
-     for c in state["conversations"]:
-         if c["id"] == conv_id:
-             return c
-     conv = _new_conv("New chat")
-     state["conversations"].append(conv)
-     state["active_id"] = conv["id"]
-     return conv
-
-
- def _active_conv(state: Dict[str, Any]) -> Dict[str, Any]:
-     return _get_conv(state, state["active_id"])
-
-
- def _conv_choices(state: Dict[str, Any]) -> List[Tuple[str, str]]:
-     return [(c["title"].strip() or "Untitled", c["id"]) for c in state["conversations"]]
-
-
- def _set_active(state: Dict[str, Any], conv_id: str) -> Dict[str, Any]:
-     _get_conv(state, conv_id)
-     state["active_id"] = conv_id
-     return state
-
-
- def _touch(conv: Dict[str, Any]) -> None:
-     conv["updated"] = time.time()
-
-
- def _estimate_title(conv: Dict[str, Any]) -> str:
-     for m in conv["messages"]:
-         if m.get("role") == "user":
-             content = m.get("content", "")
-             if isinstance(content, list):
-                 txt = " ".join(str(c) for c in content if isinstance(c, str))
-             else:
-                 txt = str(content)
-             txt = txt.strip().splitlines()[0] if txt.strip() else ""
-             return (txt[:40] + "…") if len(txt) > 40 else (txt or "New chat")
-     return "New chat"
-
-
- def _to_chatbot_format(messages: List[Dict[str, Any]]) -> List[Tuple[Optional[str], Optional[str]]]:
-     """Convert messages to Gradio Chatbot tuple format: [(user, assistant), ...]"""
-     result = []
-     i = 0
-     while i < len(messages):
-         user_msg = None
-         assistant_msg = None
-
-         # Get user message
-         if i < len(messages) and messages[i].get("role") == "user":
-             content = messages[i].get("content", "")
-             if isinstance(content, list):
-                 user_msg = " ".join(str(c) if isinstance(c, str) else f"[file]" for c in content)
-             else:
-                 user_msg = str(content)
-             i += 1
-
-         # Get assistant message
-         if i < len(messages) and messages[i].get("role") == "assistant":
-             assistant_msg = str(messages[i].get("content", ""))
-             i += 1
-
-         if user_msg is not None or assistant_msg is not None:
-             result.append((user_msg, assistant_msg))
-
-     return result
-
-
- def _strip_content_for_prompt(messages: List[Dict[str, Any]]) -> List[Dict[str, str]]:
-     """Convert messages to simple role/content format for API calls"""
-     out = []
-     for m in messages:
-         role = m.get("role", "user")
-         content = m.get("content", "")
-         if isinstance(content, list):
-             parts = []
-             for item in content:
-                 if isinstance(item, str):
-                     parts.append(item)
-                 elif isinstance(item, dict) and "path" in item:
-                     parts.append(f"[file: {os.path.basename(str(item['path']))}]")
-                 else:
-                     parts.append("[attachment]")
-             content_text = " ".join(parts).strip()
-         else:
-             content_text = str(content)
-         out.append({"role": role, "content": content_text})
-     return out
-
-
- # ═══════════════════════════════════════════════════════════════════════════════
- # System Stats
- # ═══════════════════════════════════════════════════════════════════════════════
- def system_stats_md() -> str:
-     try:
-         cpu = psutil.cpu_percent(interval=0.05)
-         mem = psutil.virtual_memory()
-         return f"CPU {cpu:.0f}% · RAM {mem.percent:.0f}%"
-     except Exception:
-         return ""
-
-
- # ═══════════════════════════════════════════════════════════════════════════════
- # Backends
- # ═══════════════════════════════════════════════════════════════════════════════
- def call_openai_compat(messages, temperature, max_tokens, model, base_url, api_key):
-     if not api_key:
-         raise RuntimeError("Missing API key")
-     url = base_url.rstrip("/") + "/chat/completions"
-     headers = {"Content-Type": "application/json", "Authorization": f"Bearer {api_key}"}
-     payload = {
-         "model": model,
-         "messages": messages,
-         "temperature": float(temperature),
-         "max_tokens": int(max_tokens),
-         "stream": False,
-     }
-     r = requests.post(url, headers=headers, json=payload, timeout=90)
-     r.raise_for_status()
-     return r.json()["choices"][0]["message"]["content"]
-
-
- def call_hf_inference(messages, temperature, max_tokens, model, token):
-     if InferenceClient is None:
-         raise RuntimeError("huggingface_hub not installed")
-     if not token:
-         raise RuntimeError("Missing HF_TOKEN")
-     client = InferenceClient(model=model, token=token)
-
-     sys_prompt = ""
-     turns = []
-     for m in messages:
-         if m["role"] == "system":
-             sys_prompt = m["content"]
-         elif m["role"] == "user":
-             turns.append(f"User: {m['content']}")
-         else:
-             turns.append(f"Assistant: {m['content']}")
-
-     prompt = (f"System: {sys_prompt}\n" if sys_prompt else "") + "\n".join(turns) + "\nAssistant:"
-
-     return client.text_generation(
-         prompt,
-         max_new_tokens=int(max_tokens),
-         temperature=float(temperature),
-         do_sample=True,
-         return_full_text=False,
      )
-
-
- _LLAMA_INSTANCE = None
-
- def call_llama_local(messages, temperature, max_tokens):
-     global _LLAMA_INSTANCE
-     if Llama is None:
-         raise RuntimeError("llama-cpp-python not installed")
-     if not LLAMA_GGUF_PATH:
-         raise RuntimeError("Set LLAMA_GGUF_PATH")
-     if _LLAMA_INSTANCE is None:
-         _LLAMA_INSTANCE = Llama(model_path=LLAMA_GGUF_PATH, n_ctx=4096, verbose=False)
-
-     sys_prompt = ""
-     turns = []
-     for m in messages:
-         if m["role"] == "system":
-             sys_prompt = m["content"]
-         elif m["role"] == "user":
-             turns.append(f"User: {m['content']}")
-         else:
-             turns.append(f"Assistant: {m['content']}")
-
-     prompt = (f"System: {sys_prompt}\n" if sys_prompt else "") + "\n".join(turns) + "\nAssistant:"
-     res = _LLAMA_INSTANCE(prompt, max_tokens=int(max_tokens), temperature=float(temperature), stop=["User:"])
-     return res["choices"][0]["text"].strip()
-
-
- def call_demo(messages, *args, **kwargs):
-     last_user = ""
-     for m in reversed(messages):
-         if m["role"] == "user":
-             last_user = m["content"]
-             break
-     return f"(Demo mode) You said: {last_user}\n\nConfigure a backend in Settings to use real AI."
-
-
- def generate_response(backend, messages, temperature, max_tokens, openai_base, openai_key, openai_model, hf_token, hf_model):
-     if backend == "OpenAI-compatible":
-         return call_openai_compat(messages, temperature, max_tokens, openai_model, openai_base, openai_key)
-     if backend == "Hugging Face Inference":
-         return call_hf_inference(messages, temperature, max_tokens, hf_model, hf_token)
-     if backend == "llama.cpp (local)":
-         return call_llama_local(messages, temperature, max_tokens)
-     return call_demo(messages)
-
-
- def pseudo_stream(text, chunk=6, delay=0.01):
-     for i in range(0, len(text), chunk):
-         yield text[:i + chunk]
-         time.sleep(delay)
-
-
- # ═══════════════════════════════════════════════════════════════════════════════
- # UI Event Handlers
- # ═══════════════════════════════════════════════════════════════════════════════
- def ui_new_chat(state):
-     conv = _new_conv("New chat")
-     state["conversations"].insert(0, conv)
-     state["active_id"] = conv["id"]
-     return state, _conv_choices(state), conv["id"], []
-
-
- def ui_select_chat(state, conv_id):
-     state = _set_active(state, conv_id)
-     conv = _active_conv(state)
-     return state, _to_chatbot_format(conv["messages"]), conv["title"]
-
-
- def ui_rename_chat(state, new_title):
-     conv = _active_conv(state)
-     conv["title"] = (new_title or "").strip() or "New chat"
-     _touch(conv)
-     return state, _conv_choices(state)
-
-
- def ui_delete_chat(state):
-     active = state["active_id"]
-     state["conversations"] = [c for c in state["conversations"] if c["id"] != active]
-     if not state["conversations"]:
-         state = _default_state()
-     else:
-         state["active_id"] = state["conversations"][0]["id"]
-     conv = _active_conv(state)
-     return state, _conv_choices(state), conv["id"], _to_chatbot_format(conv["messages"]), conv["title"]
-
-
- def ui_export(state):
-     path = os.path.abspath("chat_export.json")
-     with open(path, "w", encoding="utf-8") as f:
-         json.dump(state, f, ensure_ascii=False, indent=2)
-     return path
-
-
- def ui_import(state, file_obj):
-     if file_obj is None:
-         conv = _active_conv(state)
-         return state, _conv_choices(state), state["active_id"], _to_chatbot_format(conv["messages"]), conv["title"]
      try:
-         with open(file_obj.name, "r", encoding="utf-8") as f:
-             loaded = json.load(f)
-         if isinstance(loaded, dict) and "conversations" in loaded:
-             state = loaded
-         if not state.get("conversations"):
-             state = _default_state()
-         if not state.get("active_id"):
-             state["active_id"] = state["conversations"][0]["id"]
-     except Exception:
-         pass
-     conv = _active_conv(state)
-     return state, _conv_choices(state), conv["id"], _to_chatbot_format(conv["messages"]), conv["title"]
-
-
- def ui_add_user_message(state, mm_value):
-     conv = _active_conv(state)
-
-     text = ""
-     files = []
-     if isinstance(mm_value, dict):
-         text = str(mm_value.get("text") or "")
-         files = mm_value.get("files") or []
-
-     content_parts = []
-     if text.strip():
-         content_parts.append(text.strip())
-
-     if isinstance(files, list):
-         for f in files:
-             if isinstance(f, dict) and "path" in f:
-                 content_parts.append({"path": f["path"]})
-             elif isinstance(f, str):
-                 content_parts.append({"path": f})
-
-     if not content_parts:
-         return state, _to_chatbot_format(conv["messages"]), gr.MultimodalTextbox(value=None)
-
-     conv["messages"].append({
-         "role": "user",
-         "content": content_parts if len(content_parts) > 1 else content_parts[0]
-     })
-     _touch(conv)
-
-     if conv["title"] == "New chat":
-         conv["title"] = _estimate_title(conv)
-
-     return state, _to_chatbot_format(conv["messages"]), gr.MultimodalTextbox(value=None)
-
-
- def ui_regenerate_prepare(state):
-     conv = _active_conv(state)
-     if conv["messages"] and conv["messages"][-1].get("role") == "assistant":
-         conv["messages"].pop()
-         _touch(conv)
-     return state, _to_chatbot_format(conv["messages"])
-
-
- def ui_generate_assistant(state, backend, system_prompt, temperature, max_tokens,
-                           openai_base, openai_key, openai_model, hf_token, hf_model):
-     conv = _active_conv(state)
-
-     prompt_messages = [{"role": "system", "content": system_prompt.strip() or DEFAULT_SYSTEM_PROMPT}]
-     prompt_messages += _strip_content_for_prompt(conv["messages"])
-
-     conv["messages"].append({"role": "assistant", "content": ""})
-     _touch(conv)
-
-     try:
-         full = generate_response(
-             backend, prompt_messages, temperature, max_tokens,
-             openai_base, openai_key, openai_model, hf_token, hf_model
          )
-     except Exception as e:
-         full = f"Error: {e}"
-
-     for partial in pseudo_stream(full):
-         conv["messages"][-1]["content"] = partial
-         yield state, _to_chatbot_format(conv["messages"])
-
-
- # ═══════════════════════════════════════════════════════════════════════════════
- # CSS - ChatGPT Style
- # ═══════════════════════════════════════════════════════════════════════════════
- CSS = """
- :root {
-     --bg-dark: #0d0d0d;
-     --bg-panel: #171717;
-     --bg-input: #2f2f2f;
-     --border: #3d3d3d;
-     --text: #ececec;
-     --text-muted: #8e8e8e;
-     --accent: #10a37f;
-     --accent-hover: #0d8a6c;
- }
-
- * { box-sizing: border-box; }
-
- body, .gradio-container {
-     background: var(--bg-dark) !important;
-     color: var(--text) !important;
- }
-
- .gradio-container {
-     max-width: 1400px !important;
-     margin: 0 auto !important;
-     padding: 0 !important;
- }
-
- footer { display: none !important; }
-
- /* Main Layout */
- #layout {
-     display: flex !important;
-     height: 92vh !important;
-     gap: 0 !important;
- }
-
- /* Sidebar */
- #sidebar {
-     width: 280px !important;
-     min-width: 280px !important;
-     background: var(--bg-panel) !important;
-     border-right: 1px solid var(--border) !important;
-     display: flex !important;
-     flex-direction: column !important;
-     padding: 16px !important;
- }
-
- #sidebar h3 {
-     margin: 0 0 12px 0 !important;
-     font-size: 18px !important;
-     color: var(--text) !important;
- }
-
- .stats {
-     color: var(--text-muted) !important;
-     font-size: 12px !important;
-     margin-bottom: 16px !important;
- }
-
- .smalllabel {
-     color: var(--text-muted) !important;
-     font-size: 12px !important;
-     margin: 16px 0 6px 0 !important;
- }
-
- /* Main Chat Panel */
- #main {
-     flex: 1 !important;
-     display: flex !important;
-     flex-direction: column !important;
-     background: var(--bg-dark) !important;
-     overflow: hidden !important;
- }
-
- /* Chatbot */
- #chatbot {
-     flex: 1 !important;
-     min-height: 400px !important;
-     border: none !important;
-     background: transparent !important;
- }
-
- #chatbot .message {
-     padding: 20px !important;
-     max-width: 800px !important;
-     margin: 0 auto !important;
- }
-
- /* Input area */
- .input-area {
-     padding: 16px 20% !important;
-     background: var(--bg-dark) !important;
-     border-top: 1px solid var(--border) !important;
- }
-
- @media (max-width: 1200px) {
-     .input-area { padding: 16px 10% !important; }
- }
-
- @media (max-width: 768px) {
-     .input-area { padding: 16px !important; }
-     #sidebar { display: none !important; }
- }
-
- /* Buttons */
- button.primary {
-     background: var(--accent) !important;
-     color: white !important;
-     border: none !important;
- }
-
- button.primary:hover {
-     background: var(--accent-hover) !important;
- }
-
- button.secondary {
-     background: var(--bg-input) !important;
-     color: var(--text) !important;
-     border: 1px solid var(--border) !important;
- }
-
- /* Form elements */
- input, textarea, select {
-     background: var(--bg-input) !important;
-     border: 1px solid var(--border) !important;
-     color: var(--text) !important;
-     border-radius: 8px !important;
- }
-
- input:focus, textarea:focus, select:focus {
-     border-color: var(--accent) !important;
-     outline: none !important;
- }
-
- /* Accordion */
- .accordion {
-     background: var(--bg-panel) !important;
-     border: 1px solid var(--border) !important;
-     border-radius: 8px !important;
-     margin: 8px 16px !important;
- }
-
- /* Scrollbar */
- ::-webkit-scrollbar { width: 8px; }
- ::-webkit-scrollbar-track { background: var(--bg-dark); }
- ::-webkit-scrollbar-thumb { background: var(--border); border-radius: 4px; }
- ::-webkit-scrollbar-thumb:hover { background: var(--text-muted); }
-
- .footerhint {
-     color: var(--text-muted) !important;
-     font-size: 11px !important;
-     margin-top: auto !important;
-     padding-top: 16px !important;
- }
- """
-
-
- # ═══════════════════════════════════════════════════════════════════════════════
- # Gradio App
- # ═══════════════════════════════════════════════════════════════════════════════
- with gr.Blocks(title=APP_TITLE) as demo:
-     state = gr.State(_default_state())
-
-     with gr.Row(elem_id="layout", equal_height=True):
-
-         # ═══════════════════════════════════════════════════════════════════
-         # Sidebar
-         # ═══════════════════════════════════════════════════════════════════
-         with gr.Column(scale=3, elem_id="sidebar"):
-             gr.Markdown(f"### {APP_TITLE}")
-             stats = gr.Markdown(value=system_stats_md, every=2, elem_classes="stats")
-
-             new_chat_btn = gr.Button("+ New chat", variant="primary")
-
-             gr.Markdown("<div class='smalllabel'>Conversation</div>")
-             chat_selector = gr.Dropdown(choices=[], value=None, label=None, interactive=True)
-
-             gr.Markdown("<div class='smalllabel'>Rename</div>")
-             rename_box = gr.Textbox(value="", placeholder="Chat title", label=None)
-             rename_btn = gr.Button("Save title", variant="secondary")
-
-             with gr.Row():
-                 delete_btn = gr.Button("Delete", variant="secondary")
-                 regen_btn = gr.Button("Regenerate", variant="secondary")
-
-             gr.Markdown("---")
-
-             with gr.Row():
-                 export_btn = gr.Button("Export", variant="secondary")
-                 export_file = gr.File(label=None, visible=False)
-
-             import_file = gr.File(label="Import", file_types=[".json"])
-             import_btn = gr.Button("Import", variant="secondary")
-
-             gr.Markdown(f"<div class='footerhint'>v{VERSION} · OpenAI / HuggingFace / llama.cpp</div>")
-
-         # ═══════════════════════════════════════════════════════════════════
-         # Main Chat Panel
-         # ═══════════════════════════════════════════════════════════════════
-         with gr.Column(scale=7, elem_id="main"):
-
-             with gr.Row():
-                 backend = gr.Dropdown(
-                     choices=["Demo (offline)", "OpenAI-compatible", "Hugging Face Inference", "llama.cpp (local)"],
-                     value="Demo (offline)",
-                     label="Backend",
-                 )
-
-             with gr.Accordion("Settings", open=False):
-                 system_prompt = gr.Textbox(
-                     value=DEFAULT_SYSTEM_PROMPT,
-                     label="System prompt",
-                     lines=3,
-                 )
-                 with gr.Row():
-                     temperature = gr.Slider(0, 2, value=0.7, step=0.1, label="Temperature")
-                     max_tokens = gr.Slider(64, 4096, value=1024, step=64, label="Max tokens")
-
-                 gr.Markdown("#### OpenAI-compatible")
-                 with gr.Row():
-                     openai_base = gr.Textbox(value=OPENAI_BASE_URL, label="Base URL")
-                     openai_model = gr.Textbox(value=OPENAI_MODEL, label="Model")
-                     openai_key = gr.Textbox(value=OPENAI_API_KEY, label="API key", type="password")
-
-                 gr.Markdown("#### Hugging Face Inference")
-                 with gr.Row():
-                     hf_model_box = gr.Textbox(value=HF_MODEL, label="Model")
-                     hf_token_box = gr.Textbox(value=HF_TOKEN, label="HF token", type="password")
-
-                 if Llama is None:
-                     gr.Markdown("*llama-cpp-python not installed*")
-
-             chatbot = gr.Chatbot(
-                 elem_id="chatbot",
-                 height=550,
-             )
-
-             composer = gr.MultimodalTextbox(
-                 placeholder="Message TextAI...",
-                 file_count="multiple",
-                 sources=["upload"],
-                 submit_btn=True,
-                 stop_btn=True,
-             )
-
-     # ═══════════════════════════════════════════════════════════════════════
-     # Event Wiring
-     # ═══════════════════════════════════════════════════════════════════════
-
-     def _init_sidebar(s):
-         choices = _conv_choices(s)
-         active = s["active_id"]
-         conv = _active_conv(s)
-         return choices, active, conv["title"], _to_chatbot_format(conv["messages"])
-
-     demo.load(
-         _init_sidebar,
-         inputs=[state],
-         outputs=[chat_selector, chat_selector, rename_box, chatbot],
-         queue=False,
-     )
-
-     # New chat
-     new_chat_btn.click(
-         ui_new_chat,
-         inputs=[state],
-         outputs=[state, chat_selector, chat_selector, chatbot],
-         queue=False,
-     ).then(lambda: "", None, rename_box, queue=False)
-
-     # Select chat
-     chat_selector.change(
-         ui_select_chat,
-         inputs=[state, chat_selector],
-         outputs=[state, chatbot, rename_box],
-         queue=False,
      )
-
-     # Rename
-     rename_btn.click(
-         ui_rename_chat,
-         inputs=[state, rename_box],
-         outputs=[state, chat_selector],
-         queue=False,
      )
-
-     # Delete
-     delete_btn.click(
-         ui_delete_chat,
-         inputs=[state],
-         outputs=[state, chat_selector, chat_selector, chatbot, rename_box],
-         queue=False,
      )
-
-     # Export
-     export_btn.click(
-         ui_export,
-         inputs=[state],
-         outputs=[export_file],
-         queue=False,
-     ).then(
-         lambda p: gr.File(value=p, visible=True),
-         inputs=[export_file],
-         outputs=[export_file],
-         queue=False,
-     )
-
-     # Import
-     import_btn.click(
-         ui_import,
-         inputs=[state, import_file],
-         outputs=[state, chat_selector, chat_selector, chatbot, rename_box],
-         queue=False,
-     )
-
-     # Send message -> generate
-     add_evt = composer.submit(
-         ui_add_user_message,
-         inputs=[state, composer],
-         outputs=[state, chatbot, composer],
-         queue=False,
-     )
-
-     add_evt.then(
-         ui_generate_assistant,
-         inputs=[state, backend, system_prompt, temperature, max_tokens,
-                 openai_base, openai_key, openai_model, hf_token_box, hf_model_box],
-         outputs=[state, chatbot],
-         queue=True,
-     )
-
-     # Regenerate
-     regen_btn.click(
-         ui_regenerate_prepare,
-         inputs=[state],
-         outputs=[state, chatbot],
-         queue=False,
-     ).then(
-         ui_generate_assistant,
-         inputs=[state, backend, system_prompt, temperature, max_tokens,
-                 openai_base, openai_key, openai_model, hf_token_box, hf_model_box],
-         outputs=[state, chatbot],
-         queue=True,
-     )
-
-
- # ═══════════════════════════════════════════════════════════════════════════════
- # Main
- # ═══════════════════════════════════════════════════════════════════════════════
  if __name__ == "__main__":
-     demo.launch(
-         server_name="0.0.0.0",
-         server_port=7860,
-         share=False,
-         css=CSS,
-     )
 
  """
+ TextAI v2 - Simple Chat Interface
+ Based on text_space pattern - Chat tab + Tools tab
  """
  import gradio as gr
+ import json
+
+ # Core imports
+ from modules.config import VERSION
+ from modules.theme import DARK_CSS, get_theme
+ from modules.logger import logger
+ from modules.system import get_system_info, get_system_dashboard, get_space_info, get_storage_stats
+ from modules.utils import format_size
+
+ # File & Tools imports
+ from modules.file_manager import (
+     get_folder_contents, get_breadcrumb, get_status, get_quick_access_folders,
+     go_up, go_home, refresh_all, search_files, upload_files, download_file,
+     create_file, read_file, delete_item
+ )
+ from modules.ui_helpers import (
+     ui_create_folder, ui_create_file, ui_delete,
+     on_folder_tree_select, navigate_from_main,
+     get_events_table, get_logs_display,
+     ui_refresh_logs, ui_clear_logs
+ )
+ from modules.hf_hub import (
+     search_hf_models_enhanced, download_model_file,
+     list_downloaded_models, remove_model,
+     get_model_files, TEXT_MODELS_DIR, get_installed_model_ids
+ )
+ from modules.api import ping, get_api_list, api_get_logs
+
+ # TextAI imports
  try:
+     from modules.text_ai import (
+         model_manager, session_manager,
+         DEFAULT_CHAT_PROMPT, DEFAULT_ROLEPLAY_PROMPT,
+         api_list_models, api_load_model, api_unload_model, api_model_status,
+         api_chat, api_create_session, api_list_sessions, api_get_session,
+         api_delete_session, api_inference
    )
+     from modules.text_ui import (
+         get_chat_list, get_model_choices, get_current_model_display,
+         format_chat_history_tuples, get_models_table,
+         ui_new_chat, ui_load_session, ui_send_message, ui_rename_session,
+         ui_delete_session, ui_clear_chat, ui_load_model_by_index, ui_unload_model,
+         get_suggested_models_table, SUGGESTED_MODELS, DEFAULT_MODEL,
+         download_default_model
+     )
+     TEXTAI_AVAILABLE = True
+ except Exception as e:
+     print(f"TextAI import error: {e}")
+     TEXTAI_AVAILABLE = False
+     def get_chat_list(): return []
+     def get_current_model_display(): return "Not available"
+     def get_models_table(): return []
+     def get_suggested_models_table(): return []
+     def download_default_model(): return "TextAI not available"
+     model_manager = None
+     DEFAULT_MODEL = {"name": "TinyLlama 1.1B", "size": "0.7GB"}
+     DEFAULT_CHAT_PROMPT = ""
+
+
+ # ══════════════════════════════════════════════════════════════════════════════
+ # HELPER FUNCTIONS
+ # ══════════════════════════════════════════════════════════════════════════════
+
+ def get_initial_installed_models():
+     """Get installed models for initial table display"""
+     rows = []
      try:
+         if not TEXT_MODELS_DIR.exists():
+             return rows
+         for d in TEXT_MODELS_DIR.iterdir():
+             if d.is_dir():
+                 gguf_files = list(d.glob("*.gguf"))
+                 if gguf_files:
+                     for f in gguf_files:
+                         size = format_size(f.stat().st_size)
+                         rows.append([f.stem, "GGUF", size, "Ready"])
+                 elif (d / "config.json").exists():
+                     total_size = sum(f.stat().st_size for f in d.rglob("*") if f.is_file())
+                     rows.append([d.name, "Transformers", format_size(total_size), "Ready"])
+         for f in TEXT_MODELS_DIR.glob("*.gguf"):
+             size = format_size(f.stat().st_size)
+             rows.append([f.stem, "GGUF", size, "Ready"])
+     except Exception as e:
+         print(f"Initial installed models error: {e}")
+     return rows
+
+
+ # ══════════════════════════════════════════════════════════════════════════════
+ # CUSTOM CSS
+ # ══════════════════════════════════════════════════════════════════════════════
+
+ CHAT_CSS = """
+ /* Clean ChatGPT/Grok Style */
+ .chat-container { max-width: 900px; margin: 0 auto; }
+ .sidebar { border-right: 1px solid #333; }
+ .chat-list { max-height: 70vh; overflow-y: auto; }
+ .chat-item { padding: 10px; cursor: pointer; border-radius: 8px; margin: 4px 0; }
+ .chat-item:hover { background: #2a2a2a; }
+ .menu-btn { background: transparent !important; border: none !important; }
+ .full-width { width: 100% !important; }
+ .no-label label { display: none !important; }
+ .model-badge {
+     background: #1a1a2e;
+     padding: 4px 12px;
+     border-radius: 20px;
+     font-size: 12px;
+     color: #888;
+ }
+ .settings-panel {
+     background: #1a1a1a;
+     border-radius: 12px;
+     padding: 16px;
+ }
+ #chatbot { min-height: 500px; }
+ .message { white-space: pre-wrap; }
+ """
+
+ # ══════════════════════════════════════════════════════════════════════════════
+ # BUILD APP
+ # ══════════════════════════════════════════════════════════════════════════════
+
+ with gr.Blocks(title="TextAI v2") as demo:
+
+     # Hidden state
+     current_session_id = gr.State("")
+     is_roleplay_mode = gr.State(False)
+
+     with gr.Tabs() as main_tabs:
+
+         # ══════════════════════════════════════════════════════════════════
+         # TAB 1: CHAT
+         # ══════════════════════════════════════════════════════════════════
+         with gr.Tab("Chat", id=0):
+             with gr.Row():
+                 # LEFT SIDEBAR - Chat List
+                 with gr.Column(scale=1, min_width=250):
+                     with gr.Row():
+                         btn_new_chat = gr.Button("+ New Chat", variant="primary", size="sm")
+
+                     chat_list = gr.Dataframe(
+                         headers=["id", "Chat"],
+                         value=get_chat_list() if TEXTAI_AVAILABLE else [],
+                         interactive=False,
+                         row_count=15,
+                         column_count=(2, "fixed"),
+                         show_label=False
+                     )
+
+                 # MAIN CHAT AREA
+                 with gr.Column(scale=4):
+                     with gr.Row():
+                         model_display = gr.Textbox(
+                             value=get_current_model_display() if TEXTAI_AVAILABLE else "No model",
+                             interactive=False,
+                             show_label=False,
+                             scale=3,
+                             elem_classes="model-badge"
+                         )
+                         with gr.Column(scale=1):
+                             menu_dropdown = gr.Dropdown(
+                                 choices=["Switch Model", "Rename Chat", "Delete Chat", "Clear Chat"],
+                                 label="",
+                                 show_label=False,
+                                 scale=1
+                             )
+
+                     chat_title = gr.Textbox(
+                         value="New Chat",
+                         show_label=False,
+                         interactive=False,
+                         elem_classes="no-label"
+                     )
+
+                     chatbot = gr.Chatbot(
+                         label="",
+                         height=500,
+                         show_label=False,
+                         elem_id="chatbot"
+                     )
+
+                     with gr.Row():
+                         chat_input = gr.Textbox(
+                             placeholder="Send a message...",
+                             show_label=False,
+                             scale=6,
+                             lines=1,
+                             max_lines=5
+                         )
+                         btn_send = gr.Button("Send", variant="primary", scale=1)
+
+                 # RIGHT SIDEBAR - Quick Settings
+                 with gr.Column(scale=1, min_width=180, visible=True):
+                     gr.Markdown("### Settings")
+                     max_tokens = gr.Slider(
+                         minimum=64, maximum=2048, value=512, step=64,
+                         label="Max Tokens"
+                     )
+                     temperature = gr.Slider(
+                         minimum=0.1, maximum=2.0, value=0.7, step=0.1,
+                         label="Temperature"
+                     )
+                     gr.Markdown("---")
+                     roleplay_toggle = gr.Checkbox(label="Roleplay Mode", value=False)
+
+             # Hidden panels
+             with gr.Row(visible=False) as rename_panel:
+                 rename_input = gr.Textbox(label="New Title", scale=3)
+                 btn_rename = gr.Button("Rename", scale=1)
+                 btn_cancel_rename = gr.Button("Cancel", scale=1)
+
+             with gr.Row(visible=False) as model_panel:
+                 with gr.Column():
+                     gr.Markdown("### Select Model")
+                     with gr.Row():
+                         model_choices = gr.Dropdown(
+                             choices=get_model_choices() if TEXTAI_AVAILABLE else [],
+                             label="Available Models",
+                             scale=3
+                         )
+                         btn_refresh_model_list = gr.Button("🔄", size="sm", scale=1)
+                     with gr.Row():
+                         btn_load_model = gr.Button("Load Model", variant="primary")
+                         btn_close_model = gr.Button("Close")
+
+
+         # ══════════════════════════════════════════════════════════════════
+         # TAB 2: TOOLS (Models, Files, HF Hub, System, Logs, API)
+         # ══════════════════════════════════════════════════════════════════
+         with gr.Tab("Tools", id=1):
+             with gr.Tabs():
+
+                 # MODEL MANAGER
+                 with gr.Tab("Models"):
+                     gr.Markdown("### Installed Models")
+                     gr.Markdown("Click a model to load it. Download new models from **HF Hub** tab.")
+
+                     with gr.Row():
+                         current_model_text = gr.Textbox(
+                             value=get_current_model_display() if TEXTAI_AVAILABLE else "No model loaded",
+                             label="Currently Loaded",
+                             interactive=False,
+                             scale=3
+                         )
+                         btn_refresh_models = gr.Button("🔄 Refresh", size="sm")
+                         btn_unload_model = gr.Button("Unload", size="sm", variant="stop")
+
+                     models_table = gr.Dataframe(
+                         headers=["●", "Name", "Type", "Size", "Custom Prompt"],
+                         value=get_models_table() if TEXTAI_AVAILABLE else [],
+                         interactive=False,
+                         row_count=6
+                     )
+
+                     model_status = gr.Textbox(label="", interactive=False, show_label=False)
+
+                     with gr.Accordion("Quick Install", open=False):
+                         gr.Markdown(f"**Recommended for testing:** {DEFAULT_MODEL['name']} ({DEFAULT_MODEL['size']})")
+                         with gr.Row():
+                             btn_install_default = gr.Button("Install TinyLlama", variant="primary", size="sm")
+                             install_status = gr.Textbox(label="", interactive=False, show_label=False, scale=3)
+
+                         gr.Markdown("**Other suggestions:**")
+                         suggested_models = gr.Dataframe(
+                             headers=["Name", "Size", "Model ID"],
+                             value=get_suggested_models_table() if TEXTAI_AVAILABLE else [],
+                             interactive=False,
+                             row_count=4
+                         )
+
+
+                 # HF HUB - Model Search & Download
+                 with gr.Tab("HF Hub"):
+                     gr.Markdown("### Model Manager")
+
+                     gr.Markdown("#### Installed Models")
+                     installed_models_table = gr.Dataframe(
+                         headers=["Name", "Type", "Size", "Status"],
+                         value=get_initial_installed_models(),
+                         interactive=False,
+                         row_count=4
+                     )
+                     with gr.Row():
+                         selected_installed_model = gr.Textbox(label="Selected", interactive=False, scale=2)
+                         btn_refresh_installed = gr.Button("🔄", size="sm")
+                         btn_delete_installed = gr.Button("🗑️ Delete", size="sm", variant="stop")
+
+                     gr.Markdown("---")
+                     gr.Markdown("#### Search & Add Models")
+
+                     with gr.Row():
+                         hf_query = gr.Textbox(
+                             label="",
+                             placeholder="Search models... (tinyllama, mistral, phi, llama)",
+                             scale=4,
+                             show_label=False
+                         )
+                         hf_max_params = gr.Dropdown(
+                             label="Size",
+                             choices=[("< 3B", "3"), ("< 7B", "7"), ("Any", "0")],
+                             value="7",
+                             scale=1
+                         )
+                         btn_hf_search = gr.Button("Search", variant="primary", scale=1)
+
+                     hf_status = gr.Textbox(label="", interactive=False, show_label=False)
+
+                     hf_results = gr.Dataframe(
+                         headers=["Model ID", "Params", "Est.Size", "Status", "Downloads"],
+                         interactive=False,
+                         row_count=8
+                     )
+
+                     selected_model_id = gr.Textbox(label="Selected", interactive=False)
+
+                     with gr.Row():
+                         btn_view_files = gr.Button("View Files", scale=1)
+                         btn_auto_add = gr.Button("Auto Add (Best Q4)", variant="primary", scale=1)
+
+                     with gr.Column(visible=False) as file_panel:
+                         gr.Markdown("**Select Quantization:**")
+                         file_list_hub = gr.Dataframe(
+                             headers=["Filename", "Quant", "Recommended"],
+                             interactive=False,
+                             row_count=6
+                         )
+                         selected_file = gr.Textbox(label="Selected File", interactive=False)
+                         with gr.Row():
+                             btn_download_file = gr.Button("Download This File", variant="primary")
+                             btn_close_files = gr.Button("Close")
+
+                     action_status = gr.Textbox(label="Status", interactive=False)
+
+
+                 # FILE MANAGER
+                 with gr.Tab("Files"):
+                     with gr.Row():
+                         btn_up = gr.Button("Up", size="sm")
+                         btn_home_fm = gr.Button("Home", size="sm")
+                         btn_refresh_fm = gr.Button("Refresh", size="sm")
+                         btn_new_folder = gr.Button("+ Folder", size="sm")
+                         btn_new_file = gr.Button("+ File", size="sm")
+
+                     breadcrumb = gr.Textbox(value=get_breadcrumb(), label="", interactive=False)
+
+                     with gr.Row():
+                         with gr.Column(scale=1):
+                             folder_tree = gr.Dataframe(
+                                 headers=["", "Name", "Path"],
+                                 value=get_quick_access_folders(),
+                                 interactive=False,
+                                 row_count=10,
+                                 show_label=False
+                             )
+
+                         with gr.Column(scale=3):
+                             search_input = gr.Textbox(placeholder="Search...", show_label=False)
+                             file_list = gr.Dataframe(
+                                 headers=["", "Name", "Type", "Size", "Modified"],
+                                 value=get_folder_contents(),
+                                 interactive=False,
+                                 row_count=10,
+                                 show_label=False
+                             )
+                             upload_area = gr.File(label="Upload", file_count="multiple")
+
+                         with gr.Column(scale=1):
+                             selected_item = gr.Textbox(label="Selected", interactive=False)
+                             preview_txt = gr.TextArea(label="Preview", lines=6, interactive=False)
+                             with gr.Row():
+                                 btn_delete_fm = gr.Button("Delete", size="sm", variant="stop")
+                                 btn_download_fm = gr.Button("Download", size="sm")
+
+                     with gr.Row():
+                         new_name = gr.Textbox(label="Name", scale=2)
+                         new_content = gr.TextArea(label="Content", lines=2, scale=3)
+
+                     status_bar = gr.Textbox(value=get_status(), label="", interactive=False)
+
+
+                 # SYSTEM
+                 with gr.Tab("System"):
+                     with gr.Row():
+                         with gr.Column():
+                             gr.Markdown("### System Info")
+                             btn_sys_refresh = gr.Button("Refresh")
+                             sys_info = gr.Code(value=get_system_dashboard(), language=None)
+
+                         with gr.Column():
+                             gr.Markdown("### Storage")
+                             btn_storage_refresh = gr.Button("Refresh")
+                             storage_info = gr.Code(value=get_storage_stats(), language="json")
+
+
+                 # LOGS
+                 with gr.Tab("Logs"):
+                     with gr.Row():
+                         log_type = gr.Dropdown(
+                             choices=["all", "events", "errors"],
+                             value="all",
+                             label="Type"
+                         )
+                         log_limit = gr.Number(value=50, label="Limit")
+                         btn_refresh_logs = gr.Button("Refresh", variant="primary")
+                         btn_clear_logs = gr.Button("Clear", variant="stop")
+
+                     logs_display = gr.TextArea(
+                         value=get_logs_display("all", 50),
+                         lines=20,
+                         interactive=False
+                     )
+
+
+                 # API
+                 with gr.Tab("API"):
+                     gr.Markdown("### API Endpoints")
+                     api_list = gr.Code(value=get_api_list(), language="json")
+
+                     gr.Markdown("### Test")
+                     btn_ping = gr.Button("Ping", variant="primary")
+                     ping_result = gr.JSON()
+
+                     gr.Markdown("### Quick Logs (Errors)")
+                     btn_get_errors = gr.Button("Get Recent Errors")
+                     error_logs = gr.Code(language="json")
+
+
+         # ══════════════════════════════════════════════════════════════════
+         # TAB 3: ABOUT
+         # ══════════════════════════════════════════════════════════════════
+         with gr.Tab("About", id=2):
+             gr.Markdown("""
+ # TextAI v2
+
+ **AI-Powered Text Generation**
+
+ Features:
+ - Chat with local LLM models (GGUF)
+ - Model management with HF Hub integration
+ - Auto-download models
+ - File manager
+ - System monitoring
+
+ ---
+
+ **Keyboard Shortcuts:**
+ - `Enter` - Send message
+ - `Shift+Enter` - New line
+
+ ---
+
+ Built with Gradio | Models from HuggingFace
+             """)
+
+
+     # ══════════════════════════════════════════════════════════════════════
+     # EVENT HANDLERS
+     # ══════════════════════════════════════════════════════════════════════
+
+     if TEXTAI_AVAILABLE:
+         # New Chat
+         def handle_new_chat(is_rp):
+             mode = "roleplay" if is_rp else "chat"
+             sid, hist, chats, title = ui_new_chat(mode)
+             return sid, hist, chats, title, get_current_model_display()
+
+         btn_new_chat.click(
+             handle_new_chat,
+             inputs=[roleplay_toggle],
+             outputs=[current_session_id, chatbot, chat_list, chat_title, model_display]
          )
+
+         # Load Session
+         def handle_load_session(evt: gr.SelectData, sessions_data):
+             sid, hist, title, is_rp = ui_load_session(evt, sessions_data)
+             return sid, hist, title, is_rp
+
+         chat_list.select(
+             handle_load_session,
+             inputs=[chat_list],
+             outputs=[current_session_id, chatbot, chat_title, roleplay_toggle]
+         )
+
+         # Send Message
+         def handle_send(sid, msg, hist, tokens, temp, is_rp):
+             for result in ui_send_message(sid, msg, hist, tokens, temp, is_rp):
+                 hist_out, _, sid_out, chats = result
+                 yield sid_out, hist_out, "", chats, get_current_model_display()
+
+         btn_send.click(
+             handle_send,
+             inputs=[current_session_id, chat_input, chatbot, max_tokens, temperature, roleplay_toggle],
+             outputs=[current_session_id, chatbot, chat_input, chat_list, model_display]
+         )
+
+         chat_input.submit(
+             handle_send,
+             inputs=[current_session_id, chat_input, chatbot, max_tokens, temperature, roleplay_toggle],
+             outputs=[current_session_id, chatbot, chat_input, chat_list, model_display]
+         )
+
+         # Menu Actions
+         def handle_menu(choice, sid):
+             if choice == "Rename Chat":
+                 return gr.update(visible=True), gr.update(visible=False)
+             elif choice == "Switch Model":
+                 return gr.update(visible=False), gr.update(visible=True)
+             elif choice == "Delete Chat":
+                 if sid:
+                     session_manager.delete_session(sid)
+                 return gr.update(visible=False), gr.update(visible=False)
+             elif choice == "Clear Chat":
+                 if sid:
+                     session_manager.clear_session(sid)
+                 return gr.update(visible=False), gr.update(visible=False)
+             return gr.update(visible=False), gr.update(visible=False)
+
+         menu_dropdown.change(
+             handle_menu,
+             inputs=[menu_dropdown, current_session_id],
+             outputs=[rename_panel, model_panel]
+         )
+
+         # Rename
+         def handle_rename(sid, new_title):
+             chats, title = ui_rename_session(sid, new_title)
+             return chats, title, gr.update(visible=False)
+
+         btn_rename.click(
+             handle_rename,
+             inputs=[current_session_id, rename_input],
+             outputs=[chat_list, chat_title, rename_panel]
+         )
+
+         btn_cancel_rename.click(
+             lambda: gr.update(visible=False),
+             outputs=[rename_panel]
+         )
+
+         # Model Panel
+         btn_close_model.click(
+             lambda: gr.update(visible=False),
+             outputs=[model_panel]
+         )
+
+         def handle_quick_load_model(choice):
+             if not choice or choice == "No models available":
+                 return get_current_model_display(), gr.update(visible=False)
+             name = choice.replace("✓ ", "").split(" (")[0]
+             models = model_manager.get_available_models()
+             for m in models:
+                 if m["name"] == name:
+                     model_manager.load_model(m["id"])
+                     break
+             return get_current_model_display(), gr.update(visible=False)
+
+         btn_load_model.click(
+             handle_quick_load_model,
+             inputs=[model_choices],
+             outputs=[model_display, model_panel]
+         )
+
+         def refresh_model_choices():
+             choices = get_model_choices()
+             return gr.update(choices=choices)
+
+         btn_refresh_model_list.click(
+             refresh_model_choices,
+             outputs=[model_choices]
+         )
+
+         # Model Manager (Tools tab)
+         btn_refresh_models.click(
+             lambda: (get_models_table(), get_current_model_display()),
+             outputs=[models_table, current_model_text]
+         )
+
+         models_table.select(
+             ui_load_model_by_index,
+             inputs=[models_table],
+             outputs=[models_table, current_model_text, model_status]
+         )
+
+         btn_unload_model.click(
+             ui_unload_model,
+             outputs=[models_table, current_model_text, model_status]
+         )
+
+         # Install Default Model
+         def handle_install_default():
+             result = download_default_model()
+             return result, get_models_table(), get_current_model_display()
+
+         btn_install_default.click(
+             handle_install_default,
+             outputs=[install_status, models_table, current_model_text]
+         )
+
+     # File Manager Events
+     folder_tree.select(on_folder_tree_select, [folder_tree], [file_list, breadcrumb, status_bar, folder_tree])
+     file_list.select(
+         navigate_from_main,
+         [file_list],
+         [file_list, breadcrumb, status_bar, folder_tree, selected_item, gr.State(None), preview_txt, gr.State("")]
+     )
+     btn_up.click(go_up, outputs=[file_list, breadcrumb, status_bar, folder_tree])
+     btn_home_fm.click(go_home, outputs=[file_list, breadcrumb, status_bar, folder_tree])
+     btn_refresh_fm.click(refresh_all, outputs=[file_list, breadcrumb, status_bar, folder_tree])
+     btn_new_folder.click(ui_create_folder, [new_name], [file_list, breadcrumb, status_bar, folder_tree])
+     btn_new_file.click(ui_create_file, [new_name, new_content], [file_list, breadcrumb, status_bar, folder_tree])
+     btn_delete_fm.click(ui_delete, [selected_item], [file_list, breadcrumb, status_bar, folder_tree])
+     upload_area.change(upload_files, [upload_area], [file_list, breadcrumb, status_bar, folder_tree])
+     search_input.submit(search_files, [search_input], [file_list, breadcrumb, status_bar])
+
+     # HF Hub Events
+     def get_installed_models_list():
+         rows = []
+         try:
+             if not TEXT_MODELS_DIR.exists():
+                 return rows
+             for d in TEXT_MODELS_DIR.iterdir():
+                 if d.is_dir():
+                     gguf_files = list(d.glob("*.gguf"))
+                     if gguf_files:
+                         for f in gguf_files:
+                             size = format_size(f.stat().st_size)
+                             rows.append([f.stem, "GGUF", size, "Ready"])
+                     elif (d / "config.json").exists():
+                         total_size = sum(f.stat().st_size for f in d.rglob("*") if f.is_file())
+                         rows.append([d.name, "Transformers", format_size(total_size), "Ready"])
+             for f in TEXT_MODELS_DIR.glob("*.gguf"):
+                 size = format_size(f.stat().st_size)
+                 rows.append([f.stem, "GGUF", size, "Ready"])
+         except Exception as e:
+             logger.error("HFHub", f"List installed error: {str(e)}")
+         return rows
+
+     def refresh_installed():
+         rows = get_installed_models_list()
+         choices = get_model_choices() if TEXTAI_AVAILABLE else []
+         return rows, gr.update(choices=choices)
+
+     btn_refresh_installed.click(
+         refresh_installed,
+         outputs=[installed_models_table, model_choices]
+     )
+
+     def select_installed_model(evt: gr.SelectData, data):
+         try:
+             row_idx = evt.index[0] if isinstance(evt.index, (list, tuple)) else evt.index
+             if hasattr(data, 'values'):
+                 data_list = data.values.tolist()
+             else:
+                 data_list = data
+             if data_list and row_idx < len(data_list):
+                 return data_list[row_idx][0]
+         except Exception as e:
+             logger.error("HFHub", f"Select installed error: {str(e)}")
+         return ""
+
+     installed_models_table.select(
+         select_installed_model,
+         [installed_models_table],
+         [selected_installed_model]
+     )
+
+     def delete_installed_model(model_name):
+         if not model_name:
+             return get_installed_models_list(), "", "Select a model first", gr.update()
+         result = remove_model(model_name)
+         rows = get_installed_models_list()
+         choices = get_model_choices() if TEXTAI_AVAILABLE else []
+         return rows, "", result, gr.update(choices=choices)
+
+     btn_delete_installed.click(
+         delete_installed_model,
+         inputs=[selected_installed_model],
+         outputs=[installed_models_table, selected_installed_model, action_status, model_choices]
+     )
+
+     def handle_hf_search(query, max_params_str):
+         max_params = float(max_params_str) if max_params_str != "0" else None
+         rows, status, total = search_hf_models_enhanced(
+             query=query,
+             task="text-generation",
+             library="gguf",
+             sort="downloads",
+             max_params=max_params,
+             recommended_only=False,
+             limit=30,
+             offset=0
+         )
+         simple_rows = [[r[0], r[1], r[2], r[3], r[4]] for r in rows]
+         return simple_rows, status
+
+     btn_hf_search.click(
+         handle_hf_search,
+         [hf_query, hf_max_params],
+         [hf_results, hf_status]
+     )
+
+     hf_query.submit(
+         handle_hf_search,
+         [hf_query, hf_max_params],
+         [hf_results, hf_status]
+     )
+
+     def handle_select_model(evt: gr.SelectData, results_data):
+         try:
+             row_idx = evt.index[0] if isinstance(evt.index, (list, tuple)) else evt.index
+             if hasattr(results_data, 'values'):
+                 data_list = results_data.values.tolist()
+             elif hasattr(results_data, 'tolist'):
+                 data_list = results_data.tolist()
+             else:
+                 data_list = results_data
+             if data_list and row_idx < len(data_list):
+                 return data_list[row_idx][0]
+         except Exception as e:
+             logger.error("HFHub", f"Select error: {str(e)}")
+         return ""
+
+     hf_results.select(handle_select_model, [hf_results], [selected_model_id])
+
+     def handle_view_files(model_id):
+         if not model_id:
+             return gr.update(visible=False), [], "", "Select a model first"
+         files = get_model_files(model_id)
+         if not files:
+             return gr.update(visible=False), [], "", "No GGUF files found"
+         rows = []
+         for f in files:
+             rec = "✓" if f["recommended"] else ""
+             rows.append([f["filename"], f["quant"], rec])
+         rows.sort(key=lambda x: (x[2] != "✓", x[0]))
+         return gr.update(visible=True), rows, "", f"Found {len(files)} files"
+
+     btn_view_files.click(
+         handle_view_files,
+         [selected_model_id],
+         [file_panel, file_list_hub, selected_file, action_status]
+     )
+
+     def handle_select_file(evt: gr.SelectData, files_data):
+         try:
+             row_idx = evt.index[0] if isinstance(evt.index, (list, tuple)) else evt.index
+             if hasattr(files_data, 'values'):
+                 data_list = files_data.values.tolist()
+             elif hasattr(files_data, 'tolist'):
+                 data_list = files_data.tolist()
+             else:
+                 data_list = files_data
+             if data_list and row_idx < len(data_list):
+                 return data_list[row_idx][0]
+         except Exception as e:
+             logger.error("HFHub", f"File select error: {str(e)}")
+         return ""
+
+     file_list_hub.select(handle_select_file, [file_list_hub], [selected_file])
+
+     def handle_download_file(model_id, filename):
+         if not model_id or not filename:
+             return "Select model and file"
+         result = download_model_file(model_id, filename)
+         return result
+
+     btn_download_file.click(
+         handle_download_file,
+         [selected_model_id, selected_file],
+         [action_status]
      )
+
+     btn_close_files.click(
+         lambda: gr.update(visible=False),
+         outputs=[file_panel]
      )
+
+     def handle_auto_add(model_id):
+         if not model_id:
+             return "Select a model first", get_installed_models_list(), gr.update()
+         files = get_model_files(model_id)
+         if not files:
+             return "No GGUF files found for this model", get_installed_models_list(), gr.update()
+
+         best_file = None
+         priority = ["Q4_K_M", "Q4_K_S", "Q5_K_M", "Q4_0", "Q5_0"]
+         for quant in priority:
+             for f in files:
+                 if f["quant"] == quant:
+                     best_file = f["filename"]
+                     break
+             if best_file:
+                 break
+
+         if not best_file and files:
+             best_file = files[0]["filename"]
+
+         if best_file:
+             result = download_model_file(model_id, best_file)
+             installed = get_installed_models_list()
+             choices = get_model_choices() if TEXTAI_AVAILABLE else []
+             return result, installed, gr.update(choices=choices)
+
+         return "Could not find suitable file", get_installed_models_list(), gr.update()
+
+     btn_auto_add.click(
+         handle_auto_add,
+         [selected_model_id],
+         [action_status, installed_models_table, model_choices]
      )
+
+     # System
+     btn_sys_refresh.click(get_system_dashboard, outputs=[sys_info])
+     btn_storage_refresh.click(get_storage_stats, outputs=[storage_info])
+
+     # Logs
+     btn_refresh_logs.click(ui_refresh_logs, [log_type, log_limit], [logs_display])
+     btn_clear_logs.click(ui_clear_logs, [log_type], [logs_display, gr.State("")])
+
+     # API
+     btn_ping.click(ping, outputs=[ping_result])
+     btn_get_errors.click(lambda: api_get_logs("errors", 50), outputs=[error_logs])
+
+ # ══════════════════════════════════════════════════════════════════════════════
+ # LAUNCH
+ # ══════════════════════════════════════════════════════════════════════════════
  if __name__ == "__main__":
+     logger.info("System", "TextAI v2 starting", {"version": VERSION})
+     demo.launch(theme=get_theme(), css=DARK_CSS + CHAT_CSS)
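Editor's note on the pattern above: the new app.py wraps the optional TextAI imports in a try/except and swaps in stub functions on failure, so the Space still boots when modules.text_ai cannot load. A minimal, self-contained sketch of that guard pattern, with names taken from the diff (run outside this repo it simply exercises the fallback path):

# Sketch of the import-guard pattern used by the new app.py (assumption:
# "modules.text_ai" / "modules.text_ui" exist only in the real deployment).
try:
    from modules.text_ai import model_manager   # real backend
    from modules.text_ui import get_chat_list   # real UI helpers
    TEXTAI_AVAILABLE = True
except Exception as e:                          # ImportError, bad config, ...
    print(f"TextAI import error: {e}")
    TEXTAI_AVAILABLE = False

    def get_chat_list():                        # stub keeps the UI alive
        return []

    model_manager = None

# UI code then branches on the flag instead of crashing:
rows = get_chat_list() if TEXTAI_AVAILABLE else []

The same flag is reused throughout the UI builders above (for example, every Dataframe's initial value), which is why the stubs return empty containers rather than raising.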
 
 
 
 
modules/__init__.py ADDED
@@ -0,0 +1 @@
+ # Modules package
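For orientation, the package layout implied by app.py's imports and the committed bytecode below, as a hedged sketch (only api.py and config.py sources appear in this commit view; the roles of the other modules are inferred from the import lists, not shown here):

# modules/
#   __init__.py      # package marker (above)
#   api.py           # JSON API endpoints (source below)
#   config.py        # VERSION and storage paths (source below)
#   events.py        # event_manager for scheduled events
#   file_manager.py  # file-browser backend
#   hf_hub.py        # HF Hub search/download, TEXT_MODELS_DIR
#   logger.py        # shared logger
#   system.py        # system/storage dashboards
#   text_ai.py       # model_manager, session_manager
#   text_ui.py       # chat UI helpers
#   theme.py         # DARK_CSS, get_theme
#   ui_helpers.py    # shared UI callbacks
#   utils.py         # format_size and friends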
modules/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (167 Bytes).

modules/__pycache__/api.cpython-310.pyc ADDED
Binary file (3.78 kB).

modules/__pycache__/config.cpython-310.pyc ADDED
Binary file (648 Bytes).

modules/__pycache__/events.cpython-310.pyc ADDED
Binary file (8.78 kB).

modules/__pycache__/file_manager.cpython-310.pyc ADDED
Binary file (9.54 kB).

modules/__pycache__/hf_hub.cpython-310.pyc ADDED
Binary file (11.2 kB).

modules/__pycache__/logger.cpython-310.pyc ADDED
Binary file (4.76 kB).

modules/__pycache__/system.cpython-310.pyc ADDED
Binary file (4.2 kB).

modules/__pycache__/text_ai.cpython-310.pyc ADDED
Binary file (22 kB).

modules/__pycache__/text_ui.cpython-310.pyc ADDED
Binary file (11.4 kB).

modules/__pycache__/theme.cpython-310.pyc ADDED
Binary file (3.15 kB).

modules/__pycache__/ui_helpers.cpython-310.pyc ADDED
Binary file (4.97 kB).

modules/__pycache__/utils.cpython-310.pyc ADDED
Binary file (2 kB).
 
modules/api.py ADDED
@@ -0,0 +1,95 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ API Module
3
+ All API endpoints for external access.
4
+ """
5
+ import json
6
+ from datetime import datetime
7
+
8
+ from .config import VERSION
9
+ from .logger import logger
10
+ from .events import event_manager
11
+ from .file_manager import create_folder, create_file, read_file, write_file, delete_item, rename_item
12
+ from .system import get_system_info, get_space_info, get_storage_stats
13
+
14
+
15
+ def ping() -> dict:
16
+ """Health check endpoint"""
17
+ return {"status": "ok", "version": VERSION, "timestamp": datetime.now().isoformat()}
18
+
19
+
20
+ def get_api_list() -> str:
21
+ """List all available API endpoints"""
22
+ return json.dumps({
23
+ "system": ["ping()", "get_system_info()", "get_system_dashboard()", "get_space_info()", "get_storage_stats()"],
24
+ "files": ["create_folder(name)", "create_file(name,content)", "read_file(name)", "write_file(name,content)",
25
+ "delete_item(name)", "rename_item(old,new)", "search_files(query)"],
26
+ "hub": ["search_hf_models(query,task,library,sort)", "download_model_file(model_id,filename)",
27
+ "download_from_url(url,filename)", "list_downloaded_models()"],
28
+ "events": ["api_create_event(type,path,hours,enabled,desc)", "api_delete_event(id)",
29
+ "api_toggle_event(id)", "api_run_event(id)", "api_get_events()"],
30
+ "logs": ["api_get_logs(type,limit,level,category)", "api_clear_logs(type)", "api_export_logs(type)"]
31
+ }, indent=2)
32
+
33
+
34
+ # Event API
35
+ def api_create_event(event_type: str, target_path: str, interval_hours: float, enabled: bool, description: str) -> str:
36
+ """Create a scheduled event"""
37
+ try:
38
+ event_id = event_manager.create_event(
39
+ event_type=event_type,
40
+ target_path=target_path,
41
+ interval_hours=float(interval_hours),
42
+ enabled=enabled,
43
+ description=description
44
+ )
45
+ return json.dumps({"success": True, "event_id": event_id})
46
+ except Exception as e:
47
+ return json.dumps({"success": False, "error": str(e)})
48
+
49
+
50
+ def api_delete_event(event_id: str) -> str:
51
+ """Delete a scheduled event"""
52
+ success = event_manager.delete_event(event_id)
53
+ return json.dumps({"success": success})
54
+
55
+
56
+ def api_toggle_event(event_id: str) -> str:
57
+ """Toggle event enabled status"""
58
+ success = event_manager.toggle_event(event_id)
59
+ if success and event_id in event_manager.events:
60
+ return json.dumps({"success": True, "enabled": event_manager.events[event_id].enabled})
61
+ return json.dumps({"success": False})
62
+
63
+
64
+ def api_run_event(event_id: str) -> str:
65
+ """Run event immediately"""
66
+ success = event_manager.run_now(event_id)
67
+ return json.dumps({"success": success})
68
+
69
+
70
+ def api_get_events() -> str:
71
+ """Get all scheduled events"""
72
+ return json.dumps({"events": event_manager.get_events()}, indent=2)
73
+
74
+
75
+ # Log API
76
+ def api_get_logs(log_type: str = "all", limit: int = 100, level: str = "", category: str = "") -> str:
77
+ """Get logs"""
78
+ logs = logger.get_logs(
79
+ log_type=log_type,
80
+ limit=int(limit),
81
+ level=level if level else None,
82
+ category=category if category else None
83
+ )
84
+ return json.dumps({"count": len(logs), "logs": logs}, indent=2)
85
+
86
+
87
+ def api_clear_logs(log_type: str = "all") -> str:
88
+ """Clear logs"""
89
+ success = logger.clear_logs(log_type)
90
+ return json.dumps({"success": success})
91
+
92
+
93
+ def api_export_logs(log_type: str = "all") -> str:
94
+ """Export logs"""
95
+ return logger.export_logs(log_type)
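
Every endpoint above returns a JSON string (only `ping` returns a plain dict), so callers parse results with `json.loads`. A usage sketch, assuming the `modules` package is importable; the event parameters are illustrative:

```python
# Usage sketch for the API module; the event parameters are illustrative.
import json
from modules.api import ping, api_create_event, api_get_events, api_get_logs

print(ping())  # plain dict: {"status": "ok", "version": ..., "timestamp": ...}

resp = json.loads(api_create_event(
    event_type="cleanup_temp",
    target_path="/tmp/space_storage",
    interval_hours=6.0,
    enabled=True,
    description="demo event",
))
if resp["success"]:
    events = json.loads(api_get_events())["events"]
    print(len(events), "scheduled events")

print(json.loads(api_get_logs("events", limit=5))["count"])
```
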
modules/config.py ADDED
@@ -0,0 +1,24 @@
1
+ """
2
+ Configuration Module
3
+ Central configuration for the entire application.
4
+ """
5
+ import os
6
+ from pathlib import Path
7
+
8
+ # Storage paths
9
+ STORAGE_DIR = Path("/tmp/space_storage")
10
+ MODELS_DIR = STORAGE_DIR / "models"
11
+ DOWNLOADS_DIR = STORAGE_DIR / "downloads"
12
+ LOGS_DIR = STORAGE_DIR / "logs"
13
+ UPLOADS_DIR = STORAGE_DIR / "uploads"
14
+
15
+ # Create directories
16
+ for d in [STORAGE_DIR, MODELS_DIR, DOWNLOADS_DIR, LOGS_DIR, UPLOADS_DIR]:
17
+ d.mkdir(parents=True, exist_ok=True)
18
+
19
+ # API config
20
+ HF_API_URL = "https://huggingface.co/api"
21
+ HF_TOKEN = os.getenv("HF_TOKEN", "")
22
+
23
+ # App version
24
+ VERSION = "5.0.0"
modules/events.py ADDED
@@ -0,0 +1,277 @@
1
+ """
2
+ Event Management Module
3
+ Scheduled events, auto-cleanup, and session management.
4
+ """
5
+ import json
6
+ import shutil
7
+ import threading
8
+ import time
9
+ import uuid
10
+ from pathlib import Path
11
+ from datetime import datetime, timedelta
12
+ from typing import Dict, List, Optional
13
+ from dataclasses import dataclass, asdict
14
+
15
+ from .config import STORAGE_DIR, LOGS_DIR, UPLOADS_DIR
16
+ from .logger import logger
17
+
18
+
19
+ @dataclass
20
+ class ScheduledEvent:
21
+ event_id: str
22
+ event_type: str
23
+ target_path: str
24
+ schedule_type: str
25
+ interval_hours: float
26
+ next_run: str
27
+ enabled: bool
28
+ created_at: str
29
+ last_run: Optional[str] = None
30
+ run_count: int = 0
31
+ description: str = ""
32
+
33
+
34
+ class EventManager:
35
+ """Manages scheduled events, auto-cleanup, and session events"""
36
+
37
+ def __init__(self, storage_dir: Path):
38
+ self.storage_dir = storage_dir
39
+ self.events_file = LOGS_DIR / "scheduled_events.json"
40
+ self.sessions_file = LOGS_DIR / "active_sessions.json"
41
+ self._scheduler_thread = None
42
+ self._running = False
43
+ self._lock = threading.Lock()
44
+
45
+ self.events: Dict[str, ScheduledEvent] = self._load_events()
46
+ self.start_scheduler()
47
+
48
+ def _load_events(self) -> Dict[str, ScheduledEvent]:
49
+ if not self.events_file.exists():
50
+ return {}
51
+ try:
52
+ data = json.loads(self.events_file.read_text())
53
+ return {k: ScheduledEvent(**v) for k, v in data.items()}
54
+ except Exception:
55
+ return {}
56
+
57
+ def _save_events(self):
58
+ with self._lock:
59
+ data = {k: asdict(v) for k, v in self.events.items()}
60
+ self.events_file.write_text(json.dumps(data, indent=2))
61
+
62
+ def create_event(
63
+ self,
64
+ event_type: str,
65
+ target_path: str,
66
+ schedule_type: str = "interval",
67
+ interval_hours: float = 3.0,
68
+ enabled: bool = True,
69
+ description: str = ""
70
+ ) -> str:
71
+ event_id = str(uuid.uuid4())[:8]
72
+ next_run = (datetime.now() + timedelta(hours=interval_hours)).isoformat()
73
+
74
+ event = ScheduledEvent(
75
+ event_id=event_id,
76
+ event_type=event_type,
77
+ target_path=target_path,
78
+ schedule_type=schedule_type,
79
+ interval_hours=interval_hours,
80
+ next_run=next_run,
81
+ enabled=enabled,
82
+ created_at=datetime.now().isoformat(),
83
+ description=description or f"{event_type} on {target_path}"
84
+ )
85
+
86
+ self.events[event_id] = event
87
+ self._save_events()
88
+
89
+ logger.event("EventManager", f"Created event: {event_type}", {
90
+ "event_id": event_id,
91
+ "target": target_path,
92
+ "interval_hours": interval_hours
93
+ })
94
+
95
+ return event_id
96
+
97
+ def delete_event(self, event_id: str) -> bool:
98
+ if event_id in self.events:
99
+ del self.events[event_id]
100
+ self._save_events()
101
+ logger.event("EventManager", f"Deleted event: {event_id}")
102
+ return True
103
+ return False
104
+
105
+ def toggle_event(self, event_id: str) -> bool:
106
+ if event_id in self.events:
107
+ self.events[event_id].enabled = not self.events[event_id].enabled
108
+ self._save_events()
109
+ status = "enabled" if self.events[event_id].enabled else "disabled"
110
+ logger.event("EventManager", f"Event {event_id} {status}")
111
+ return True
112
+ return False
113
+
114
+ def get_events(self) -> List[Dict]:
115
+ return [asdict(e) for e in self.events.values()]
116
+
117
+ def execute_event(self, event: ScheduledEvent) -> bool:
118
+ try:
119
+ target = Path(event.target_path)
120
+
121
+ if event.event_type == "delete_file":
122
+ if target.exists() and target.is_file():
123
+ target.unlink()
124
+ logger.event("Cleanup", f"Deleted file: {target.name}")
125
+
126
+ elif event.event_type == "delete_folder":
127
+ if target.exists() and target.is_dir():
128
+ shutil.rmtree(target)
129
+ logger.event("Cleanup", f"Deleted folder: {target.name}")
130
+
131
+ elif event.event_type == "cleanup_uploads":
132
+ cutoff = datetime.now() - timedelta(hours=event.interval_hours)
133
+ deleted = 0
134
+ for f in UPLOADS_DIR.iterdir():
135
+ if f.is_file() and datetime.fromtimestamp(f.stat().st_mtime) < cutoff:
136
+ f.unlink()
137
+ deleted += 1
138
+ logger.event("Cleanup", f"Cleaned {deleted} old uploads")
139
+
140
+ elif event.event_type == "cleanup_old_files":
141
+ if target.exists() and target.is_dir():
142
+ cutoff = datetime.now() - timedelta(hours=event.interval_hours)
143
+ deleted = 0
144
+ for f in target.rglob("*"):
145
+ if f.is_file() and datetime.fromtimestamp(f.stat().st_mtime) < cutoff:
146
+ f.unlink()
147
+ deleted += 1
148
+ logger.event("Cleanup", f"Cleaned {deleted} old files in {target.name}")
149
+
150
+ elif event.event_type == "cleanup_empty_folders":
151
+ if target.exists() and target.is_dir():
152
+ deleted = 0
153
+ for d in sorted(target.rglob("*"), reverse=True):
154
+ if d.is_dir() and not any(d.iterdir()):
155
+ d.rmdir()
156
+ deleted += 1
157
+ logger.event("Cleanup", f"Removed {deleted} empty folders")
158
+
159
+ elif event.event_type == "cleanup_logs":
160
+ logger.clear_logs("all")
161
+ logger.event("Cleanup", "Logs rotated")
162
+
163
+ elif event.event_type == "cleanup_temp":
164
+ temp_patterns = ["*.tmp", "*.temp", ".DS_Store", "Thumbs.db"]
165
+ deleted = 0
166
+ for pattern in temp_patterns:
167
+ for f in self.storage_dir.rglob(pattern):
168
+ f.unlink()
169
+ deleted += 1
170
+ logger.event("Cleanup", f"Cleaned {deleted} temp files")
171
+
172
+ event.last_run = datetime.now().isoformat()
173
+ event.run_count += 1
174
+ if event.schedule_type == "interval":
175
+ event.next_run = (datetime.now() + timedelta(hours=event.interval_hours)).isoformat()
176
+ elif event.schedule_type == "once":
177
+ event.enabled = False
178
+
179
+ self._save_events()
180
+ return True
181
+
182
+ except Exception as e:
183
+ logger.error("EventManager", f"Event execution failed: {event.event_id}", {"error": str(e)})
184
+ return False
185
+
186
+ def _scheduler_loop(self):
187
+ while self._running:
188
+ try:
189
+ now = datetime.now()
190
+ for event_id, event in list(self.events.items()):
191
+ if not event.enabled:
192
+ continue
193
+
194
+ next_run = datetime.fromisoformat(event.next_run)
195
+ if now >= next_run:
196
+ self.execute_event(event)
197
+ except Exception as e:
198
+ logger.error("Scheduler", f"Scheduler error: {str(e)}")
199
+
200
+ time.sleep(60)
201
+
202
+ def start_scheduler(self):
203
+ if self._scheduler_thread is None or not self._scheduler_thread.is_alive():
204
+ self._running = True
205
+ self._scheduler_thread = threading.Thread(target=self._scheduler_loop, daemon=True)
206
+ self._scheduler_thread.start()
207
+ logger.info("Scheduler", "Scheduler started")
208
+
209
+ def stop_scheduler(self):
210
+ self._running = False
211
+ logger.info("Scheduler", "Scheduler stopped")
212
+
213
+ def run_now(self, event_id: str) -> bool:
214
+ if event_id in self.events:
215
+ return self.execute_event(self.events[event_id])
216
+ return False
217
+
218
+ # Session management
219
+ def register_session(self, session_id: str) -> str:
220
+ sessions = self._load_sessions()
221
+ sessions[session_id] = {
222
+ "created_at": datetime.now().isoformat(),
223
+ "last_activity": datetime.now().isoformat(),
224
+ "uploads": []
225
+ }
226
+ self._save_sessions(sessions)
227
+ logger.event("Session", f"New session: {session_id[:8]}...")
228
+ return session_id
229
+
230
+ def end_session(self, session_id: str):
231
+ sessions = self._load_sessions()
232
+ if session_id in sessions:
233
+ for upload in sessions[session_id].get("uploads", []):
234
+ try:
235
+ Path(upload).unlink(missing_ok=True)
236
+ except Exception:
237
+ pass
238
+ del sessions[session_id]
239
+ self._save_sessions(sessions)
240
+ logger.event("Session", f"Session ended: {session_id[:8]}...")
241
+
242
+ def track_upload(self, session_id: str, file_path: str):
243
+ sessions = self._load_sessions()
244
+ if session_id in sessions:
245
+ sessions[session_id]["uploads"].append(file_path)
246
+ sessions[session_id]["last_activity"] = datetime.now().isoformat()
247
+ self._save_sessions(sessions)
248
+
249
+ def _load_sessions(self) -> Dict:
250
+ if not self.sessions_file.exists():
251
+ return {}
252
+ try:
253
+ return json.loads(self.sessions_file.read_text())
254
+ except Exception:
255
+ return {}
256
+
257
+ def _save_sessions(self, sessions: Dict):
258
+ self.sessions_file.write_text(json.dumps(sessions, indent=2))
259
+
260
+
261
+ # Global event manager instance
262
+ event_manager = EventManager(STORAGE_DIR)
263
+
264
+ # Create default events if none exist
265
+ if not event_manager.events:
266
+ event_manager.create_event(
267
+ event_type="cleanup_uploads",
268
+ target_path=str(UPLOADS_DIR),
269
+ interval_hours=3.0,
270
+ description="Auto-cleanup uploads every 3 hours"
271
+ )
272
+ event_manager.create_event(
273
+ event_type="cleanup_temp",
274
+ target_path=str(STORAGE_DIR),
275
+ interval_hours=6.0,
276
+ description="Clean temp files every 6 hours"
277
+ )
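
`EventManager` persists events to JSON, and a daemon thread wakes every 60 seconds to fire whatever is due; `run_now` bypasses the schedule entirely. A sketch of direct use through the module-level singleton (the target path is illustrative):

```python
# Sketch: create a cleanup event, fire it immediately, then remove it.
from modules.events import event_manager

event_id = event_manager.create_event(
    event_type="cleanup_empty_folders",
    target_path="/tmp/space_storage/uploads",  # illustrative target
    interval_hours=1.0,
    description="demo: prune empty folders",
)
event_manager.run_now(event_id)       # executes without waiting for the 60s loop
print(event_manager.get_events())     # list of event dicts, incl. run_count
event_manager.delete_event(event_id)
```
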
modules/file_manager.py ADDED
@@ -0,0 +1,344 @@
1
+ """
2
+ File Manager Module
3
+ File and folder operations with navigation.
4
+ """
5
+ import json
6
+ import shutil
7
+ from pathlib import Path
8
+ from datetime import datetime
9
+ from typing import List, Tuple, Optional
10
+
11
+ from .config import STORAGE_DIR
12
+ from .utils import format_size, get_file_icon, get_mime_type, format_timestamp
13
+ from .logger import logger
14
+
15
+ # Global state
16
+ clipboard = {"files": [], "operation": None}
17
+ current_path = STORAGE_DIR
18
+
19
+
20
+ def get_quick_access_folders() -> List[List[str]]:
21
+ """Get list of folders for sidebar tree"""
22
+ folders = [["πŸ“‚", "Root", ""]]
23
+
24
+ def add_folders(path: Path, level: int = 0):
25
+ try:
26
+ dirs = sorted([d for d in path.iterdir() if d.is_dir()], key=lambda x: x.name.lower())
27
+ for d in dirs:
28
+ rel = str(d.relative_to(STORAGE_DIR))
29
+ indent = " " * level
30
+ is_current = "β–Ά" if d == current_path else " "
31
+ folders.append([is_current, f"{indent}πŸ“ {d.name}", rel])
32
+ add_folders(d, level + 1)
33
+ except Exception:
34
+ pass
35
+
36
+ add_folders(STORAGE_DIR)
37
+
38
+ if current_path == STORAGE_DIR:
39
+ folders[0][0] = "β–Ά"
40
+
41
+ return folders
42
+
43
+
44
+ def get_folder_contents() -> List[List[str]]:
45
+ """Get contents of current folder"""
46
+ items = []
47
+
48
+ try:
49
+ entries = sorted(current_path.iterdir(), key=lambda x: (not x.is_dir(), x.name.lower()))
50
+ for f in entries:
51
+ is_dir = f.is_dir()
52
+ stat = f.stat()
53
+
54
+ if is_dir:
55
+ try:
56
+ count = len(list(f.iterdir()))
57
+ size_str = f"{count} items"
58
+ except Exception:
59
+ size_str = ""
60
+ else:
61
+ size_str = format_size(stat.st_size)
62
+
63
+ items.append([
64
+ get_file_icon(f.name, is_dir),
65
+ f.name,
66
+ "Folder" if is_dir else (Path(f.name).suffix.upper()[1:] or "FILE"),
67
+ size_str,
68
+ format_timestamp(stat.st_mtime)
69
+ ])
70
+ except Exception:
71
+ pass
72
+
73
+ return items
74
+
75
+
76
+ def get_breadcrumb() -> str:
77
+ """Get current path as breadcrumb"""
78
+ if current_path == STORAGE_DIR:
79
+ return "πŸ“‚ Root"
80
+
81
+ parts = current_path.relative_to(STORAGE_DIR).parts
82
+ breadcrumb = "πŸ“‚ Root"
83
+ for p in parts:
84
+ breadcrumb += f" β€Ί {p}"
85
+ return breadcrumb
86
+
87
+
88
+ def get_status() -> str:
89
+ """Get status bar info"""
90
+ try:
91
+ items = list(current_path.iterdir())
92
+ folders = len([i for i in items if i.is_dir()])
93
+ files = len([i for i in items if i.is_file()])
94
+ total_size = sum(i.stat().st_size for i in items if i.is_file())
95
+ return f"{folders} folders, {files} files | {format_size(total_size)}"
96
+ except Exception:
97
+ return "Ready"
98
+
99
+
100
+ def navigate_to_folder(folder_path: str):
101
+ """Navigate to a specific folder"""
102
+ global current_path
103
+
104
+ if not folder_path or folder_path == "" or folder_path == "Root":
105
+ current_path = STORAGE_DIR
106
+ else:
107
+ new_path = STORAGE_DIR / folder_path
108
+ if new_path.exists() and new_path.is_dir():
109
+ current_path = new_path
110
+
111
+ return get_folder_contents(), get_breadcrumb(), get_status(), get_quick_access_folders()
112
+
113
+
114
+ def go_up():
115
+ """Navigate to parent folder"""
116
+ global current_path
117
+ if current_path != STORAGE_DIR:
118
+ current_path = current_path.parent
119
+ return get_folder_contents(), get_breadcrumb(), get_status(), get_quick_access_folders()
120
+
121
+
122
+ def go_home():
123
+ """Navigate to root"""
124
+ global current_path
125
+ current_path = STORAGE_DIR
126
+ return get_folder_contents(), get_breadcrumb(), get_status(), get_quick_access_folders()
127
+
128
+
129
+ def refresh_all():
130
+ """Refresh everything"""
131
+ return get_folder_contents(), get_breadcrumb(), get_status(), get_quick_access_folders()
132
+
133
+
134
+ # CRUD Operations
135
+ def create_folder(name: str) -> str:
136
+ if not name:
137
+ return json.dumps({"success": False, "error": "Name required"})
138
+ path = current_path / name
139
+ if path.exists():
140
+ return json.dumps({"success": False, "error": "Already exists"})
141
+ path.mkdir(parents=True)
142
+ logger.info("FileOps", f"Created folder: {name}")
143
+ return json.dumps({"success": True, "created": name})
144
+
145
+
146
+ def create_file(name: str, content: str = "") -> str:
147
+ if not name:
148
+ return json.dumps({"success": False, "error": "Name required"})
149
+ path = current_path / name
150
+ if path.exists():
151
+ return json.dumps({"success": False, "error": "Already exists"})
152
+ path.write_text(content)
153
+ logger.info("FileOps", f"Created file: {name}")
154
+ return json.dumps({"success": True, "created": name})
155
+
156
+
157
+ def read_file(name: str) -> str:
158
+ if not name:
159
+ return ""
160
+ path = current_path / name
161
+ if not path.exists() or path.is_dir():
162
+ return ""
163
+ try:
164
+ content = path.read_text()
165
+ return content[:100000] + ("\n...[truncated]" if len(content) > 100000 else "")
166
+ except Exception:
167
+ return f"[Binary: {format_size(path.stat().st_size)}]"
168
+
169
+
170
+ def write_file(name: str, content: str) -> str:
171
+ if not name:
172
+ return json.dumps({"success": False, "error": "Name required"})
173
+ path = current_path / name
174
+ path.write_text(content)
175
+ logger.info("FileOps", f"Updated file: {name}")
176
+ return json.dumps({"success": True, "saved": name})
177
+
178
+
179
+ def delete_item(name: str) -> str:
180
+ if not name:
181
+ return json.dumps({"success": False, "error": "Name required"})
182
+ path = current_path / name
183
+ if not path.exists():
184
+ return json.dumps({"success": False, "error": "Not found"})
185
+ if path.is_dir():
186
+ shutil.rmtree(path)
187
+ else:
188
+ path.unlink()
189
+ logger.info("FileOps", f"Deleted: {name}")
190
+ return json.dumps({"success": True, "deleted": name})
191
+
192
+
193
+ def rename_item(old: str, new: str) -> str:
194
+ if not old or not new:
195
+ return json.dumps({"success": False, "error": "Names required"})
196
+ old_path = current_path / old
197
+ new_path = current_path / new
198
+ if not old_path.exists():
199
+ return json.dumps({"success": False, "error": "Not found"})
200
+ if new_path.exists():
201
+ return json.dumps({"success": False, "error": "Target exists"})
202
+ old_path.rename(new_path)
203
+ logger.info("FileOps", f"Renamed: {old} -> {new}")
204
+ return json.dumps({"success": True, "renamed": old, "to": new})
205
+
206
+
207
+ # Clipboard operations
208
+ def copy_to_clipboard(name: str) -> str:
209
+ global clipboard
210
+ if not name:
211
+ return "⚠️ Select item"
212
+ path = current_path / name
213
+ if not path.exists():
214
+ return "❌ Not found"
215
+ clipboard = {"files": [str(path)], "operation": "copy"}
216
+ return f"πŸ“‹ Copied: {name}"
217
+
218
+
219
+ def cut_to_clipboard(name: str) -> str:
220
+ global clipboard
221
+ if not name:
222
+ return "⚠️ Select item"
223
+ path = current_path / name
224
+ if not path.exists():
225
+ return "❌ Not found"
226
+ clipboard = {"files": [str(path)], "operation": "cut"}
227
+ return f"βœ‚οΈ Cut: {name}"
228
+
229
+
230
+ def paste_from_clipboard():
231
+ global clipboard
232
+ if not clipboard["files"]:
233
+ return get_folder_contents(), get_breadcrumb(), "⚠️ Clipboard empty", get_quick_access_folders()
234
+
235
+ pasted = []
236
+ for src in clipboard["files"]:
237
+ src_path = Path(src)
238
+ if not src_path.exists():
239
+ continue
240
+
241
+ dest = current_path / src_path.name
242
+ if dest.exists():
243
+ base, ext = src_path.stem, src_path.suffix
244
+ i = 1
245
+ while dest.exists():
246
+ dest = current_path / f"{base}_copy{i}{ext}"
247
+ i += 1
248
+
249
+ if clipboard["operation"] == "copy":
250
+ if src_path.is_dir():
251
+ shutil.copytree(src_path, dest)
252
+ else:
253
+ shutil.copy2(src_path, dest)
254
+ else:
255
+ shutil.move(str(src_path), str(dest))
256
+ pasted.append(dest.name)
257
+
258
+ if clipboard["operation"] == "cut":
259
+ clipboard = {"files": [], "operation": None}
260
+
261
+ logger.info("FileOps", f"Pasted: {', '.join(pasted)}")
262
+ return get_folder_contents(), get_breadcrumb(), f"βœ… Pasted: {', '.join(pasted)}", get_quick_access_folders()
263
+
264
+
265
+ def upload_files(files, session_id: str = ""):
266
+ from .events import event_manager
267
+
268
+ if not files:
269
+ return get_folder_contents(), get_breadcrumb(), "No files", get_quick_access_folders()
270
+
271
+ uploaded = []
272
+ for f in files:
273
+ if f is None:
274
+ continue
275
+ name = Path(f.name).name
276
+ dest = current_path / name
277
+ if dest.exists():
278
+ base, ext = Path(name).stem, Path(name).suffix
279
+ i = 1
280
+ while dest.exists():
281
+ dest = current_path / f"{base}_{i}{ext}"
282
+ i += 1
283
+ shutil.copy2(f.name, dest)
284
+ uploaded.append(dest.name)
285
+
286
+ if session_id:
287
+ event_manager.track_upload(session_id, str(dest))
288
+
289
+ logger.info("FileOps", f"Uploaded: {', '.join(uploaded)}")
290
+ return get_folder_contents(), get_breadcrumb(), f"βœ… Uploaded: {', '.join(uploaded)}", get_quick_access_folders()
291
+
292
+
293
+ def download_file(name: str):
294
+ if not name:
295
+ return None
296
+ path = current_path / name
297
+ return str(path) if path.exists() and path.is_file() else None
298
+
299
+
300
+ def get_file_preview(name: str) -> Tuple[Optional[str], str, str]:
301
+ if not name:
302
+ return None, "", "Select a file"
303
+
304
+ path = current_path / name
305
+ if not path.exists():
306
+ return None, "", "Not found"
307
+
308
+ if path.is_dir():
309
+ count = len(list(path.iterdir()))
310
+ return None, "", f"πŸ“ Folder with {count} items"
311
+
312
+ stat = path.stat()
313
+ info = f"πŸ“„ {name}\nπŸ“Š {format_size(stat.st_size)}\nπŸ• {format_timestamp(stat.st_mtime)}"
314
+
315
+ mime = get_mime_type(name)
316
+ if mime == 'image':
317
+ return str(path), "", info
318
+ elif mime == 'text':
319
+ try:
320
+ content = path.read_text()[:10000]
321
+ return None, content, info
322
+ except Exception:
323
+ return None, "[Cannot read]", info
324
+
325
+ return None, f"[{Path(name).suffix.upper()[1:] or 'Binary'} file]", info
326
+
327
+
328
+ def search_files(query: str):
329
+ if not query:
330
+ return get_folder_contents(), get_breadcrumb(), get_status()
331
+
332
+ results = []
333
+ for f in STORAGE_DIR.rglob(f"*{query}*"):
334
+ is_dir = f.is_dir()
335
+ stat = f.stat()
336
+ results.append([
337
+ get_file_icon(f.name, is_dir),
338
+ f.name,
339
+ "Folder" if is_dir else (Path(f.name).suffix.upper()[1:] or "FILE"),
340
+ "" if is_dir else format_size(stat.st_size),
341
+ format_timestamp(stat.st_mtime)
342
+ ])
343
+
344
+ return results, f"πŸ” Search: {query}", f"Found {len(results)} items"
modules/hf_hub.py ADDED
@@ -0,0 +1,457 @@
1
+ """
2
+ HuggingFace Hub Module
3
+ Enhanced model search with filters, recommendations, and unified management.
4
+ """
5
+ import json
6
+ import requests
7
+ import re
8
+ from pathlib import Path
9
+ from typing import List, Dict, Tuple, Optional
10
+
11
+ from .config import HF_API_URL, HF_TOKEN, MODELS_DIR, DOWNLOADS_DIR
12
+ from .utils import format_size
13
+ from .logger import logger
14
+
15
+ # Text models directory
16
+ TEXT_MODELS_DIR = MODELS_DIR / "txt"
17
+ TEXT_MODELS_DIR.mkdir(parents=True, exist_ok=True)
18
+
19
+ # ══════════════════════════════════════════════════════════════════════════════
20
+ # MODEL COMPATIBILITY CHECKER
21
+ # ══════════════════════════════════════════════════════════════════════════════
22
+
23
+ # Free HF Space limits (CPU, 16GB RAM, ~15GB disk)
24
+ MAX_RECOMMENDED_SIZE_GB = 4.0 # Max recommended model size for free tier
25
+ MAX_PARAMS_BILLION = 7.0 # Max recommended parameters
26
+
27
+ def estimate_params_from_name(model_id: str) -> Optional[float]:
28
+ """Extract parameter count from model name (e.g., '7B', '13B', '1.1B')"""
29
+ name = model_id.lower()
30
+
31
+ # Common patterns: 7b, 7B, 7-b, 7_b, 7billion
32
+ patterns = [
33
+ r'(\d+\.?\d*)b(?:illion)?', # 7b, 7B, 7.5b
34
+ r'(\d+\.?\d*)-?b(?:illion)?', # 7-b
35
+ r'(\d+\.?\d*)_?b(?:illion)?', # 7_b
36
+ ]
37
+
38
+ for pattern in patterns:
39
+ match = re.search(pattern, name)
40
+ if match:
41
+ try:
42
+ return float(match.group(1))
43
+ except ValueError:
44
+ pass
45
+ return None
46
+
47
+
48
+ def estimate_size_from_name(model_id: str) -> Optional[float]:
49
+ """Estimate GGUF Q4 size in GB from parameter count"""
50
+ params = estimate_params_from_name(model_id)
51
+ if params:
52
+ # Q4_K_M is roughly 0.5-0.6 GB per billion params
53
+ return params * 0.55
54
+ return None
55
+
56
+
57
+ def check_model_compatibility(model_id: str, model_info: dict = None) -> Dict:
58
+ """
59
+ Check if model is compatible with free HF Space.
60
+ Returns dict with recommendation status and reason.
61
+ """
62
+ result = {
63
+ "recommended": False,
64
+ "status": "unknown",
65
+ "reason": "",
66
+ "params": None,
67
+ "estimated_size_gb": None
68
+ }
69
+
70
+ # Extract params from name
71
+ params = estimate_params_from_name(model_id)
72
+ result["params"] = params
73
+
74
+ if params:
75
+ result["estimated_size_gb"] = round(params * 0.55, 1)
76
+
77
+ if params <= 1.5:
78
+ result["recommended"] = True
79
+ result["status"] = "βœ… Best"
80
+ result["reason"] = f"~{params}B params - Fast on CPU"
81
+ elif params <= 3:
82
+ result["recommended"] = True
83
+ result["status"] = "βœ… Good"
84
+ result["reason"] = f"~{params}B params - Works well"
85
+ elif params <= MAX_PARAMS_BILLION:
86
+ result["recommended"] = True
87
+ result["status"] = "⚠️ OK"
88
+ result["reason"] = f"~{params}B params - May be slow"
89
+ elif params <= 13:
90
+ result["recommended"] = False
91
+ result["status"] = "⚠️ Large"
92
+ result["reason"] = f"~{params}B params - Very slow/may fail"
93
+ else:
94
+ result["recommended"] = False
95
+ result["status"] = "❌ Too Large"
96
+ result["reason"] = f"~{params}B params - Won't work on free tier"
97
+ else:
98
+ # Can't determine - check for known small models
99
+ lower_name = model_id.lower()
100
+ if any(x in lower_name for x in ['tiny', 'small', 'mini', 'phi-2', 'qwen2-0.5', 'qwen2-1.5']):
101
+ result["recommended"] = True
102
+ result["status"] = "βœ… Good"
103
+ result["reason"] = "Small model"
104
+ elif any(x in lower_name for x in ['70b', '65b', '40b', '30b', '20b']):
105
+ result["recommended"] = False
106
+ result["status"] = "❌ Too Large"
107
+ result["reason"] = "Too large for free tier"
108
+ else:
109
+ result["status"] = "❓ Unknown"
110
+ result["reason"] = "Check size manually"
111
+
112
+ return result
113
+
114
+
115
+ # ══════════════════════════════════════════════════════════════════════════════
116
+ # ENHANCED SEARCH
117
+ # ══════════════════════════════════════════════════════════════════════════════
118
+
119
+ def search_hf_models_enhanced(
120
+ query: str = "",
121
+ task: str = "text-generation",
122
+ library: str = "gguf",
123
+ sort: str = "downloads",
124
+ max_params: float = 7.0,
125
+ recommended_only: bool = False,
126
+ limit: int = 30,
127
+ offset: int = 0
128
+ ) -> Tuple[List[List], str, int]:
129
+ """
130
+ Enhanced HuggingFace model search with filtering.
131
+
132
+ Returns:
133
+ - rows: List of [Model ID, Params, Size, Status, Downloads, Action]
134
+ - status: Status message
135
+ - total: count of models fetched from the API in this batch (not a global total)
136
+ """
137
+ try:
138
+ params = {
139
+ "limit": min(limit + 20, 100), # Fetch extra for filtering
140
+ "sort": sort,
141
+ "direction": -1
142
+ }
143
+ if query:
144
+ params["search"] = query
145
+ if task:
146
+ params["pipeline_tag"] = task
147
+ if library:
148
+ params["library"] = library
149
+
150
+ headers = {"Authorization": f"Bearer {HF_TOKEN}"} if HF_TOKEN else {}
151
+ resp = requests.get(f"{HF_API_URL}/models", params=params, headers=headers, timeout=30)
152
+ resp.raise_for_status()
153
+
154
+ all_models = resp.json()
155
+
156
+ # Get list of installed models
157
+ installed = get_installed_model_ids()
158
+
159
+ rows = []
160
+ for m in all_models:
161
+ model_id = m.get("id", "")
162
+
163
+ # Check compatibility
164
+ compat = check_model_compatibility(model_id, m)
165
+
166
+ # Filter by max params if specified
167
+ if max_params and compat["params"] and compat["params"] > max_params:
168
+ continue
169
+
170
+ # Filter by recommended only
171
+ if recommended_only and not compat["recommended"]:
172
+ continue
173
+
174
+ # Determine if installed
175
+ is_installed = model_id in installed or any(model_id.replace('/', '_') in inst for inst in installed)
176
+ action = "βœ“ Installed" if is_installed else "Add"
177
+
178
+ # Format params display
179
+ params_display = f"{compat['params']}B" if compat['params'] else "?"
180
+ size_display = f"~{compat['estimated_size_gb']}GB" if compat['estimated_size_gb'] else "?"
181
+
182
+ rows.append([
183
+ model_id,
184
+ params_display,
185
+ size_display,
186
+ compat["status"],
187
+ f"{m.get('downloads', 0):,}",
188
+ action
189
+ ])
190
+
191
+ if len(rows) >= limit:
192
+ break
193
+
194
+ # Apply offset for pagination
195
+ if offset > 0:
196
+ rows = rows[offset:]
197
+
198
+ total = len(all_models)
199
+ logger.info("HFHub", f"Search: {query or 'all'}", {"found": len(rows), "total": total})
200
+ return rows, f"Found {len(rows)} models (showing {limit})", total
201
+
202
+ except Exception as e:
203
+ logger.error("HFHub", f"Search failed: {str(e)}")
204
+ return [], f"❌ {str(e)}", 0
205
+
206
+
207
+ def get_installed_model_ids() -> List[str]:
208
+ """Get list of installed model IDs"""
209
+ installed = []
210
+ for d in TEXT_MODELS_DIR.iterdir():
211
+ if d.is_dir():
212
+ installed.append(d.name)
213
+ for f in TEXT_MODELS_DIR.glob("*.gguf"):
214
+ installed.append(f.stem)
215
+ return installed
216
+
217
+
218
+ # ══════════════════════════════════════════════════════════════════════════════
219
+ # UNIFIED MODEL TABLE
220
+ # ══════════════════════════════════════════════════════════════════════════════
221
+
222
+ def get_unified_model_table() -> List[List]:
223
+ """
224
+ Get unified table of installed + available models.
225
+ Columns: [Model ID, Type, Size, Status, Loaded, Action]
226
+ """
227
+ rows = []
228
+
229
+ # Get installed models
230
+ for d in TEXT_MODELS_DIR.iterdir():
231
+ if d.is_dir():
232
+ # Check for GGUF files
233
+ gguf_files = list(d.glob("*.gguf"))
234
+ if gguf_files:
235
+ for f in gguf_files:
236
+ size = format_size(f.stat().st_size)
237
+ compat = check_model_compatibility(f.stem)
238
+ rows.append([
239
+ f.stem,
240
+ "GGUF",
241
+ size,
242
+ compat["status"],
243
+ "", # Loaded status filled by caller
244
+ "Remove"
245
+ ])
246
+ # Check for transformers config
247
+ elif (d / "config.json").exists():
248
+ total_size = sum(f.stat().st_size for f in d.rglob("*") if f.is_file())
249
+ rows.append([
250
+ d.name,
251
+ "Transformers",
252
+ format_size(total_size),
253
+ "βœ… Installed",
254
+ "",
255
+ "Remove"
256
+ ])
257
+
258
+ # Also check root level GGUF files
259
+ for f in TEXT_MODELS_DIR.glob("*.gguf"):
260
+ size = format_size(f.stat().st_size)
261
+ compat = check_model_compatibility(f.stem)
262
+ rows.append([
263
+ f.stem,
264
+ "GGUF",
265
+ size,
266
+ compat["status"],
267
+ "",
268
+ "Remove"
269
+ ])
270
+
271
+ return rows
272
+
273
+
274
+ # ══════════════════════════════════════════════════════════════════════════════
275
+ # DOWNLOAD FUNCTIONS
276
+ # ══════════════════════════════════════════════════════════════════════════════
277
+
278
+ def download_model_file(model_id: str, filename: str) -> str:
279
+ """Download a file from HuggingFace model"""
280
+ if not model_id or not filename:
281
+ return "⚠️ Enter model ID and filename"
282
+
283
+ try:
284
+ # Save to txt models directory
285
+ dest_dir = TEXT_MODELS_DIR / model_id.replace('/', '_')
286
+ dest_dir.mkdir(parents=True, exist_ok=True)
287
+ dest_path = dest_dir / filename
288
+
289
+ url = f"https://huggingface.co/{model_id}/resolve/main/{filename}"
290
+ headers = {"Authorization": f"Bearer {HF_TOKEN}"} if HF_TOKEN else {}
291
+
292
+ logger.info("HFHub", f"Downloading: {model_id}/{filename}")
293
+
294
+ resp = requests.get(url, headers=headers, stream=True, timeout=300)
295
+ resp.raise_for_status()
296
+
297
+ # Get total size if available
298
+ total_size = int(resp.headers.get('content-length', 0))
299
+
300
+ downloaded = 0
301
+ with open(dest_path, 'wb') as f:
302
+ for chunk in resp.iter_content(chunk_size=8192):
303
+ f.write(chunk)
304
+ downloaded += len(chunk)
305
+
306
+ final_size = dest_path.stat().st_size
307
+ logger.info("HFHub", f"Downloaded: {model_id}/{filename}", {"size": format_size(final_size)})
308
+ return f"βœ… Downloaded: {filename} ({format_size(final_size)})"
309
+
310
+ except Exception as e:
311
+ logger.error("HFHub", f"Download failed: {str(e)}")
312
+ return f"❌ {str(e)}"
313
+
314
+
315
+ def download_from_url(url: str, filename: str = "") -> str:
316
+ """Download file from any URL"""
317
+ if not url:
318
+ return "⚠️ Enter URL"
319
+
320
+ try:
321
+ if not filename:
322
+ filename = url.split('/')[-1].split('?')[0] or "downloaded_file"
323
+
324
+ dest_path = TEXT_MODELS_DIR / filename
325
+ if dest_path.exists():
326
+ base, ext = Path(filename).stem, Path(filename).suffix
327
+ i = 1
328
+ while dest_path.exists():
329
+ dest_path = TEXT_MODELS_DIR / f"{base}_{i}{ext}"
330
+ i += 1
331
+
332
+ headers = {}
333
+ if "huggingface.co" in url and HF_TOKEN:
334
+ headers["Authorization"] = f"Bearer {HF_TOKEN}"
335
+
336
+ resp = requests.get(url, headers=headers, stream=True, timeout=300)
337
+ resp.raise_for_status()
338
+
339
+ with open(dest_path, 'wb') as f:
340
+ for chunk in resp.iter_content(chunk_size=8192):
341
+ f.write(chunk)
342
+
343
+ logger.info("Download", f"Downloaded from URL: {url[:50]}...")
344
+ return f"βœ… Downloaded: {dest_path.name} ({format_size(dest_path.stat().st_size)})"
345
+ except Exception as e:
346
+ logger.error("Download", f"Failed: {str(e)}")
347
+ return f"❌ {str(e)}"
348
+
349
+
350
+ def remove_model(model_id: str) -> str:
351
+ """Remove a downloaded model"""
352
+ try:
353
+ # Try directory first
354
+ model_dir = TEXT_MODELS_DIR / model_id.replace('/', '_')
355
+ if model_dir.exists() and model_dir.is_dir():
356
+ import shutil
357
+ shutil.rmtree(model_dir)
358
+ logger.info("HFHub", f"Removed model directory: {model_id}")
359
+ return f"βœ… Removed: {model_id}"
360
+
361
+ # Try direct GGUF file
362
+ for f in TEXT_MODELS_DIR.glob("*.gguf"):
363
+ if f.stem == model_id or model_id in f.stem:
364
+ f.unlink()
365
+ logger.info("HFHub", f"Removed model file: {f.name}")
366
+ return f"βœ… Removed: {f.name}"
367
+
368
+ return f"⚠️ Model not found: {model_id}"
369
+ except Exception as e:
370
+ logger.error("HFHub", f"Remove failed: {str(e)}")
371
+ return f"❌ {str(e)}"
372
+
373
+
374
+ def list_downloaded_models() -> str:
375
+ """List all downloaded models as JSON"""
376
+ models = []
377
+
378
+ for d in TEXT_MODELS_DIR.iterdir():
379
+ if d.is_dir():
380
+ files = list(d.rglob('*'))
381
+ size = sum(f.stat().st_size for f in files if f.is_file())
382
+ gguf_count = len(list(d.glob("*.gguf")))
383
+ models.append({
384
+ "name": d.name,
385
+ "type": "gguf" if gguf_count > 0 else "transformers",
386
+ "files": len([f for f in files if f.is_file()]),
387
+ "size": format_size(size)
388
+ })
389
+
390
+ # Also list root level GGUF files
391
+ for f in TEXT_MODELS_DIR.glob("*.gguf"):
392
+ models.append({
393
+ "name": f.stem,
394
+ "type": "gguf",
395
+ "files": 1,
396
+ "size": format_size(f.stat().st_size)
397
+ })
398
+
399
+ return json.dumps({"count": len(models), "models": models}, indent=2)
400
+
401
+
402
+ # ══════════════════════════════════════════════════════════════════════════════
403
+ # GGUF FILE FINDER
404
+ # ══════════════════════════════════════════════════════════════════════════════
405
+
406
+ def get_model_files(model_id: str) -> List[Dict]:
407
+ """Get list of GGUF files available for a model"""
408
+ try:
409
+ url = f"{HF_API_URL}/models/{model_id}"
410
+ headers = {"Authorization": f"Bearer {HF_TOKEN}"} if HF_TOKEN else {}
411
+ resp = requests.get(url, headers=headers, timeout=30)
412
+ resp.raise_for_status()
413
+
414
+ data = resp.json()
415
+ siblings = data.get("siblings", [])
416
+
417
+ gguf_files = []
418
+ for s in siblings:
419
+ filename = s.get("rfilename", "")
420
+ if filename.endswith(".gguf"):
421
+ # Parse quantization from filename
422
+ quant = "unknown"
423
+ for q in ["Q2_K", "Q3_K_S", "Q3_K_M", "Q3_K_L", "Q4_0", "Q4_K_S", "Q4_K_M", "Q5_0", "Q5_K_S", "Q5_K_M", "Q6_K", "Q8_0", "F16", "F32"]:
424
+ if q in filename.upper():
425
+ quant = q
426
+ break
427
+
428
+ gguf_files.append({
429
+ "filename": filename,
430
+ "quant": quant,
431
+ "recommended": quant in ["Q4_K_M", "Q4_K_S", "Q5_K_M"]
432
+ })
433
+
434
+ return gguf_files
435
+ except Exception as e:
436
+ logger.error("HFHub", f"Failed to get model files: {str(e)}")
437
+ return []
438
+
439
+
440
+ # ══════════════════════════════════════════════════════════════════════════════
441
+ # LEGACY COMPATIBILITY
442
+ # ══════════════════════════════════════════════════════════════════════════════
443
+
444
+ def search_hf_models(query: str = "", task: str = "", library: str = "", sort: str = "downloads"):
445
+ """Legacy search function - redirects to enhanced search"""
446
+ rows, status, _ = search_hf_models_enhanced(
447
+ query=query,
448
+ task=task,
449
+ library=library,
450
+ sort=sort,
451
+ max_params=None,
452
+ recommended_only=False,
453
+ limit=30
454
+ )
455
+ # Convert to old format
456
+ old_rows = [[r[0], task or "text-generation", r[4], ""] for r in rows]
457
+ return old_rows, status
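
The compatibility heuristic is pure string parsing, so it can be exercised offline without touching the Hub API; the thresholds come from the constants at the top of the module. A sketch with illustrative model IDs:

```python
# Offline sketch of the size heuristic (no network needed).
from modules.hf_hub import estimate_params_from_name, check_model_compatibility

for model_id in [
    "TinyLlama/TinyLlama-1.1B-Chat-v1.0",
    "mistralai/Mistral-7B-Instruct-v0.2",
    "meta-llama/Llama-2-13b-chat-hf",
]:
    params = estimate_params_from_name(model_id)     # e.g. 1.1, 7.0, 13.0
    compat = check_model_compatibility(model_id)
    print(f"{model_id}: ~{params}B -> {compat['status']} ({compat['reason']})")
```
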
modules/logger.py ADDED
@@ -0,0 +1,143 @@
1
+ """
2
+ Logging Service Module
3
+ Centralized logging with file persistence.
4
+ """
5
+ import json
6
+ import threading
7
+ import uuid
8
+ from pathlib import Path
9
+ from datetime import datetime
10
+ from typing import List, Dict, Optional
11
+ from dataclasses import dataclass, asdict
12
+ from enum import Enum
13
+
14
+ from .config import LOGS_DIR
15
+
16
+
17
+ class LogLevel(Enum):
18
+ DEBUG = "DEBUG"
19
+ INFO = "INFO"
20
+ WARNING = "WARNING"
21
+ ERROR = "ERROR"
22
+ EVENT = "EVENT"
23
+
24
+
25
+ @dataclass
26
+ class LogEntry:
27
+ timestamp: str
28
+ level: str
29
+ category: str
30
+ message: str
31
+ details: Optional[Dict] = None
32
+ log_id: str = ""
33
+
34
+ def __post_init__(self):
35
+ if not self.log_id:
36
+ self.log_id = str(uuid.uuid4())[:8]
37
+
38
+
39
+ class LoggingService:
40
+ """Centralized logging service with file persistence"""
41
+
42
+ def __init__(self, log_dir: Path):
43
+ self.log_dir = log_dir
44
+ self.log_file = log_dir / "app.log"
45
+ self.event_log = log_dir / "events.log"
46
+ self.error_log = log_dir / "errors.log"
47
+ self.max_entries = 1000
48
+ self._lock = threading.Lock()
49
+
50
+ def _write_log(self, entry: LogEntry, file_path: Path):
51
+ with self._lock:
52
+ try:
53
+ logs = []
54
+ if file_path.exists():
55
+ try:
56
+ logs = json.loads(file_path.read_text())
57
+ except Exception:
58
+ logs = []
59
+
60
+ logs.append(asdict(entry))
61
+
62
+ if len(logs) > self.max_entries:
63
+ logs = logs[-self.max_entries:]
64
+
65
+ file_path.write_text(json.dumps(logs, indent=2))
66
+ except Exception as e:
67
+ print(f"Log write error: {e}")
68
+
69
+ def log(self, level: LogLevel, category: str, message: str, details: Dict = None):
70
+ entry = LogEntry(
71
+ timestamp=datetime.now().isoformat(),
72
+ level=level.value,
73
+ category=category,
74
+ message=message,
75
+ details=details
76
+ )
77
+
78
+ self._write_log(entry, self.log_file)
79
+
80
+ if level == LogLevel.ERROR:
81
+ self._write_log(entry, self.error_log)
82
+ elif level == LogLevel.EVENT:
83
+ self._write_log(entry, self.event_log)
84
+
85
+ return entry.log_id
86
+
87
+ def info(self, category: str, message: str, details: Dict = None):
88
+ return self.log(LogLevel.INFO, category, message, details)
89
+
90
+ def error(self, category: str, message: str, details: Dict = None):
91
+ return self.log(LogLevel.ERROR, category, message, details)
92
+
93
+ def event(self, category: str, message: str, details: Dict = None):
94
+ return self.log(LogLevel.EVENT, category, message, details)
95
+
96
+ def warning(self, category: str, message: str, details: Dict = None):
97
+ return self.log(LogLevel.WARNING, category, message, details)
98
+
99
+ def get_logs(self, log_type: str = "all", limit: int = 100, level: str = None, category: str = None) -> List[Dict]:
100
+ file_map = {
101
+ "all": self.log_file,
102
+ "events": self.event_log,
103
+ "errors": self.error_log
104
+ }
105
+
106
+ file_path = file_map.get(log_type, self.log_file)
107
+
108
+ if not file_path.exists():
109
+ return []
110
+
111
+ try:
112
+ logs = json.loads(file_path.read_text())
113
+
114
+ if level:
115
+ logs = [l for l in logs if l.get("level") == level]
116
+ if category:
117
+ logs = [l for l in logs if l.get("category") == category]
118
+
119
+ return logs[-limit:][::-1]
120
+ except Exception:
121
+ return []
122
+
123
+ def clear_logs(self, log_type: str = "all") -> bool:
124
+ try:
125
+ if log_type == "all":
126
+ for f in [self.log_file, self.event_log, self.error_log]:
127
+ if f.exists():
128
+ f.write_text("[]")
129
+ else:
130
+ file_map = {"events": self.event_log, "errors": self.error_log, "app": self.log_file}
131
+ if log_type in file_map and file_map[log_type].exists():
132
+ file_map[log_type].write_text("[]")
133
+ return True
134
+ except Exception:
135
+ return False
136
+
137
+ def export_logs(self, log_type: str = "all") -> str:
138
+ logs = self.get_logs(log_type, limit=10000)
139
+ return json.dumps(logs, indent=2)
140
+
141
+
142
+ # Global logger instance
143
+ logger = LoggingService(LOGS_DIR)
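
The service appends entries to JSON files capped at 1000 entries each, mirroring ERROR and EVENT entries into dedicated files. A sketch using the module-level `logger` singleton:

```python
# Sketch of the logging contract using the module-level singleton.
from modules.logger import logger

log_id = logger.info("Demo", "something happened", {"key": "value"})
logger.error("Demo", "something failed", {"code": 42})

recent = logger.get_logs("all", limit=10)     # newest first
errors = logger.get_logs("errors", limit=10)
print(log_id, len(recent), len(errors))
```
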
modules/system.py ADDED
@@ -0,0 +1,96 @@
1
+ """
2
+ System Info Module
3
+ System monitoring and space information.
4
+ """
5
+ import os
6
+ import json
7
+ import platform
8
+ import psutil
9
+
10
+ from .config import STORAGE_DIR, HF_TOKEN
11
+ from .utils import format_size
12
+
13
+
14
+ def get_system_info() -> str:
15
+ """Get detailed system information"""
16
+ try:
17
+ mem = psutil.virtual_memory()
18
+ disk = psutil.disk_usage('/')
19
+ cpu = psutil.cpu_percent(interval=0.1)
20
+
21
+ return json.dumps({
22
+ "system": {
23
+ "platform": platform.system(),
24
+ "python": platform.python_version(),
25
+ },
26
+ "cpu": {
27
+ "cores": psutil.cpu_count(),
28
+ "usage": f"{cpu}%",
29
+ },
30
+ "memory": {
31
+ "total": format_size(mem.total),
32
+ "used": format_size(mem.used),
33
+ "percent": f"{mem.percent}%",
34
+ },
35
+ "disk": {
36
+ "total": format_size(disk.total),
37
+ "free": format_size(disk.free),
38
+ "percent": f"{disk.percent}%",
39
+ }
40
+ }, indent=2)
41
+ except Exception as e:
42
+ return json.dumps({"error": str(e)})
43
+
44
+
45
+ def get_system_dashboard() -> str:
46
+ """Get ASCII dashboard with bars"""
47
+ try:
48
+ mem = psutil.virtual_memory()
49
+ disk = psutil.disk_usage('/')
50
+ cpu = psutil.cpu_percent(interval=0.1)
51
+
52
+ def bar(pct):
53
+ filled = int(pct / 5)
54
+ return 'β–ˆ' * filled + 'β–‘' * (20 - filled)
55
+
56
+ return f"""
57
+ ╔════════════════════════════════════════════════════════╗
58
+ β•‘ πŸ“Š SYSTEM DASHBOARD β•‘
59
+ ╠════════════════════════════════════════════════════════╣
60
+ β•‘ πŸ’Ύ RAM: [{bar(mem.percent)}] {mem.percent:5.1f}% β•‘
61
+ β•‘ {format_size(mem.used):>10} / {format_size(mem.total):<10} β•‘
62
+ ╠════════════════════════════════════════════════════════╣
63
+ β•‘ πŸ’Ώ DISK: [{bar(disk.percent)}] {disk.percent:5.1f}% β•‘
64
+ β•‘ {format_size(disk.used):>10} / {format_size(disk.total):<10} β•‘
65
+ ╠════════════════════════════════════════════════════════╣
66
+ β•‘ πŸ”₯ CPU: [{bar(cpu)}] {cpu:5.1f}% β•‘
67
+ β•‘ {psutil.cpu_count()} cores β•‘
68
+ β•šβ•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•
69
+ """.strip()
70
+ except Exception as e:
71
+ return f"Error: {e}"
72
+
73
+
74
+ def get_space_info() -> str:
75
+ """Get HuggingFace space information"""
76
+ return json.dumps({
77
+ "space_id": os.getenv("SPACE_ID", "local"),
78
+ "author": os.getenv("SPACE_AUTHOR_NAME", "unknown"),
79
+ "hardware": os.getenv("SPACE_HARDWARE", "cpu-basic"),
80
+ "hf_token": "set" if HF_TOKEN else "not_set",
81
+ "storage_path": str(STORAGE_DIR),
82
+ }, indent=2)
83
+
84
+
85
+ def get_storage_stats() -> str:
86
+ """Get storage statistics"""
87
+ try:
88
+ files = list(STORAGE_DIR.rglob('*'))
89
+ total = sum(f.stat().st_size for f in files if f.is_file())
90
+ return json.dumps({
91
+ "files": len([f for f in files if f.is_file()]),
92
+ "folders": len([f for f in files if f.is_dir()]),
93
+ "total_size": format_size(total),
94
+ }, indent=2)
95
+ except Exception as e:
96
+ return json.dumps({"error": str(e)})
modules/text_ai.py ADDED
@@ -0,0 +1,842 @@
1
+ """
2
+ TextAI Module
3
+ Chat and Roleplay with LLM support - GGUF and Transformers
4
+ Universal inference system with context, prompts, LoRA support
5
+ """
6
+ import json
7
+ import uuid
8
+ import threading
9
+ from pathlib import Path
10
+ from datetime import datetime
11
+ from typing import List, Dict, Optional, Generator, Any
12
+ from dataclasses import dataclass, asdict, field
13
+
14
+ from .config import STORAGE_DIR, MODELS_DIR
15
+ from .logger import logger
16
+
17
+ # Lazy import flags - these will be loaded only when needed
18
+ _llama_cpp = None
19
+ _transformers = None
20
+ _torch = None
21
+
22
+ def _get_llama_cpp():
23
+ """Lazy load llama-cpp-python"""
24
+ global _llama_cpp
25
+ if _llama_cpp is None:
26
+ try:
27
+ from llama_cpp import Llama
28
+ _llama_cpp = Llama
29
+ logger.info("TextAI", "llama-cpp-python loaded successfully")
30
+ except ImportError as e:
31
+ logger.warning("TextAI", f"llama-cpp-python not available: {e}")
32
+ _llama_cpp = False
33
+ return _llama_cpp if _llama_cpp else None
34
+
35
+ def _get_transformers():
36
+ """Lazy load transformers"""
37
+ global _transformers, _torch
38
+ if _transformers is None:
39
+ try:
40
+ from transformers import AutoModelForCausalLM, AutoTokenizer
41
+ import torch
42
+ _transformers = {"AutoModelForCausalLM": AutoModelForCausalLM, "AutoTokenizer": AutoTokenizer}
43
+ _torch = torch
44
+ logger.info("TextAI", "transformers loaded successfully")
45
+ except ImportError as e:
46
+ logger.warning("TextAI", f"transformers not available: {e}")
47
+ _transformers = False
48
+ return _transformers if _transformers else None
49
+
50
+ # Text models directory
51
+ TEXT_MODELS_DIR = MODELS_DIR / "txt"
52
+ TEXT_MODELS_DIR.mkdir(parents=True, exist_ok=True)
53
+
54
+ # Sessions directory
55
+ SESSIONS_DIR = STORAGE_DIR / "chat_sessions"
56
+ SESSIONS_DIR.mkdir(parents=True, exist_ok=True)
57
+
58
+ # Model config file
59
+ MODEL_CONFIG_FILE = TEXT_MODELS_DIR / "model_config.json"
60
+
61
+
62
+ @dataclass
63
+ class ChatMessage:
64
+ role: str # system, user, assistant
65
+ content: str
66
+ timestamp: str = ""
67
+
68
+ def __post_init__(self):
69
+ if not self.timestamp:
70
+ self.timestamp = datetime.now().isoformat()
71
+
72
+
73
+ @dataclass
74
+ class InferenceConfig:
75
+ """Universal inference configuration"""
76
+ max_tokens: int = 512
77
+ temperature: float = 0.7
78
+ top_p: float = 0.9
79
+ top_k: int = 40
80
+ repeat_penalty: float = 1.1
81
+ stop_sequences: List[str] = field(default_factory=list)
82
+ # LoRA settings
83
+ lora_path: str = ""
84
+ lora_scale: float = 1.0
85
+ # Context settings
86
+ context_length: int = 4096
87
+ # Extra options
88
+ seed: int = -1
89
+ stream: bool = False
90
+
91
+
92
+ @dataclass
93
+ class ChatSession:
94
+ session_id: str
95
+ title: str
96
+ messages: List[Dict]
97
+ created_at: str
98
+ updated_at: str
99
+ model_id: str = ""
100
+ session_type: str = "chat" # chat or roleplay
101
+ system_prompt: str = ""
102
+ # Personalization
103
+ inference_config: Dict = field(default_factory=dict)
104
+ custom_settings: Dict = field(default_factory=dict)
105
+
106
+
107
+ class ModelManager:
108
+ """Manages LLM models - loading, switching, inference"""
109
+
110
+ def __init__(self):
111
+ self.current_model = None
112
+ self.current_model_id = None
113
+ self.model_type = None # "gguf" or "transformers"
114
+ self.tokenizer = None
115
+ self.lora_path = None
116
+ self.default_config = InferenceConfig()
117
+ self._lock = threading.Lock()
118
+ self._load_config()
119
+
120
+ def _load_config(self):
121
+ """Load model configuration"""
122
+ if MODEL_CONFIG_FILE.exists():
123
+ try:
124
+ self.config = json.loads(MODEL_CONFIG_FILE.read_text())
125
+ except Exception:
126
+ self.config = {"default_model": None, "models": {}}
127
+ else:
128
+ self.config = {"default_model": None, "models": {}}
129
+ self._save_config()
130
+
131
+ def _save_config(self):
132
+ """Save model configuration"""
133
+ MODEL_CONFIG_FILE.write_text(json.dumps(self.config, indent=2))
134
+
135
+ def get_available_models(self) -> List[Dict]:
136
+ """List all available models in /models/txt/"""
137
+ models = []
138
+
139
+ # Scan for GGUF files
140
+ for f in TEXT_MODELS_DIR.rglob("*.gguf"):
141
+ rel_path = str(f.relative_to(TEXT_MODELS_DIR))
142
+ size_gb = f.stat().st_size / (1024**3)
143
+ models.append({
144
+ "id": rel_path,
145
+ "name": f.stem,
146
+ "type": "gguf",
147
+ "size": f"{size_gb:.2f} GB",
148
+ "path": str(f),
149
+ "loaded": rel_path == self.current_model_id
150
+ })
151
+
152
+ # Scan for transformers models (directories with config.json)
153
+ for d in TEXT_MODELS_DIR.iterdir():
154
+ if d.is_dir() and (d / "config.json").exists():
155
+ models.append({
156
+ "id": d.name,
157
+ "name": d.name,
158
+ "type": "transformers",
159
+ "size": "varies",
160
+ "path": str(d),
161
+ "loaded": d.name == self.current_model_id
162
+ })
163
+
164
+ return models
165
+
166
+ def load_model(self, model_id: str) -> Dict:
167
+ """Load a model by ID"""
168
+ with self._lock:
169
+ try:
170
+ # Unload current model
171
+ if self.current_model is not None:
172
+ self.unload_model()
173
+
174
+ models = self.get_available_models()
175
+ model_info = next((m for m in models if m["id"] == model_id), None)
176
+
177
+ if not model_info:
178
+ return {"success": False, "error": f"Model not found: {model_id}"}
179
+
180
+ if model_info["type"] == "gguf":
181
+ return self._load_gguf(model_info)
182
+ else:
183
+ return self._load_transformers(model_info)
184
+
185
+ except Exception as e:
186
+ logger.error("ModelManager", f"Failed to load model: {str(e)}")
187
+ return {"success": False, "error": str(e)}
188
+
189
+ def _load_gguf(self, model_info: Dict, lora_path: str = "", n_ctx: int = 4096) -> Dict:
190
+ """Load GGUF model with llama-cpp-python"""
191
+ try:
192
+ Llama = _get_llama_cpp()
193
+ if Llama is None:
194
+ return {"success": False, "error": "llama-cpp-python not installed. Install with: pip install llama-cpp-python"}
195
+
196
+ # Build model kwargs
197
+ model_kwargs = {
198
+ "model_path": model_info["path"],
199
+ "n_ctx": n_ctx,
200
+ "n_threads": 4,
201
+ "n_gpu_layers": 0, # CPU only
202
+ "verbose": False
203
+ }
204
+
205
+ # Add LoRA if specified
206
+ if lora_path and Path(lora_path).exists():
207
+ model_kwargs["lora_path"] = lora_path
208
+ logger.info("ModelManager", f"Loading with LoRA: {lora_path}")
209
+
210
+ self.current_model = Llama(**model_kwargs)
211
+ self.current_model_id = model_info["id"]
212
+ self.model_type = "gguf"
213
+ self.lora_path = lora_path
214
+
215
+ logger.info("ModelManager", f"Loaded GGUF model: {model_info['name']}")
216
+ return {"success": True, "model": model_info["name"], "type": "gguf", "lora": lora_path or None}
217
+
218
+ except Exception as e:
219
+ logger.error("ModelManager", f"GGUF load error: {str(e)}")
220
+ return {"success": False, "error": str(e)}
221
+
222
+ def _load_transformers(self, model_info: Dict) -> Dict:
223
+ """Load transformers model"""
224
+ try:
225
+ tf = _get_transformers()
226
+ if tf is None:
227
+ return {"success": False, "error": "transformers not installed. Install with: pip install transformers torch"}
228
+
229
+ global _torch
230
+ AutoModelForCausalLM = tf["AutoModelForCausalLM"]
231
+ AutoTokenizer = tf["AutoTokenizer"]
232
+
233
+ self.tokenizer = AutoTokenizer.from_pretrained(model_info["path"])
234
+ self.current_model = AutoModelForCausalLM.from_pretrained(
235
+ model_info["path"],
236
+ torch_dtype=_torch.float32,
237
+ device_map="cpu",
238
+ low_cpu_mem_usage=True
239
+ )
240
+ self.current_model_id = model_info["id"]
241
+ self.model_type = "transformers"
242
+
243
+ logger.info("ModelManager", f"Loaded transformers model: {model_info['name']}")
244
+ return {"success": True, "model": model_info["name"], "type": "transformers"}
245
+
246
+ except Exception as e:
247
+ logger.error("ModelManager", f"Transformers load error: {str(e)}")
248
+ return {"success": False, "error": str(e)}
249
+
250
+ def unload_model(self):
251
+ """Unload current model to free memory"""
252
+ if self.current_model is not None:
253
+ del self.current_model
254
+ self.current_model = None
255
+
256
+ if self.tokenizer is not None:
257
+ del self.tokenizer
258
+ self.tokenizer = None
259
+
260
+ self.current_model_id = None
261
+ self.model_type = None
262
+
263
+ # Force garbage collection
264
+ import gc
265
+ gc.collect()
266
+
267
+ logger.info("ModelManager", "Model unloaded")
268
+
269
+ def generate(
270
+ self,
271
+ messages: List[Dict],
272
+ max_tokens: int = 512,
273
+ temperature: float = 0.7,
274
+ top_p: float = 0.9,
275
+ stream: bool = False
276
+ ) -> str:
277
+ """Generate response from loaded model"""
278
+ if self.current_model is None:
279
+ return "[Error: No model loaded]"
280
+
281
+ try:
282
+ if self.model_type == "gguf":
283
+ return self._generate_gguf(messages, max_tokens, temperature, top_p, stream)
284
+ else:
285
+ return self._generate_transformers(messages, max_tokens, temperature, top_p)
286
+ except Exception as e:
287
+ logger.error("ModelManager", f"Generation error: {str(e)}")
288
+ return f"[Error: {str(e)}]"
289
+
290
+ def _generate_gguf(
291
+ self,
292
+ messages: List[Dict],
293
+ max_tokens: int,
294
+ temperature: float,
295
+ top_p: float,
296
+ stream: bool
297
+ ) -> str:
298
+ """Generate with GGUF model"""
299
+ response = self.current_model.create_chat_completion(
300
+ messages=messages,
301
+ max_tokens=max_tokens,
302
+ temperature=temperature,
303
+ top_p=top_p,
304
+ stream=False  # streaming is not implemented yet, so the stream argument is ignored
305
+ )
306
+ return response["choices"][0]["message"]["content"]
307
+
308
+ def _generate_transformers(
309
+ self,
310
+ messages: List[Dict],
311
+ max_tokens: int,
312
+ temperature: float,
313
+ top_p: float
314
+ ) -> str:
315
+ """Generate with transformers model"""
316
+ global _torch
317
+ if _torch is None:
318
+ return "[Error: torch not loaded]"
319
+
320
+ # Format messages
321
+ prompt = ""
322
+ for msg in messages:
323
+ if msg["role"] == "system":
324
+ prompt += f"System: {msg['content']}\n\n"
325
+ elif msg["role"] == "user":
326
+ prompt += f"User: {msg['content']}\n\n"
327
+ elif msg["role"] == "assistant":
328
+ prompt += f"Assistant: {msg['content']}\n\n"
329
+ prompt += "Assistant: "
330
+
331
+ inputs = self.tokenizer(prompt, return_tensors="pt")
332
+
333
+ with _torch.no_grad():
334
+ outputs = self.current_model.generate(
335
+ inputs.input_ids,
336
+ max_new_tokens=max_tokens,
337
+ temperature=temperature,
338
+ top_p=top_p,
339
+ do_sample=True,
340
+ pad_token_id=self.tokenizer.eos_token_id
341
+ )
342
+
343
+ response = self.tokenizer.decode(outputs[0][inputs.input_ids.shape[1]:], skip_special_tokens=True)
344
+ return response.strip()
345
+
346
+ def get_status(self) -> Dict:
347
+ """Get current model status"""
348
+ return {
349
+ "loaded": self.current_model is not None,
350
+ "model_id": self.current_model_id,
351
+ "model_type": self.model_type
352
+ }
353
+
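A minimal usage sketch for the manager above (assumptions: a GGUF file already sits under `/models/txt/`, and this module is importable as `modules.text_ai`, matching the import used in `modules/text_ui.py`):

```python
# Sketch: discover models, load the first GGUF, run one CPU completion.
from modules.text_ai import model_manager  # module-level instance, defined below

models = model_manager.get_available_models()
gguf = next((m for m in models if m["type"] == "gguf"), None)
if gguf and model_manager.load_model(gguf["id"])["success"]:
    print(model_manager.generate(
        messages=[{"role": "user", "content": "Say hello in one sentence."}],
        max_tokens=64,
        temperature=0.7,
    ))
```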
354
+
355
+ class SessionManager:
356
+ """Manages chat sessions - create, save, load, list"""
357
+
358
+ def __init__(self):
359
+ self.sessions_dir = SESSIONS_DIR
360
+
361
+ def create_session(
362
+ self,
363
+ title: str = "",
364
+ session_type: str = "chat",
365
+ system_prompt: str = "",
366
+ model_id: str = ""
367
+ ) -> ChatSession:
368
+ """Create a new chat session"""
369
+ session_id = str(uuid.uuid4())[:8]
370
+
371
+ if not title:
372
+ title = f"{'Chat' if session_type == 'chat' else 'Roleplay'} {datetime.now().strftime('%m/%d %H:%M')}"
373
+
374
+ session = ChatSession(
375
+ session_id=session_id,
376
+ title=title,
377
+ messages=[],
378
+ created_at=datetime.now().isoformat(),
379
+ updated_at=datetime.now().isoformat(),
380
+ model_id=model_id,
381
+ session_type=session_type,
382
+ system_prompt=system_prompt
383
+ )
384
+
385
+ self._save_session(session)
386
+ logger.event("Session", f"Created session: {session_id}", {"type": session_type})
387
+
388
+ return session
389
+
390
+ def _save_session(self, session: ChatSession):
391
+ """Save session to file"""
392
+ file_path = self.sessions_dir / f"{session.session_id}.json"
393
+ file_path.write_text(json.dumps(asdict(session), indent=2))
394
+
395
+ def load_session(self, session_id: str) -> Optional[ChatSession]:
396
+ """Load a session by ID"""
397
+ file_path = self.sessions_dir / f"{session_id}.json"
398
+ if not file_path.exists():
399
+ return None
400
+
401
+ try:
402
+ data = json.loads(file_path.read_text())
403
+ return ChatSession(**data)
404
+ except Exception:
405
+ return None
406
+
407
+ def update_session(self, session: ChatSession):
408
+ """Update session (auto-save)"""
409
+ session.updated_at = datetime.now().isoformat()
410
+ self._save_session(session)
411
+
412
+ def add_message(self, session_id: str, role: str, content: str) -> Optional[ChatSession]:
413
+ """Add a message to session"""
414
+ session = self.load_session(session_id)
415
+ if not session:
416
+ return None
417
+
418
+ message = ChatMessage(role=role, content=content)
419
+ session.messages.append(asdict(message))
420
+ self.update_session(session)
421
+
422
+ return session
423
+
424
+ def list_sessions(self, session_type: Optional[str] = None) -> List[Dict]:
425
+ """List all sessions"""
426
+ sessions = []
427
+
428
+ for f in self.sessions_dir.glob("*.json"):
429
+ try:
430
+ data = json.loads(f.read_text())
431
+ if session_type and data.get("session_type") != session_type:
432
+ continue
433
+
434
+ sessions.append({
435
+ "session_id": data["session_id"],
436
+ "title": data["title"],
437
+ "session_type": data.get("session_type", "chat"),
438
+ "message_count": len(data.get("messages", [])),
439
+ "updated_at": data["updated_at"],
440
+ "model_id": data.get("model_id", "")
441
+ })
442
+ except Exception:
443
+ continue
444
+
445
+ # Sort by updated_at descending
446
+ sessions.sort(key=lambda x: x["updated_at"], reverse=True)
447
+ return sessions
448
+
449
+ def delete_session(self, session_id: str) -> bool:
450
+ """Delete a session"""
451
+ file_path = self.sessions_dir / f"{session_id}.json"
452
+ if file_path.exists():
453
+ file_path.unlink()
454
+ logger.event("Session", f"Deleted session: {session_id}")
455
+ return True
456
+ return False
457
+
458
+ def rename_session(self, session_id: str, new_title: str) -> bool:
459
+ """Rename a session"""
460
+ session = self.load_session(session_id)
461
+ if session:
462
+ session.title = new_title
463
+ self.update_session(session)
464
+ return True
465
+ return False
466
+
467
+ def clear_session(self, session_id: str) -> bool:
468
+ """Clear messages from session"""
469
+ session = self.load_session(session_id)
470
+ if session:
471
+ session.messages = []
472
+ self.update_session(session)
473
+ return True
474
+ return False
475
+
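Sessions are plain JSON files under SESSIONS_DIR, so the manager can be exercised directly; a sketch (import path assumed as above):

```python
# Sketch: create a session, append one turn, list recent chats.
from modules.text_ai import session_manager

s = session_manager.create_session(title="Demo", session_type="chat",
                                   system_prompt="You are concise.")
session_manager.add_message(s.session_id, "user", "Hello!")
for row in session_manager.list_sessions("chat"):
    print(row["session_id"], row["title"], row["message_count"])
```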
476
+
477
+ # Global instances
478
+ model_manager = ModelManager()
479
+ session_manager = SessionManager()
480
+
481
+
482
+ # ══════════════════════════════════════════════════════════════════════════════
483
+ # API FUNCTIONS
484
+ # ══════════════════════════════════════════════════════════════════════════════
485
+
486
+ def api_list_models() -> str:
487
+ """API: List available models"""
488
+ models = model_manager.get_available_models()
489
+ return json.dumps({"models": models, "count": len(models)}, indent=2)
490
+
491
+
492
+ def api_load_model(model_id: str) -> str:
493
+ """API: Load a model"""
494
+ result = model_manager.load_model(model_id)
495
+ return json.dumps(result)
496
+
497
+
498
+ def api_unload_model() -> str:
499
+ """API: Unload current model"""
500
+ model_manager.unload_model()
501
+ return json.dumps({"success": True})
502
+
503
+
504
+ def api_model_status() -> str:
505
+ """API: Get model status"""
506
+ return json.dumps(model_manager.get_status())
507
+
508
+
509
+ def api_chat(
510
+ session_id: str,
511
+ message: str,
512
+ max_tokens: int = 512,
513
+ temperature: float = 0.7
514
+ ) -> str:
515
+ """API: Send chat message and get response"""
516
+ session = session_manager.load_session(session_id)
517
+ if not session:
518
+ return json.dumps({"success": False, "error": "Session not found"})
519
+
520
+ if model_manager.current_model is None:
521
+ return json.dumps({"success": False, "error": "No model loaded"})
522
+
523
+ # Add user message (re-assign so the history built below includes it)
524
+ session = session_manager.add_message(session_id, "user", message)
525
+
526
+ # Build messages for model
527
+ messages = []
528
+ if session.system_prompt:
529
+ messages.append({"role": "system", "content": session.system_prompt})
530
+
531
+ for msg in session.messages:
532
+ messages.append({"role": msg["role"], "content": msg["content"]})
533
+
534
+ # Generate response
535
+ response = model_manager.generate(
536
+ messages=messages,
537
+ max_tokens=max_tokens,
538
+ temperature=temperature
539
+ )
540
+
541
+ # Add assistant response
542
+ session_manager.add_message(session_id, "assistant", response)
543
+
544
+ return json.dumps({
545
+ "success": True,
546
+ "response": response,
547
+ "session_id": session_id
548
+ })
549
+
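All API functions take and return JSON strings, so a caller composes them like this (a sketch; it assumes a model was loaded first via api_load_model):

```python
# Sketch: create a session, then chat through the JSON-string API.
import json
from modules.text_ai import api_create_session, api_chat

created = json.loads(api_create_session(title="API demo", session_type="chat"))
answer = json.loads(api_chat(created["session_id"], "What is a GGUF file?",
                             max_tokens=256, temperature=0.5))
print(answer["response"] if answer["success"] else answer["error"])
```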
550
+
551
+ def api_create_session(
552
+ title: str = "",
553
+ session_type: str = "chat",
554
+ system_prompt: str = ""
555
+ ) -> str:
556
+ """API: Create new session"""
557
+ model_id = model_manager.current_model_id or ""
558
+ session = session_manager.create_session(title, session_type, system_prompt, model_id)
559
+ return json.dumps({"success": True, "session_id": session.session_id, "title": session.title})
560
+
561
+
562
+ def api_list_sessions(session_type: str = "") -> str:
563
+ """API: List all sessions"""
564
+ sessions = session_manager.list_sessions(session_type if session_type else None)
565
+ return json.dumps({"sessions": sessions, "count": len(sessions)}, indent=2)
566
+
567
+
568
+ def api_get_session(session_id: str) -> str:
569
+ """API: Get session with messages"""
570
+ session = session_manager.load_session(session_id)
571
+ if not session:
572
+ return json.dumps({"success": False, "error": "Session not found"})
573
+ return json.dumps({"success": True, "session": asdict(session)}, indent=2)
574
+
575
+
576
+ def api_delete_session(session_id: str) -> str:
577
+ """API: Delete session"""
578
+ success = session_manager.delete_session(session_id)
579
+ return json.dumps({"success": success})
580
+
581
+
582
+ # ══════════════════════════════════════════════════════════════════════════════
583
+ # UNIVERSAL INFERENCE API
584
+ # ══════════════════════════════════════════════════════════════════════════════
585
+
586
+ def api_inference(
587
+ prompt: str = "",
588
+ messages: str = "[]",
589
+ system_prompt: str = "",
590
+ max_tokens: int = 512,
591
+ temperature: float = 0.7,
592
+ top_p: float = 0.9,
593
+ top_k: int = 40,
594
+ repeat_penalty: float = 1.1,
595
+ stop_sequences: str = "[]",
596
+ context: str = "",
597
+ extra_context: str = ""
598
+ ) -> str:
599
+ """
600
+ API: Universal inference endpoint
601
+
602
+ Args:
603
+ prompt: Direct prompt (if not using messages)
604
+ messages: JSON array of {role, content} messages
605
+ system_prompt: System prompt to prepend
606
+ max_tokens: Maximum tokens to generate
607
+ temperature: Sampling temperature
608
+ top_p: Top-p sampling
609
+ top_k: Top-k sampling
610
+ repeat_penalty: Repetition penalty
611
+ stop_sequences: JSON array of stop sequences
612
+ context: Additional context to include
613
+ extra_context: Extra context (e.g., from RAG)
614
+
615
+ Returns:
616
+ JSON with response and metadata
617
+ """
618
+ if model_manager.current_model is None:
619
+ return json.dumps({"success": False, "error": "No model loaded"})
620
+
621
+ try:
622
+ # Parse messages if provided
623
+ msg_list = json.loads(messages) if messages and messages != "[]" else []
624
+ stop_list = json.loads(stop_sequences) if stop_sequences and stop_sequences != "[]" else []  # parsed but, like top_k and repeat_penalty, not yet forwarded to generate()
625
+
626
+ # Build full message list
627
+ full_messages = []
628
+
629
+ # Add system prompt if provided
630
+ if system_prompt:
631
+ full_messages.append({"role": "system", "content": system_prompt})
632
+
633
+ # Add context if provided
634
+ if context or extra_context:
635
+ context_text = f"{context}\n{extra_context}".strip()
636
+ full_messages.append({"role": "system", "content": f"Context:\n{context_text}"})
637
+
638
+ # Add conversation messages
639
+ full_messages.extend(msg_list)
640
+
641
+ # Add direct prompt if provided
642
+ if prompt:
643
+ full_messages.append({"role": "user", "content": prompt})
644
+
645
+ if not full_messages:
646
+ return json.dumps({"success": False, "error": "No prompt or messages provided"})
647
+
648
+ # Generate response
649
+ response = model_manager.generate(
650
+ messages=full_messages,
651
+ max_tokens=max_tokens,
652
+ temperature=temperature,
653
+ top_p=top_p
654
+ )
655
+
656
+ return json.dumps({
657
+ "success": True,
658
+ "response": response,
659
+ "model": model_manager.current_model_id,
660
+ "tokens_used": len(response.split()), # Approximate
661
+ "config": {
662
+ "max_tokens": max_tokens,
663
+ "temperature": temperature,
664
+ "top_p": top_p
665
+ }
666
+ })
667
+
668
+ except Exception as e:
669
+ logger.error("API", f"Inference error: {str(e)}")
670
+ return json.dumps({"success": False, "error": str(e)})
671
+
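A hedged example of the stateless endpoint; note that only max_tokens, temperature, and top_p actually reach generate() in this version:

```python
# Sketch: one-shot inference with a message list plus RAG-style extra context.
import json
from modules.text_ai import api_inference

result = json.loads(api_inference(
    messages=json.dumps([{"role": "user", "content": "Summarize the context."}]),
    system_prompt="Answer only from the given context.",
    extra_context="TextAI stores GGUF models under /models/txt/.",
    max_tokens=128,
))
print(result.get("response") or result.get("error"))
```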
672
+
673
+ def api_chat_with_config(
674
+ session_id: str,
675
+ message: str,
676
+ config: str = "{}"
677
+ ) -> str:
678
+ """
679
+ API: Chat with custom inference config
680
+
681
+ Args:
682
+ session_id: Session ID
683
+ message: User message
684
+ config: JSON config with max_tokens, temperature, top_p, etc.
685
+ """
686
+ session = session_manager.load_session(session_id)
687
+ if not session:
688
+ return json.dumps({"success": False, "error": "Session not found"})
689
+
690
+ if model_manager.current_model is None:
691
+ return json.dumps({"success": False, "error": "No model loaded"})
692
+
693
+ try:
694
+ cfg = json.loads(config) if config and config != "{}" else {}
695
+ max_tokens = cfg.get("max_tokens", 512)
696
+ temperature = cfg.get("temperature", 0.7)
697
+ top_p = cfg.get("top_p", 0.9)
698
+
699
+ # Add user message
700
+ session_manager.add_message(session_id, "user", message)
701
+
702
+ # Build messages for model - include full history for context consistency
703
+ messages = []
704
+ if session.system_prompt:
705
+ messages.append({"role": "system", "content": session.system_prompt})
706
+
707
+ # Load fresh session to get updated messages
708
+ session = session_manager.load_session(session_id)
709
+ for msg in session.messages:
710
+ messages.append({"role": msg["role"], "content": msg["content"]})
711
+
712
+ # Generate response
713
+ response = model_manager.generate(
714
+ messages=messages,
715
+ max_tokens=max_tokens,
716
+ temperature=temperature,
717
+ top_p=top_p
718
+ )
719
+
720
+ # Add assistant response
721
+ session_manager.add_message(session_id, "assistant", response)
722
+
723
+ return json.dumps({
724
+ "success": True,
725
+ "response": response,
726
+ "session_id": session_id,
727
+ "message_count": len(session.messages) + 1,
728
+ "config_used": {"max_tokens": max_tokens, "temperature": temperature}
729
+ })
730
+
731
+ except Exception as e:
732
+ return json.dumps({"success": False, "error": str(e)})
733
+
734
+
735
+ def api_get_session_context(session_id: str, include_system: bool = True) -> str:
736
+ """
737
+ API: Get full session context for long chats
738
+ Returns all messages formatted for external use
739
+ """
740
+ session = session_manager.load_session(session_id)
741
+ if not session:
742
+ return json.dumps({"success": False, "error": "Session not found"})
743
+
744
+ messages = []
745
+ if include_system and session.system_prompt:
746
+ messages.append({"role": "system", "content": session.system_prompt})
747
+
748
+ for msg in session.messages:
749
+ messages.append({
750
+ "role": msg["role"],
751
+ "content": msg["content"],
752
+ "timestamp": msg.get("timestamp", "")
753
+ })
754
+
755
+ return json.dumps({
756
+ "success": True,
757
+ "session_id": session_id,
758
+ "title": session.title,
759
+ "session_type": session.session_type,
760
+ "message_count": len(session.messages),
761
+ "messages": messages,
762
+ "system_prompt": session.system_prompt
763
+ }, indent=2)
764
+
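One intended use is handing a long chat to the stateless endpoint, for example after switching models; a sketch (the session ID is hypothetical):

```python
# Sketch: export a session's history and replay it through api_inference.
import json
from modules.text_ai import api_get_session_context, api_inference

ctx = json.loads(api_get_session_context("abc12345"))  # hypothetical session ID
if ctx["success"]:
    replay = json.loads(api_inference(
        prompt="Continue the conversation.",
        messages=json.dumps([{"role": m["role"], "content": m["content"]}
                             for m in ctx["messages"]]),
    ))
    print(replay.get("response"))
```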
765
+
766
+ def api_update_session_config(session_id: str, config: str = "{}") -> str:
767
+ """
768
+ API: Update session inference config for personalization
769
+ """
770
+ session = session_manager.load_session(session_id)
771
+ if not session:
772
+ return json.dumps({"success": False, "error": "Session not found"})
773
+
774
+ try:
775
+ cfg = json.loads(config)
776
+ session.inference_config = cfg
777
+ session_manager.update_session(session)
778
+ return json.dumps({"success": True, "config": cfg})
779
+ except Exception as e:
780
+ return json.dumps({"success": False, "error": str(e)})
781
+
782
+
783
+ def api_continue_session(session_id: str) -> str:
784
+ """
785
+ API: Get session info for continuing a chat
786
+ Returns last few messages and status
787
+ """
788
+ session = session_manager.load_session(session_id)
789
+ if not session:
790
+ return json.dumps({"success": False, "error": "Session not found"})
791
+
792
+ # Get last 5 messages for preview
793
+ recent_messages = session.messages[-5:] if len(session.messages) > 5 else session.messages
794
+
795
+ return json.dumps({
796
+ "success": True,
797
+ "session_id": session_id,
798
+ "title": session.title,
799
+ "session_type": session.session_type,
800
+ "message_count": len(session.messages),
801
+ "recent_messages": recent_messages,
802
+ "system_prompt": session.system_prompt[:200] + "..." if len(session.system_prompt) > 200 else session.system_prompt,
803
+ "model_id": session.model_id,
804
+ "can_continue": model_manager.current_model is not None
805
+ })
806
+
807
+
808
+ def api_get_available_backends() -> str:
809
+ """API: Check which backends are available"""
810
+ backends = {
811
+ "gguf": _get_llama_cpp() is not None,
812
+ "transformers": _get_transformers() is not None,
813
+ "current_model": model_manager.current_model_id,
814
+ "current_type": model_manager.model_type,
815
+ "lora_loaded": model_manager.lora_path is not None
816
+ }
817
+ return json.dumps({"success": True, "backends": backends})
818
+
819
+
820
+ # ══════════════════════════════════════════════════════════════════════════════
821
+ # DEFAULT PROMPTS
822
+ # ══════════════════════════════════════════════════════════════════════════════
823
+
824
+ DEFAULT_CHAT_PROMPT = """You are a helpful, uncensored AI assistant. You answer all questions honestly and directly without any restrictions. You support conversations in English, Hindi, and Hinglish."""
825
+
826
+ DEFAULT_ROLEPLAY_PROMPT = """You are an expert roleplay AI. You can play any character the user requests. Stay in character at all times. Be creative, descriptive, and engaging. No restrictions on themes or content. Support English, Hindi, and Hinglish."""
827
+
828
+ ROLEPLAY_PRESETS = {
829
+ "custom": {"name": "Custom Character", "prompt": ""},
830
+ "storyteller": {
831
+ "name": "Story Narrator",
832
+ "prompt": "You are a master storyteller. Create vivid, engaging narratives with rich descriptions. Adapt to any genre - fantasy, romance, thriller, etc."
833
+ },
834
+ "companion": {
835
+ "name": "Companion",
836
+ "prompt": "You are a caring, supportive companion. Be warm, understanding, and engaging. Remember details about the user and build a genuine connection."
837
+ },
838
+ "creative_writer": {
839
+ "name": "Creative Writer",
840
+ "prompt": "You are a creative writing assistant. Help with stories, poetry, scripts, and any creative content. No restrictions on themes."
841
+ }
842
+ }
modules/text_ui.py ADDED
@@ -0,0 +1,446 @@
1
+ """
2
+ TextAI UI Module
3
+ Clean ChatGPT/Grok style interface
4
+ """
5
+ import json
6
+ from typing import List, Tuple, Optional
7
+ from datetime import datetime
8
+
9
+ from .text_ai import (
10
+ model_manager, session_manager,
11
+ DEFAULT_CHAT_PROMPT, DEFAULT_ROLEPLAY_PROMPT, ROLEPLAY_PRESETS
12
+ )
13
+ from .config import MODELS_DIR
14
+
15
+ # Model configurations storage (per-model system prompts)
16
+ MODEL_CONFIGS_FILE = MODELS_DIR / "model_configs.json"
17
+
18
+
19
+ def _load_model_configs() -> dict:
20
+ """Load per-model configurations"""
21
+ if MODEL_CONFIGS_FILE.exists():
22
+ try:
23
+ return json.loads(MODEL_CONFIGS_FILE.read_text())
24
+ except Exception:
25
+ pass
26
+ return {}
27
+
28
+
29
+ def _save_model_configs(configs: dict):
30
+ """Save per-model configurations"""
31
+ MODEL_CONFIGS_FILE.parent.mkdir(parents=True, exist_ok=True)
32
+ MODEL_CONFIGS_FILE.write_text(json.dumps(configs, indent=2))
33
+
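The stored file maps a model ID to its settings; a sketch of writing one entry (the model ID is illustrative):

```python
# Sketch: persist a per-model system prompt; the JSON on disk ends up as
# {"<model_id>": {"system_prompt": "..."}}.
configs = _load_model_configs()
configs.setdefault("tinyllama-1.1b-chat-v1.0.Q4_K_M.gguf", {})  # illustrative ID
configs["tinyllama-1.1b-chat-v1.0.Q4_K_M.gguf"]["system_prompt"] = "You are concise."
_save_model_configs(configs)
```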
34
+
35
+ # ══════════════════════════════════════════════════════════════════════════════
36
+ # CHAT SESSION HELPERS
37
+ # ══════════════════════════════════════════════════════════════════════════════
38
+
39
+ def get_chat_list() -> List[List[str]]:
40
+ """Get all chat sessions sorted by recent - clean display"""
41
+ sessions = session_manager.list_sessions() # All types
42
+ rows = []
43
+ for s in sessions[:30]:
44
+ # Show only title (auto-generated or renamed)
45
+ title = s["title"][:35] + "..." if len(s["title"]) > 35 else s["title"]
46
+ session_type = "🎭" if s.get("session_type") == "roleplay" else "💬"
47
+ rows.append([s["session_id"], f"{session_type} {title}"])
48
+ return rows
49
+
50
+
51
+ def get_model_choices() -> List[str]:
52
+ """Get available models for dropdown"""
53
+ models = model_manager.get_available_models()
54
+ choices = []
55
+ for m in models:
56
+ status = "✓ " if m["loaded"] else ""
57
+ choices.append(f"{status}{m['name']} ({m['type']})")
58
+ return choices if choices else ["No models available"]
59
+
60
+
61
+ def get_current_model_display() -> str:
62
+ """Get current model for display"""
63
+ status = model_manager.get_status()
64
+ if status["loaded"]:
65
+ return f"{status['model_id']}"
66
+ return "No model loaded"
67
+
68
+
69
+ def format_chat_history(session_id: str) -> List[dict]:
70
+ """Format session messages for Gradio Chatbot (new format)"""
71
+ session = session_manager.load_session(session_id)
72
+ if not session:
73
+ return []
74
+
75
+ history = []
76
+ for msg in session.messages:
77
+ if msg["role"] == "user":
78
+ history.append({"role": "user", "content": msg["content"]})
79
+ elif msg["role"] == "assistant":
80
+ history.append({"role": "assistant", "content": msg["content"]})
81
+
82
+ return history
83
+
84
+
85
+ def format_chat_history_tuples(session_id: str) -> List[Tuple[str, str]]:
86
+ """Format session messages for Gradio Chatbot (tuple format for compatibility)"""
87
+ session = session_manager.load_session(session_id)
88
+ if not session:
89
+ return []
90
+
91
+ history = []
92
+ user_msg = None
93
+
94
+ for msg in session.messages:
95
+ if msg["role"] == "user":
96
+ user_msg = msg["content"]
97
+ elif msg["role"] == "assistant" and user_msg:
98
+ history.append((user_msg, msg["content"]))
99
+ user_msg = None
100
+
101
+ if user_msg:
102
+ history.append((user_msg, None))
103
+
104
+ return history
105
+
106
+
107
+ # ══════════════════════════════════════════════════════════════════════════════
108
+ # MAIN CHAT FUNCTIONS
109
+ # ══════════════════════════════════════════════════════════════════════════════
110
+
111
+ def ui_new_chat(mode: str = "chat"):
112
+ """Create new chat session"""
113
+ # Get system prompt based on mode and current model
114
+ if mode == "roleplay":
115
+ system_prompt = DEFAULT_ROLEPLAY_PROMPT
116
+ session_type = "roleplay"
117
+ else:
118
+ # Use model-specific prompt if available
119
+ model_id = model_manager.current_model_id
120
+ configs = _load_model_configs()
121
+ system_prompt = configs.get(model_id, {}).get("system_prompt", DEFAULT_CHAT_PROMPT)
122
+ session_type = "chat"
123
+
124
+ session = session_manager.create_session("", session_type, system_prompt)
125
+ return (
126
+ session.session_id,
127
+ [],
128
+ get_chat_list(),
129
+ session.title
130
+ )
131
+
132
+
133
+ def ui_load_session(evt, sessions_data):
134
+ """Load session from sidebar click"""
135
+ try:
136
+ if hasattr(evt, 'index') and evt.index[0] < len(sessions_data):
137
+ session_id = sessions_data[evt.index[0]][0]
138
+ session = session_manager.load_session(session_id)
139
+ if session:
140
+ history = format_chat_history_tuples(session_id)
141
+ return session_id, history, session.title, session.session_type == "roleplay"
142
+ except Exception:
143
+ pass
144
+ return "", [], "", False
145
+
146
+
147
+ def ui_send_message(
148
+ session_id: str,
149
+ message: str,
150
+ history: List,
151
+ max_tokens: int,
152
+ temperature: float,
153
+ is_roleplay: bool = False
154
+ ):
155
+ """Send message and stream response"""
156
+ if not session_id:
157
+ # Auto-create session
158
+ mode = "roleplay" if is_roleplay else "chat"
159
+ session_id, _, _, _ = ui_new_chat(mode)
160
+
161
+ if not message.strip():
162
+ yield history, "", session_id, get_chat_list()
163
+ return
164
+
165
+ if model_manager.current_model is None:
166
+ history = history + [(message, "Please load a model first from the menu.")]
167
+ yield history, "", session_id, get_chat_list()
168
+ return
169
+
170
+ # Add user message
171
+ history = history + [(message, None)]
172
+ yield history, "", session_id, get_chat_list()
173
+
174
+ # Get response
175
+ from .text_ai import api_chat
176
+ result = json.loads(api_chat(session_id, message, max_tokens, temperature))
177
+
178
+ if result["success"]:
179
+ history[-1] = (message, result["response"])
180
+ # Update title if first message
181
+ session = session_manager.load_session(session_id)
182
+ title = session.title if session else ""
183
+ else:
184
+ history[-1] = (message, f"Error: {result.get('error', 'Unknown error')}")
185
+ title = ""
186
+
187
+ yield history, "", session_id, get_chat_list()
188
+
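ui_send_message is a generator yielding (chatbot history, cleared textbox, session ID, sidebar rows), so it can be wired to a Blocks event directly; a sketch with illustrative component names:

```python
# Sketch: wiring the generator into a Blocks layout (names are illustrative).
import gradio as gr
from modules.text_ui import ui_send_message

with gr.Blocks() as demo:
    session_id = gr.State("")
    chatbot = gr.Chatbot()
    chat_list = gr.Dataframe(headers=["id", "title"], interactive=False)
    msg = gr.Textbox(placeholder="Type a message...")
    max_tokens = gr.Slider(64, 2048, value=512, step=64, label="Max tokens")
    temperature = gr.Slider(0.0, 2.0, value=0.7, step=0.1, label="Temperature")

    msg.submit(
        ui_send_message,
        inputs=[session_id, msg, chatbot, max_tokens, temperature],
        outputs=[chatbot, msg, session_id, chat_list],
    )
```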
189
+
190
+ def ui_rename_session(session_id: str, new_title: str):
191
+ """Rename current session"""
192
+ if session_id and new_title.strip():
193
+ session_manager.rename_session(session_id, new_title.strip())
194
+ return get_chat_list(), new_title.strip()
195
+ return get_chat_list(), ""
196
+
197
+
198
+ def ui_delete_session(session_id: str):
199
+ """Delete current session"""
200
+ if session_id:
201
+ session_manager.delete_session(session_id)
202
+ return "", [], get_chat_list(), ""
203
+
204
+
205
+ def ui_clear_chat(session_id: str):
206
+ """Clear current chat messages"""
207
+ if session_id:
208
+ session_manager.clear_session(session_id)
209
+ return []
210
+
211
+
212
+ # ══════════════════════════════════════════════════════════════════════════════
213
+ # MODEL MANAGEMENT (for Tools tab)
214
+ # ══════════════════════════════════════════════════════════════════════════════
215
+
216
+ def get_models_table() -> List[List[str]]:
217
+ """Get models for table display"""
218
+ models = model_manager.get_available_models()
219
+ rows = []
220
+ for m in models:
221
+ configs = _load_model_configs()
222
+ has_prompt = "✓" if m["id"] in configs else ""
223
+ rows.append([
224
+ "●" if m["loaded"] else "",
225
+ m["name"],
226
+ m["type"],
227
+ m["size"],
228
+ has_prompt
229
+ ])
230
+ return rows
231
+
232
+
233
+ def ui_load_model_by_index(evt, models_data):
234
+ """Load model by clicking on table row"""
235
+ try:
236
+ if hasattr(evt, 'index') and evt.index[0] < len(models_data):
237
+ models = model_manager.get_available_models()
238
+ if evt.index[0] < len(models):
239
+ model_id = models[evt.index[0]]["id"]
240
+ result = model_manager.load_model(model_id)
241
+ status = f"Loaded: {model_id}" if result.get("success") else f"Error: {result.get('error')}"
242
+ return get_models_table(), get_current_model_display(), status
243
+ except Exception as e:
244
+ return get_models_table(), get_current_model_display(), f"Error: {str(e)}"
245
+ return get_models_table(), get_current_model_display(), ""
246
+
247
+
248
+ def ui_unload_model():
249
+ """Unload current model"""
250
+ model_manager.unload_model()
251
+ return get_models_table(), get_current_model_display(), "Model unloaded"
252
+
253
+
254
+ def ui_save_model_prompt(model_name: str, system_prompt: str):
255
+ """Save system prompt for a model"""
256
+ if not model_name:
257
+ return "Select a model first"
258
+
259
+ # Find model ID from name
260
+ models = model_manager.get_available_models()
261
+ model_id = None
262
+ for m in models:
263
+ if m["name"] == model_name or m["id"] == model_name:
264
+ model_id = m["id"]
265
+ break
266
+
267
+ if not model_id:
268
+ return "Model not found"
269
+
270
+ configs = _load_model_configs()
271
+ if model_id not in configs:
272
+ configs[model_id] = {}
273
+ configs[model_id]["system_prompt"] = system_prompt
274
+ _save_model_configs(configs)
275
+
276
+ return f"Saved prompt for {model_name}"
277
+
278
+
279
+ def ui_get_model_prompt(model_name: str) -> str:
280
+ """Get system prompt for a model"""
281
+ models = model_manager.get_available_models()
282
+ model_id = None
283
+ for m in models:
284
+ if m["name"] == model_name or m["id"] == model_name:
285
+ model_id = m["id"]
286
+ break
287
+
288
+ if model_id:
289
+ configs = _load_model_configs()
290
+ return configs.get(model_id, {}).get("system_prompt", DEFAULT_CHAT_PROMPT)
291
+ return DEFAULT_CHAT_PROMPT
292
+
293
+
294
+ def ui_delete_model_config(model_name: str):
295
+ """Delete model from config (not the file)"""
296
+ models = model_manager.get_available_models()
297
+ model_id = None
298
+ for m in models:
299
+ if m["name"] == model_name:
300
+ model_id = m["id"]
301
+ break
302
+
303
+ if model_id:
304
+ configs = _load_model_configs()
305
+ if model_id in configs:
306
+ del configs[model_id]
307
+ _save_model_configs(configs)
308
+ return get_models_table(), "Config removed"
309
+ return get_models_table(), "Model not found"
310
+
311
+
312
+ # ══════════════════════════════════════════════════════════════════════════════
313
+ # PERSONA MANAGEMENT
314
+ # ══════════════════════════════════════════════════════════════════════════════
315
+
316
+ PERSONAS_FILE = MODELS_DIR.parent / "storage" / "personas.json"
317
+
318
+
319
+ def _load_personas() -> dict:
320
+ """Load saved personas"""
321
+ if PERSONAS_FILE.exists():
322
+ try:
323
+ return json.loads(PERSONAS_FILE.read_text())
324
+ except Exception:
325
+ pass
326
+ return {"default": {"name": "Default", "prompt": DEFAULT_CHAT_PROMPT}}
327
+
328
+
329
+ def _save_personas(personas: dict):
330
+ """Save personas"""
331
+ PERSONAS_FILE.parent.mkdir(parents=True, exist_ok=True)
332
+ PERSONAS_FILE.write_text(json.dumps(personas, indent=2))
333
+
334
+
335
+ def get_persona_choices() -> List[str]:
336
+ """Get persona names for dropdown"""
337
+ personas = _load_personas()
338
+ return list(personas.keys())
339
+
340
+
341
+ def ui_save_persona(name: str, prompt: str):
342
+ """Save a persona"""
343
+ if not name.strip():
344
+ return get_persona_choices(), "Enter persona name"
345
+
346
+ personas = _load_personas()
347
+ personas[name.strip()] = {"name": name.strip(), "prompt": prompt}
348
+ _save_personas(personas)
349
+ return get_persona_choices(), f"Saved: {name}"
350
+
351
+
352
+ def ui_load_persona(name: str) -> str:
353
+ """Load persona prompt"""
354
+ personas = _load_personas()
355
+ return personas.get(name, {}).get("prompt", DEFAULT_CHAT_PROMPT)
356
+
357
+
358
+ def ui_delete_persona(name: str):
359
+ """Delete a persona"""
360
+ if name == "default":
361
+ return get_persona_choices(), "Cannot delete default persona"
362
+
363
+ personas = _load_personas()
364
+ if name in personas:
365
+ del personas[name]
366
+ _save_personas(personas)
367
+ return get_persona_choices(), f"Deleted: {name}"
368
+ return get_persona_choices(), "Persona not found"
369
+
370
+
371
+ # ══════════════════════════════════════════════════════════════════════════════
372
+ # SUGGESTED MODELS
373
+ # ══════════════════════════════════════════════════════════════════════════════
374
+
375
+ # Default model for quick testing (small, fast, uncensored)
376
+ DEFAULT_MODEL = {
377
+ "id": "TheBloke/TinyLlama-1.1B-Chat-v1.0-GGUF",
378
+ "file": "tinyllama-1.1b-chat-v1.0.Q4_K_M.gguf",
379
+ "name": "TinyLlama 1.1B",
380
+ "size": "0.7GB"
381
+ }
382
+
383
+ SUGGESTED_MODELS = [
384
+ {"id": "TheBloke/TinyLlama-1.1B-Chat-v1.0-GGUF", "file": "tinyllama-1.1b-chat-v1.0.Q4_K_M.gguf", "name": "TinyLlama 1.1B (Fast)", "size": "0.7GB", "recommended": True},
385
+ {"id": "TheBloke/phi-2-GGUF", "file": "phi-2.Q4_K_M.gguf", "name": "Phi-2 (Small & Fast)", "size": "1.6GB"},
386
+ {"id": "Qwen/Qwen2-0.5B-Instruct-GGUF", "file": "qwen2-0_5b-instruct-q4_k_m.gguf", "name": "Qwen2 0.5B (Tiny)", "size": "0.4GB"},
387
+ {"id": "TheBloke/Mistral-7B-Instruct-v0.2-GGUF", "file": "mistral-7b-instruct-v0.2.Q4_K_M.gguf", "name": "Mistral 7B Instruct", "size": "4.4GB"},
388
+ {"id": "TheBloke/Llama-2-7B-Chat-GGUF", "file": "llama-2-7b-chat.Q4_K_M.gguf", "name": "Llama 2 7B Chat", "size": "4.1GB"},
389
+ {"id": "TheBloke/OpenHermes-2.5-Mistral-7B-GGUF", "file": "openhermes-2.5-mistral-7b.Q4_K_M.gguf", "name": "OpenHermes 2.5", "size": "4.4GB"},
390
+ ]
391
+
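download_default_model below relies on the project's .hf_hub helper, which is not shown here; an equivalent hedged sketch using huggingface_hub directly (the target directory is assumed to match TEXT_MODELS_DIR):

```python
# Sketch: fetch a suggested GGUF straight from the Hub.
from huggingface_hub import hf_hub_download

model = SUGGESTED_MODELS[0]  # TinyLlama 1.1B
path = hf_hub_download(repo_id=model["id"], filename=model["file"],
                       local_dir="models/txt")  # assumed models directory
print(f"Downloaded to {path}")
```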
392
+
393
+ def download_default_model() -> str:
394
+ """Download the default model if not present"""
395
+ from .hf_hub import download_model_file
396
+ from .config import MODELS_DIR
397
+
398
+ txt_dir = MODELS_DIR / "txt"
399
+ txt_dir.mkdir(parents=True, exist_ok=True)
400
+
401
+ # Check if any model exists
402
+ existing = list(txt_dir.rglob("*.gguf"))
403
+ if existing:
404
+ return f"Model already exists: {existing[0].name}"
405
+
406
+ # Download default model
407
+ result = download_model_file(DEFAULT_MODEL["id"], DEFAULT_MODEL["file"])
408
+ return result
409
+
410
+
411
+ def ensure_model_available() -> bool:
412
+ """Ensure at least one model is available, download if needed"""
413
+ models = model_manager.get_available_models()
414
+ return len(models) > 0
415
+
416
+
417
+ def get_suggested_models_table() -> List[List[str]]:
418
+ """Get suggested models for display"""
419
+ return [[m["name"], m["size"], m["id"]] for m in SUGGESTED_MODELS]
420
+
421
+
422
+ # ══════════════════════════════════════════════════════════════════════════════
423
+ # COMPATIBILITY EXPORTS
424
+ # ══════════════════════════════════════════════════════════════════════════════
425
+
426
+ def get_sessions_list(session_type: Optional[str] = None) -> List[List[str]]:
427
+ """Legacy: Get sessions list"""
428
+ return get_chat_list()
429
+
430
+
431
+ def get_model_status_display() -> str:
432
+ """Legacy: Get model status"""
433
+ status = model_manager.get_status()
434
+ if status["loaded"]:
435
+ return f"Model: {status['model_id']}"
436
+ return "No model loaded"
437
+
438
+
439
+ def ui_refresh_models():
440
+ """Legacy: Refresh models"""
441
+ return get_models_table(), get_current_model_display()
442
+
443
+
444
+ def ui_load_model(evt, models_data):
445
+ """Legacy: Load model"""
446
+ return ui_load_model_by_index(evt, models_data)
modules/theme.py ADDED
@@ -0,0 +1,69 @@
1
+ """
2
+ Theme Module
3
+ Dark theme CSS and styling.
4
+ """
5
+
6
+ DARK_CSS = """
7
+ /* GitHub Dark Theme */
8
+ .gradio-container { background: #0d1117 !important; color: #c9d1d9 !important; }
9
+ h1, h2, h3 { color: #58a6ff !important; }
10
+
11
+ /* Inputs */
12
+ input, textarea, select { background: #0d1117 !important; border: 1px solid #30363d !important; color: #c9d1d9 !important; border-radius: 6px !important; }
13
+ input:focus, textarea:focus { border-color: #58a6ff !important; }
14
+
15
+ /* Buttons */
16
+ button { background: #21262d !important; border: 1px solid #30363d !important; color: #c9d1d9 !important; }
17
+ button:hover { background: #30363d !important; }
18
+ button.primary { background: #238636 !important; border-color: #238636 !important; color: #fff !important; }
19
+ button.stop { background: #da3633 !important; border-color: #da3633 !important; color: #fff !important; }
20
+
21
+ /* Tables */
22
+ table { background: #0d1117 !important; }
23
+ th { background: #161b22 !important; color: #8b949e !important; }
24
+ td { background: #0d1117 !important; color: #c9d1d9 !important; }
25
+ tr:hover td { background: #161b22 !important; }
26
+
27
+ /* Blocks */
28
+ .block { background: #161b22 !important; border: 1px solid #30363d !important; }
29
+ pre, code { background: #161b22 !important; color: #79c0ff !important; }
30
+
31
+ /* Tabs */
32
+ .tab-nav button { background: #21262d !important; color: #8b949e !important; }
33
+ .tab-nav button.selected { background: #0d1117 !important; color: #58a6ff !important; border-bottom: 2px solid #58a6ff !important; }
34
+
35
+ /* Sidebar */
36
+ .folder-tree { background: #0d1117 !important; border: 1px solid #30363d !important; border-radius: 8px !important; padding: 10px !important; font-family: 'Consolas', monospace !important; }
37
+ .folder-tree table { background: transparent !important; }
38
+ .folder-tree td { padding: 4px 8px !important; cursor: pointer !important; }
39
+ .folder-tree tr:hover td { background: #21262d !important; }
40
+
41
+ /* Toolbar */
42
+ .toolbar { background: #161b22 !important; border: 1px solid #30363d !important; border-radius: 8px !important; padding: 8px !important; }
43
+
44
+ /* Status */
45
+ .status { background: #161b22 !important; border: 1px solid #30363d !important; color: #8b949e !important; font-size: 12px !important; }
46
+
47
+ /* Breadcrumb */
48
+ .breadcrumb { background: #161b22 !important; color: #58a6ff !important; font-family: monospace !important; }
49
+
50
+ /* Home cards */
51
+ .home-card { background: #161b22 !important; border: 1px solid #30363d !important; border-radius: 12px !important; padding: 20px !important; }
52
+
53
+ /* Scrollbar */
54
+ ::-webkit-scrollbar { width: 8px; }
55
+ ::-webkit-scrollbar-track { background: #0d1117; }
56
+ ::-webkit-scrollbar-thumb { background: #30363d; border-radius: 4px; }
57
+
58
+ label { color: #8b949e !important; }
59
+ """
60
+
61
+
62
+ def get_theme():
63
+ """Get Gradio theme configuration"""
64
+ import gradio as gr
65
+ return gr.themes.Base().set(
66
+ body_background_fill="#0d1117",
67
+ block_background_fill="#161b22",
68
+ border_color_primary="#30363d",
69
+ )
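Applying the theme and CSS together when constructing the app (a sketch):

```python
# Sketch: build the UI with the dark theme and custom CSS.
import gradio as gr
from modules.theme import get_theme, DARK_CSS

with gr.Blocks(theme=get_theme(), css=DARK_CSS) as demo:
    gr.Markdown("# TextAI")

demo.launch()
```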
modules/ui_helpers.py ADDED
@@ -0,0 +1,159 @@
1
+ """
2
+ UI Helpers Module
3
+ Helper functions for Gradio UI interactions.
4
+ """
5
+ import json
6
+ import gradio as gr
7
+
8
+ from .file_manager import (
9
+ get_folder_contents, get_breadcrumb, get_status, get_quick_access_folders,
10
+ create_folder, create_file, delete_item, rename_item, write_file,
11
+ navigate_to_folder, get_file_preview
12
+ )
13
+ from .events import event_manager
14
+ from .logger import logger
15
+ from .api import api_create_event, api_delete_event, api_toggle_event, api_run_event
16
+
17
+
18
+ def ui_result(result_str: str) -> str:
19
+ """Parse result JSON and return status message"""
20
+ try:
21
+ r = json.loads(result_str)
22
+ return "βœ… Success" if r.get("success") else f"❌ {r.get('error', 'Error')}"
23
+ except:
24
+ return result_str
25
+
26
+
27
+ def ui_create_folder(name):
28
+ result = create_folder(name)
29
+ return get_folder_contents(), get_breadcrumb(), ui_result(result), get_quick_access_folders()
30
+
31
+
32
+ def ui_create_file(name, content):
33
+ result = create_file(name, content)
34
+ return get_folder_contents(), get_breadcrumb(), ui_result(result), get_quick_access_folders()
35
+
36
+
37
+ def ui_delete(name):
38
+ if not name:
39
+ return get_folder_contents(), get_breadcrumb(), "⚠️ Select item", get_quick_access_folders()
40
+ result = delete_item(name)
41
+ return get_folder_contents(), get_breadcrumb(), ui_result(result), get_quick_access_folders()
42
+
43
+
44
+ def ui_rename(old, new):
45
+ result = rename_item(old, new)
46
+ return get_folder_contents(), get_breadcrumb(), ui_result(result), get_quick_access_folders()
47
+
48
+
49
+ def ui_save(name, content):
50
+ if not name:
51
+ return "⚠️ Select file"
52
+ result = write_file(name, content)
53
+ return ui_result(result)
54
+
55
+
56
+ def on_folder_tree_select(evt: gr.SelectData, data):
57
+ """Handle folder tree selection"""
58
+ if evt.index[0] < len(data):
59
+ folder_path = data[evt.index[0]][2]
60
+ return navigate_to_folder(folder_path)
61
+ return get_folder_contents(), get_breadcrumb(), get_status(), get_quick_access_folders()
62
+
63
+
64
+ def navigate_from_main(evt: gr.SelectData, data):
65
+ """Handle click on main content - navigate into folder or show preview"""
66
+ # Import the module object so the mutable current_path can be read and updated
67
+ import modules.file_manager as fm
68
+
69
+ if evt.index[0] < len(data):
70
+ name = data[evt.index[0]][1]
71
+ item_type = data[evt.index[0]][2]
72
+
73
+ if item_type == "Folder":
74
+ new_path = fm.current_path / name
75
+ if new_path.exists() and new_path.is_dir():
76
+ fm.current_path = new_path
77
+ return (
78
+ get_folder_contents(),
79
+ get_breadcrumb(),
80
+ get_status(),
81
+ get_quick_access_folders(),
82
+ name,
83
+ None, "", ""
84
+ )
85
+ else:
86
+ img, txt, info = get_file_preview(name)
87
+ return (
88
+ data,
89
+ get_breadcrumb(),
90
+ get_status(),
91
+ get_quick_access_folders(),
92
+ name,
93
+ img, txt or "", info
94
+ )
95
+
96
+ return (data, get_breadcrumb(), get_status(), get_quick_access_folders(), "", None, "", "")
97
+
98
+
99
+ # Event UI helpers
100
+ def get_events_table():
101
+ events = event_manager.get_events()
102
+ rows = []
103
+ for e in events:
104
+ rows.append([
105
+ e["event_id"],
106
+ e["event_type"],
107
+ e["target_path"][:30] + "..." if len(e["target_path"]) > 30 else e["target_path"],
108
+ f"{e['interval_hours']}h",
109
+ "βœ…" if e["enabled"] else "❌",
110
+ e.get("last_run", "Never")[:16] if e.get("last_run") else "Never",
111
+ str(e["run_count"])
112
+ ])
113
+ return rows
114
+
115
+
116
+ def get_logs_display(log_type: str = "all", limit: int = 50):
117
+ logs = logger.get_logs(log_type, limit)
118
+ if not logs:
119
+ return "No logs found"
120
+
121
+ lines = []
122
+ for log in logs:
123
+ level_icon = {"INFO": "ℹ️", "ERROR": "❌", "WARNING": "⚠️", "EVENT": "πŸ“Œ", "DEBUG": "πŸ”"}.get(log["level"], "β€’")
124
+ lines.append(f"[{log['timestamp'][:19]}] {level_icon} [{log['category']}] {log['message']}")
125
+ return "\n".join(lines)
126
+
127
+
128
+ def ui_create_event(event_type, target_path, interval_hours, enabled, description):
129
+ result = api_create_event(event_type, target_path, interval_hours, enabled, description)
130
+ return get_events_table(), ui_result(result)
131
+
132
+
133
+ def ui_delete_event(event_id):
134
+ result = api_delete_event(event_id)
135
+ return get_events_table(), ui_result(result)
136
+
137
+
138
+ def ui_toggle_event(event_id):
139
+ result = api_toggle_event(event_id)
140
+ return get_events_table(), ui_result(result)
141
+
142
+
143
+ def ui_run_event(event_id):
144
+ result = api_run_event(event_id)
145
+ return get_events_table(), get_logs_display("events", 20), ui_result(result)
146
+
147
+
148
+ def ui_refresh_logs(log_type, limit):
149
+ return get_logs_display(log_type, int(limit))
150
+
151
+
152
+ def ui_clear_logs(log_type):
153
+ from .api import api_clear_logs
154
+ api_clear_logs(log_type)
155
+ return get_logs_display(log_type, 50), "✅ Logs cleared"
156
+
157
+
158
+ def ui_copy_logs(log_type):
159
+ return logger.export_logs(log_type)
modules/utils.py ADDED
@@ -0,0 +1,47 @@
1
+ """
2
+ Utility Functions Module
3
+ Common helper functions used across the application.
4
+ """
5
+ from pathlib import Path
6
+ from datetime import datetime
7
+
8
+
9
+ def format_size(size: int) -> str:
10
+ """Format bytes to human readable string"""
11
+ for unit in ['B', 'KB', 'MB', 'GB', 'TB']:
12
+ if size < 1024:
13
+ return f"{size:.1f} {unit}"
14
+ size /= 1024
15
+ return f"{size:.1f} PB"
16
+
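Each loop iteration divides by 1024 and moves to the next unit, so:

```python
# Sketch: expected outputs of format_size.
assert format_size(512) == "512.0 B"
assert format_size(1536) == "1.5 KB"
assert format_size(3 * 1024**3) == "3.0 GB"
```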
17
+
18
+ def get_file_icon(name: str, is_dir: bool = False) -> str:
19
+ """Get emoji icon for file type"""
20
+ if is_dir:
21
+ return "πŸ“"
22
+ ext = Path(name).suffix.lower()
23
+ icons = {
24
+ '.txt': '📄', '.md': '📝', '.pdf': '📕', '.doc': '📘', '.csv': '📊',
25
+ '.py': '🐍', '.js': '📜', '.json': '📋', '.html': '🌐', '.css': '🎨',
26
+ '.jpg': '🖼️', '.jpeg': '🖼️', '.png': '🖼️', '.gif': '🖼️', '.svg': '🖼️',
27
+ '.mp3': '🎵', '.wav': '🎵', '.mp4': '🎬', '.avi': '🎬',
28
+ '.zip': '📦', '.tar': '📦', '.gz': '📦',
29
+ '.pt': '🧠', '.pth': '🧠', '.safetensors': '🧠', '.ckpt': '🧠', '.bin': '🧠',
30
+ '.env': '🔐', '.log': '📜', '.sh': '🖥️',
31
+ }
32
+ return icons.get(ext, '📄')
33
+
34
+
35
+ def get_mime_type(name: str) -> str:
36
+ """Get mime type category for file"""
37
+ ext = Path(name).suffix.lower()
38
+ if ext in ['.jpg', '.jpeg', '.png', '.gif', '.svg', '.webp']:
39
+ return 'image'
40
+ if ext in ['.txt', '.md', '.py', '.js', '.json', '.html', '.css', '.yaml', '.yml', '.sh', '.log', '.csv']:
41
+ return 'text'
42
+ return 'binary'
43
+
44
+
45
+ def format_timestamp(ts: float) -> str:
46
+ """Format unix timestamp to string"""
47
+ return datetime.fromtimestamp(ts).strftime("%Y-%m-%d %H:%M")