Archime committed · Commit f482080 · 1 parent: 47a714b

Update UI elements

Files changed (5):
  1. app.py +17 -16
  2. app/stream_utils.py +61 -9
  3. app/ui_utils.py +61 -37
  4. app/utils.py +0 -41
  5. assets/custom_style.css +10 -0
app.py CHANGED
@@ -13,7 +13,6 @@ import os
from gradio.utils import get_space

from app.utils import (
- generate_coturn_config,
raise_function
)
from app.session_utils import (
@@ -36,6 +35,7 @@ from app.ui_utils import (
)

from app.stream_utils import (
+ generate_coturn_config,
read_and_stream_audio,
stop_streaming
)
@@ -100,16 +100,15 @@ with gr.Blocks(theme=theme, css=css_style) as demo:
"""
)

- btn = gr.Button("Proceed to Streaming", visible=True)
-
- # ui_components_one = [active_filepath, btn]
- # main_audio.change(fn=on_file_load, inputs=[main_audio], outputs=ui_components_one)
+ btn_proceed_streaming = gr.Button("Proceed to Streaming", visible=False)
+ ui_components_oload_audio = [active_filepath, btn_proceed_streaming]
+ main_audio.change(fn=on_file_load, inputs=[main_audio], outputs=ui_components_oload_audio)
# main_audio.stop_recording(fn=on_file_load, inputs=[main_audio], outputs=ui_components_one)
# main_audio.clear(fn=on_file_load, inputs=[main_audio], outputs=ui_components_one)

- btn.click(lambda: gr.Walkthrough(selected=1), outputs=walkthrough)
+ btn_proceed_streaming.click(lambda: gr.Walkthrough(selected=1), outputs=walkthrough)

- # === STEP 2 ===
+ # === STEP 2 ===
with gr.Step("Stream", id=1) as audio_stream:
gr.Markdown("### Step 2: Start audio streaming")
with gr.Group():
@@ -120,18 +119,19 @@ with gr.Blocks(theme=theme, css=css_style) as demo:
modality="audio",
rtc_configuration=generate_coturn_config(),
visible=True,
+ inputs=main_audio
)
- start_button = gr.Button("Start Streaming")
+ start_stream_button = gr.Button("Start Streaming")

webrtc_stream.stream(
fn=read_and_stream_audio,
inputs=[active_filepath, session_hash, stop_streaming_flags],
outputs=[webrtc_stream],
- trigger=start_button.click,
+ trigger=start_stream_button.click,
concurrency_id="audio_stream",
concurrency_limit=10,
)
-
+ status_message_stream = gr.Markdown("", elem_id="status-message-stream", visible=False)
go_to_config = gr.Button("Go to Configuration", visible=False)
go_to_config.click(lambda: gr.Walkthrough(selected=2), outputs=walkthrough)

@@ -236,10 +236,10 @@ with gr.Blocks(theme=theme, css=css_style) as demo:
)

start_task_button = gr.Button("Start Task", visible=True)
- stop_button = gr.Button("Stop Streaming", visible=False)
+ stop_stream_button = gr.Button("Stop Streaming", visible=False)
stop_task_button = gr.Button("Stop Task", visible=False)

- stop_button.click(
+ stop_stream_button.click(
fn=stop_streaming,
inputs=[session_hash, stop_streaming_flags],
outputs=[stop_streaming_flags],
@@ -255,12 +255,13 @@ with gr.Blocks(theme=theme, css=css_style) as demo:
)

ui_components = [
- start_button, stop_button,
- go_to_config, audio_source_step, status_slider
+ start_stream_button, stop_stream_button,
+ go_to_config, audio_source_step, status_slider, walkthrough, status_message_stream
]

webrtc_stream.on_additional_outputs(
fn=handle_additional_outputs,
+ inputs=[webrtc_stream],
outputs=ui_components,
concurrency_id="additional_outputs_audio_stream",
concurrency_limit=10,
@@ -300,10 +301,10 @@ with gr.Blocks(theme=theme, css=css_style) as demo:
# start_task_button.click(
# fn=toggle_task_buttons,
# inputs=None,
- # outputs=[start_task_button, stop_task_button, stop_button],
+ # outputs=[start_task_button, stop_task_button, stop_stream_button],
# queue=False
# )


if __name__ == "__main__":
- demo.queue(max_size=10, api_open=False).launch(show_api=False, debug=True)
+ demo.queue(max_size=10, api_open=False).launch(show_api=False, show_error=True, debug=True)
app/stream_utils.py CHANGED
@@ -7,10 +7,56 @@ import os
import time
import numpy as np

+ import hmac
+ import hashlib
+ import base64
+ import os
+ import time
+ import random
+
+
# --------------------------------------------------------
# Utility functions
# --------------------------------------------------------

+
+
+
+ def generate_coturn_config():
+ """
+ Generate a complete Coturn configuration with dynamic authentication (use-auth-secret).
+ Returns:
+ dict: coturn_config object ready to be used on the WebRTC client side.
+ """
+
+ secret_key = os.getenv("TURN_SECRET_KEY", "your_secret_key")
+ ttl = int(os.getenv("TURN_TTL", 3600))
+ turn_url = os.getenv("TURN_URL", "turn:*******")
+ turn_s_url = os.getenv("TURN_S_URL", "turns:*****")
+ user = os.getenv("TURN_USER", "client")
+
+ timestamp = int(time.time()) + ttl
+ username = f"{timestamp}:{user}"
+ password = base64.b64encode(
+ hmac.new(secret_key.encode(), username.encode(), hashlib.sha1).digest()
+ ).decode()
+
+ coturn_config = {
+ "iceServers": [
+ {
+ "urls": [
+ f"{turn_url}",
+ f"{turn_s_url}",
+ ],
+ "username": username,
+ "credential": password,
+ }
+ ]
+ }
+ return coturn_config
+
+
+
def read_and_stream_audio(filepath_to_stream: str, session_id: str, stop_streaming_flags: dict):
"""
Read an audio file and stream it chunk by chunk (1s per chunk).
@@ -28,18 +74,20 @@ def read_and_stream_audio(filepath_to_stream: str, session_id: str, stop_streami
segment = AudioSegment.from_file(filepath_to_stream)
chunk_duration_ms = 1000
total_chunks = len(segment) // chunk_duration_ms + 1
- logging.info(f"[{session_id}] Starting audio streaming ({total_chunks} chunks).")
+ logging.info(f"[{session_id}] Starting audio streaming {filepath_to_stream} ({total_chunks} chunks).")

for i, chunk in enumerate(segment[::chunk_duration_ms]):
- if _is_stop_requested(stop_streaming_flags):
- logging.info(f"[{session_id}] Stop signal received. Terminating stream.")
- break
+

frame_rate = chunk.frame_rate
samples = np.array(chunk.get_array_of_samples()).reshape(1, -1)
progress = round(((i + 1) / total_chunks) * 100, 2)
-
- yield ((frame_rate, samples), AdditionalOutputs(progress))
+ if _is_stop_requested(stop_streaming_flags):
+ logging.info(f"[{session_id}] Stop signal received. Terminating stream.")
+ yield ((frame_rate, samples), AdditionalOutputs({"stopped": True, "value": "STREAM_STOPPED"}))
+ break
+
+ yield ((frame_rate, samples), AdditionalOutputs({"progressed": True, "value": progress}))
logging.debug(f"[{session_id}] Sent chunk {i+1}/{total_chunks} ({progress}%).")

time.sleep(1)
@@ -53,11 +101,11 @@ def read_and_stream_audio(filepath_to_stream: str, session_id: str, stop_streami
yield from handle_stream_error(session_id, e, stop_streaming_flags)
except Exception as e:
yield from handle_stream_error(session_id, e, stop_streaming_flags)
+
finally:
if isinstance(stop_streaming_flags, dict):
stop_streaming_flags["stop"] = False
logging.info(f"[{session_id}] Stop flag reset.")
- yield (None, AdditionalOutputs("STREAM_DONE"))



@@ -78,8 +126,8 @@ def handle_stream_error(session_id: str, error: Exception | str, stop_streaming_
if isinstance(stop_streaming_flags, dict):
stop_streaming_flags["stop"] = False

- yield (None, AdditionalOutputs({"error": True, "message": msg}))
- yield (None, AdditionalOutputs("STREAM_DONE"))
+ yield ((16000, np.zeros(16000, dtype=np.float32).reshape(1, -1)), AdditionalOutputs({"errored": True, "value": msg}))
+


def _is_stop_requested(stop_streaming_flags: dict) -> bool:
@@ -100,3 +148,7 @@ def stop_streaming(session_id: str, stop_streaming_flags: dict):
return stop_streaming_flags


+ def raise_function():
+ """Raise an error randomly (1 out of 10 times)."""
+ if random.randint(1, 10) == 1:
+ raise RuntimeError("Random failure triggered!")
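
For context on the scheme used by the relocated generate_coturn_config(): coturn's use-auth-secret mode (the TURN REST API convention) expects the username to be `<expiry-timestamp>:<user>` and the credential to be the base64-encoded HMAC-SHA1 of that username keyed with the shared secret, which is what the function above derives. Below is a minimal standard-library sketch of that derivation and of the matching server-side check; the helper names and the placeholder secret are illustrative, not part of this commit.

```python
# Illustrative sketch only: reproduces the use-auth-secret credential derivation
# with the standard library. "your_secret_key" mirrors the fallback default in
# generate_coturn_config(); real deployments read TURN_SECRET_KEY from the env.
import base64
import hashlib
import hmac
import time


def ephemeral_turn_credential(secret: str, user: str, ttl: int = 3600) -> tuple[str, str]:
    """Return (username, credential) as '<expiry>:<user>' and base64(HMAC-SHA1(secret, username))."""
    expiry = int(time.time()) + ttl  # Unix time after which the credential is no longer accepted
    username = f"{expiry}:{user}"
    digest = hmac.new(secret.encode(), username.encode(), hashlib.sha1).digest()
    return username, base64.b64encode(digest).decode()


def credential_is_valid(secret: str, username: str, credential: str) -> bool:
    """Server-side style check: recompute the HMAC and make sure the expiry is in the future."""
    expected = base64.b64encode(
        hmac.new(secret.encode(), username.encode(), hashlib.sha1).digest()
    ).decode()
    expiry = int(username.split(":", 1)[0])
    return hmac.compare_digest(expected, credential) and expiry > int(time.time())


if __name__ == "__main__":
    username, credential = ephemeral_turn_credential("your_secret_key", "client")
    print(username, credential, credential_is_valid("your_secret_key", username, credential))
```

Because the expiry is encoded in the username itself, any credential produced this way stops being accepted once the TTL elapses.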
app/ui_utils.py CHANGED
@@ -74,7 +74,6 @@ def to_updates(cfg):

def apply_preset_if_example(filepath, auto_apply):
"""If the file is an example AND auto_apply=True -> apply the preset. Otherwise, change nothing."""
- logging.info(f"apply_preset_if_example {filepath} {auto_apply} ")
if not filepath or not auto_apply:
updates = [gr.update() for _ in range(12)]
updates.append(gr.update())
@@ -127,48 +126,67 @@ def summarize_config(
)
return txt

- def handle_additional_outputs( progress_value):
+ def handle_additional_outputs(webrtc_stream, msg):
"""
Update UI elements based on streaming progress or errors.
Controls button states, audio visibility, and progress slider.
"""
- logging.debug(f"Additional output received: {progress_value}")
- # ui_components = [start_button, stop_button,go_to_task, audio_source_step, status_slider]
+ logging.debug(f"Additional output received: {msg}")
+ # ui_components = [start_stream_button, stop_stream_button, go_to_task, audio_source_step, status_slider, walkthrough]

+ progress = float(0)
# Handle structured error message
- non_ok= (
- gr.update(visible=True), # start_button enabled
- gr.update(visible=False), # stop_button disabled
- gr.update(visible=False), # go_to_task disabled
- gr.update(interactive=True), # audio_source_step re-shown
- gr.update(visible=False, value=0), # slider hidden
+
+ if isinstance(msg, dict) and msg.get("errored"):
+ value = msg.get("value", "Unknown error.")
+ logging.error(f"[stream_ui] Client-side error: {value}")
+
+ return (
+ gr.update(visible=True), # start_stream_button enabled
+ gr.update(visible=False), # stop_stream_button disabled
+ gr.update(visible=False), # go_to_task disabled
+ gr.update(interactive=True), # audio_source_step re-shown
+ gr.update(visible=False, value=0), # slider hidden
+ gr.update(selected=1), # walkthrough back to the stream step
+ gr.update(value=f"**Error:** {value}", visible=True)
+ )
+
+
+ elif msg.get("progressed"):
+ value = msg.get("value", 0)
+ progress = float(value)
+ if progress == 100.00:
+ return (
+ gr.update(visible=True), # start_stream_button enabled
+ gr.update(visible=False), # stop_stream_button disabled
+ gr.update(visible=True), # go_to_task enabled
+ gr.update(interactive=True), # audio_source_step re-enabled
+ gr.update(visible=True, value=progress), # show progress
+ gr.update(), # walkthrough unchanged
+ gr.update(value="", visible=False)
+ )
+ else:
+ return (
+ gr.update(visible=False), # start_stream_button disabled
+ gr.update(visible=True), # stop_stream_button enabled
+ gr.update(visible=True), # go_to_task enabled
+ gr.update(interactive=False), # audio_source_step disabled
+ gr.update(visible=True, value=progress), # show progress
+ gr.update(), # walkthrough unchanged
+ gr.update(value="", visible=False)
+ )
+ elif msg.get("stopped"):
+
+ return (
+ gr.update(visible=True), # start_stream_button enabled
+ gr.update(visible=False), # stop_stream_button disabled
+ gr.update(visible=False), # go_to_task disabled
+ gr.update(interactive=True), # audio_source_step re-shown
+ gr.update(visible=True, value=0), # reset progress
+ gr.update(selected=1), # walkthrough back to the stream step
+ gr.update(value="ℹ️ Stream stopped by user.", visible=True)
)
- if isinstance(progress_value, dict) and progress_value.get("error"):
- msg = progress_value.get("message", "Unknown error.")
- logging.error(f"[stream_ui] Client-side error: {msg}")
- return non_ok
-
- try:
- progress = float(progress_value)
- except (ValueError, TypeError):
- progress = 0
-
- # --- Stream not started ---
- if progress <= 0:
- return non_ok
-
- # --- Stream finished ---
- if progress >= 100:
- return non_ok
-
- # --- Stream in progress ---
- return (
- gr.update(visible=False), # start_button disabled
- gr.update(visible=True), # stop_button enabled
- gr.update(visible=True), # go_to_task enabled
- gr.update(interactive=False), # hide audio_source_step
- gr.update(visible=True, value=progress), # show progress
- )
+


def on_file_load(filepath):
@@ -200,4 +218,10 @@ def get_custom_theme() :
css_path = os.path.join(os.path.dirname(os.path.dirname(__file__)), "assets", "custom_style.css")
with open(css_path, encoding="utf-8") as f:
css_style = f.read()
- return theme, css_style
+ return theme, css_style
+
+
+
+ def raise_error(message="An error occurred."):
+ raise gr.Error(message)
+
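
Taken together, read_and_stream_audio() in app/stream_utils.py and handle_additional_outputs() above now communicate through a small dict contract carried by AdditionalOutputs: {"progressed": True, "value": <percent>} while chunks are flowing, {"stopped": True, "value": "STREAM_STOPPED"} when the user interrupts, and {"errored": True, "value": <message>} on failure, with each branch returning one gr.update() per entry in ui_components. The sketch below is a hypothetical test of that contract; the file name and sample values are made up and not part of the commit.

```python
# Hypothetical tests/test_ui_contract.py: exercises handle_additional_outputs()
# with payloads shaped like the ones read_and_stream_audio() yields.
from app.ui_utils import handle_additional_outputs

SAMPLE_MESSAGES = [
    {"progressed": True, "value": 42.5},           # mid-stream progress tick
    {"progressed": True, "value": 100.00},         # last chunk reported
    {"stopped": True, "value": "STREAM_STOPPED"},  # user pressed "Stop Streaming"
    {"errored": True, "value": "decode failure"},  # produced by handle_stream_error()
]


def test_handler_returns_one_update_per_ui_component():
    for msg in SAMPLE_MESSAGES:
        updates = handle_additional_outputs(webrtc_stream=None, msg=msg)
        # app.py wires 7 outputs: start/stop stream buttons, go_to_config,
        # audio_source_step, status_slider, walkthrough, status_message_stream.
        assert len(updates) == 7
```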
app/utils.py CHANGED
@@ -1,12 +1,6 @@
import torch
from app.logger_config import logger as logging
- import hmac
- import hashlib
- import base64
- import os
- import time
import random
-
def debug_current_device():
"""Safely logs GPU or CPU information without crashing on stateless GPU."""
logging.debug("=== Debugging current device ===")
@@ -55,41 +49,6 @@ def get_current_device():



- def generate_coturn_config():
- """
- Generate a complete Coturn configuration with dynamic authentication (use-auth-secret).
- Returns:
- dict: coturn_config object ready to be used on the WebRTC client side.
- """
-
- secret_key = os.getenv("TURN_SECRET_KEY", "your_secret_key")
- ttl = int(os.getenv("TURN_TTL", 3600))
- turn_url = os.getenv("TURN_URL", "turn:*******")
- turn_s_url = os.getenv("TURN_S_URL", "turns:*****")
- user = os.getenv("TURN_USER", "client")
-
- timestamp = int(time.time()) + ttl
- username = f"{timestamp}:{user}"
- password = base64.b64encode(
- hmac.new(secret_key.encode(), username.encode(), hashlib.sha1).digest()
- ).decode()
-
- coturn_config = {
- "iceServers": [
- {
- "urls": [
- f"{turn_url}",
- f"{turn_s_url}",
- ],
- "username": username,
- "credential": password,
- }
- ]
- }
- return coturn_config
-
-
-



assets/custom_style.css CHANGED
@@ -134,4 +134,14 @@ body {
background: #EFF6FF !important;
border-radius: 6px;
box-shadow: 0 0 6px rgba(59,130,246,0.3);
+ }
+
+
+ #status-message-stream {
+ background-color: #FFF5F5;
+ border: 1px solid #DC2626;
+ border-radius: 6px;
+ padding: 0.75rem;
+ color: #991B1B;
+ font-weight: 500;
}