IFMedTechdemo committed on
Commit
f91f25c
·
verified ·
1 Parent(s): 7a2bcda

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +81 -196
app.py CHANGED
@@ -1,229 +1,114 @@
1
  import gradio as gr
2
  import requests
 
3
  import json
4
 
5
- # Function to get available models from Pollinations API
6
- def get_available_models():
7
- try:
8
- response = requests.get("https://text.pollinations.ai/models")
9
- if response.status_code == 200:
10
- models_data = response.json()
11
- # Extract just the model names if API returns complex structure
12
- if isinstance(models_data, list):
13
- # If it's a list of strings, return as is
14
- if all(isinstance(m, str) for m in models_data):
15
- return models_data
16
- # If it's a list of dicts, extract model names/id only
17
- elif all(isinstance(m, dict) for m in models_data):
18
- model_names = []
19
- for m in models_data:
20
- # Try to get 'name' or 'id' field, ignore everything else
21
- if 'name' in m and isinstance(m['name'], str):
22
- model_names.append(m['name'])
23
- elif 'id' in m and isinstance(m['id'], str):
24
- model_names.append(m['id'])
25
- return model_names if model_names else [
26
- "openai", "mistral", "mistral-large",
27
- "claude-3.5-sonnet", "llama-3.3-70b", "gemini"
28
- ]
29
- # Fallback to default list
30
- return [
31
- "openai",
32
- "mistral",
33
- "mistral-large",
34
- "claude-3.5-sonnet",
35
- "llama-3.3-70b",
36
- "gemini"
37
- ]
38
- else:
39
- # Fallback list of models
40
- return [
41
- "openai",
42
- "mistral",
43
- "mistral-large",
44
- "claude-3.5-sonnet",
45
- "llama-3.3-70b",
46
- "gemini"
47
- ]
48
- except:
49
- return [
50
- "openai",
51
- "mistral",
52
- "mistral-large",
53
- "claude-3.5-sonnet",
54
- "llama-3.3-70b",
55
- "gemini"
56
- ]
57
-
58
- # Function to generate text using Pollinations API
59
- def generate_text(prompt, model, seed, system, temperature, max_tokens, top_p):
60
- if not prompt:
61
- return "Please enter a prompt."
62
 
63
  try:
64
- # Prepare the API request using the same format as user's code
65
- url = "https://text.pollinations.ai/"
66
-
67
- # Build the query parameters
68
- params = {
69
- "model": model,
70
- "prompt": prompt,
71
- }
72
-
73
- # Add optional parameters if provided
74
- if seed:
75
- params["seed"] = int(seed)
76
- if system:
77
- params["system"] = system
78
- if temperature is not None:
79
- params["temperature"] = temperature
80
- if max_tokens:
81
- params["max_tokens"] = int(max_tokens)
82
- if top_p is not None:
83
- params["top_p"] = top_p
84
-
85
- # Make the request
86
  response = requests.get(url, params=params)
 
87
 
88
- if response.status_code == 200:
89
- result_text = response.text
90
-
91
- # Try to parse as JSON for better formatting
92
  try:
93
- json_result = json.loads(result_text)
94
- return f"```json\n{json.dumps(json_result, indent=2)}\n```"
95
- except:
96
- # Return as plain text if not JSON
97
- return result_text
98
  else:
99
- return f"Error: API returned status code {response.status_code}\n{response.text}"
100
-
101
- except Exception as e:
102
- return f"Error: {str(e)}"
103
-
104
# Get available models once at import time to populate the model dropdown
# (get_available_models falls back to a static list internally, so this
# cannot raise even when the API is unreachable).
available_models = get_available_models()
106
 
107
# Create Gradio interface
# Layout: left column = prompt, model picker, and collapsed sampling
# controls; right column = rendered Markdown output plus a plain-text
# copy box. All generation goes through generate_text().
with gr.Blocks(title="Pollinations Text Generator") as demo:
    gr.Markdown(
        """
        # 🌸 Pollinations Text Generator
        Generate text using various AI models via the Pollinations API.
        Select a model and provide a prompt to get started!
        """
    )

    with gr.Row():
        with gr.Column():
            # Main prompt input
            prompt_input = gr.Textbox(
                label="Prompt",
                placeholder="Enter your text prompt here...",
                lines=5
            )

            model_dropdown = gr.Dropdown(
                choices=available_models,
                label="Model",
                # Default to the first fetched model; "openai" if the list is empty
                value=available_models[0] if available_models else "openai",
                info="Select the AI model to use for text generation"
            )

            # Sampling / decoding controls, collapsed by default
            with gr.Accordion("Advanced Settings", open=False):
                seed_input = gr.Number(
                    label="Seed (optional)",
                    value=None,
                    precision=0,  # integer-only input
                    info="Random seed for reproducible results"
                )

                system_input = gr.Textbox(
                    label="System Prompt (optional)",
                    placeholder="Enter system instructions...",
                    lines=2,
                    info="System-level instructions for the model"
                )

                temperature_slider = gr.Slider(
                    minimum=0,
                    maximum=2,
                    value=0.7,
                    step=0.1,
                    label="Temperature",
                    info="Controls randomness (higher = more creative)"
                )

                max_tokens_slider = gr.Slider(
                    minimum=1,
                    maximum=2048,
                    value=512,
                    step=1,
                    label="Max Tokens",
                    info="Maximum length of the generated text"
                )

                top_p_slider = gr.Slider(
                    minimum=0,
                    maximum=1,
                    value=0.9,
                    step=0.05,
                    label="Top P",
                    info="Nucleus sampling parameter"
                )

            generate_btn = gr.Button("Generate", variant="primary")

        with gr.Column():
            # Rendered (Markdown) view of the generated text
            output_display = gr.Markdown(
                value="_Your generated text will appear here..._",
                label="Generated Text"
            )

            # Add a readonly textbox for easy copying
            with gr.Accordion("Copy Output (Plain Text)", open=False):
                output_copy = gr.Textbox(
                    label="Copyable Output",
                    lines=15,
                    show_copy_button=True,
                    interactive=False
                )

    gr.Markdown(
        """
        ### About
        This Space uses the [Pollinations API](https://github.com/pollinations/pollinations) for text generation.
        The API supports multiple models and is free to use.

        **Parameters:**
        - **Model**: Choose from available AI models
        - **Seed**: Set a random seed for reproducible outputs
        - **System**: Provide system-level instructions
        - **Temperature**: Control response creativity (0=deterministic, 2=very creative)
        - **Max Tokens**: Set maximum response length
        - **Top P**: Control diversity via nucleus sampling
        """
    )

    # Set up the generate button action
    def generate_and_display(prompt, model, seed, system, temp, max_tok, top_p):
        # Return both markdown formatted and plain text versions so the
        # rendered view and the copyable textbox stay in sync.
        result = generate_text(prompt, model, seed, system, temp, max_tok, top_p)
        return result, result

    # Input order must match generate_and_display's parameter order.
    generate_btn.click(
        fn=generate_and_display,
        inputs=[
            prompt_input,
            model_dropdown,
            seed_input,
            system_input,
            temperature_slider,
            max_tokens_slider,
            top_p_slider
        ],
        outputs=[output_display, output_copy]
    )

# Launch the app
if __name__ == "__main__":
    demo.launch()
 
1
  import gradio as gr
2
  import requests
3
+ import urllib.parse
4
  import json
5
 
6
+ def get_pollinations_response(prompt, model="openai", seed=42, use_json=False, system_prompt=""):
7
+ """
8
+ Function to call Pollinations AI API
9
+ """
10
+ params = {
11
+ "model": model,
12
+ "seed": seed,
13
+ }
14
+
15
+ # Add optional parameters
16
+ if use_json:
17
+ params["json"] = "true"
18
+ if system_prompt:
19
+ params["system"] = system_prompt
20
+
21
+ # Encode the prompt
22
+ encoded_prompt = urllib.parse.quote(prompt)
23
+
24
+ # Build URL
25
+ url = f"https://text.pollinations.ai/{encoded_prompt}"
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
26
 
27
  try:
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
28
  response = requests.get(url, params=params)
29
+ response.raise_for_status()
30
 
31
+ if use_json:
32
+ # Parse JSON response
 
 
33
  try:
34
+ data = json.loads(response.text)
35
+ return json.dumps(data, indent=2)
36
+ except json.JSONDecodeError:
37
+ return f"Error: API returned invalid JSON.\nRaw response: {response.text}"
 
38
  else:
39
+ return response.text
40
+
41
+ except requests.exceptions.RequestException as e:
42
+ return f"Error fetching text: {e}"
 
 
 
43
 
44
# Create Gradio interface
# Single-page layout: prompt and settings in the left column, the API
# response in the right column. Generation runs through
# get_pollinations_response().
with gr.Blocks(title="Pollinations AI Text API") as demo:
    gr.Markdown("# Pollinations AI Text Generator")
    gr.Markdown("Generate text using Pollinations AI API with customizable parameters")

    with gr.Row():
        with gr.Column():
            prompt_input = gr.Textbox(
                label="Prompt",
                placeholder="Enter your prompt here...",
                lines=3
            )

            # Model and seed side by side
            with gr.Row():
                model_dropdown = gr.Dropdown(
                    choices=["openai", "mistral", "llama"],
                    value="openai",
                    label="Model"
                )
                seed_number = gr.Number(
                    value=42,
                    label="Seed",
                    precision=0  # integer-only input
                )

            system_input = gr.Textbox(
                label="System Prompt (Optional)",
                placeholder="e.g., Explain things like I'm five.",
                lines=2
            )

            json_checkbox = gr.Checkbox(
                label="Return as JSON",
                value=False
            )

            submit_btn = gr.Button("Generate", variant="primary")

        with gr.Column():
            output_text = gr.Textbox(
                label="Response",
                lines=15,
                max_lines=20
            )

    # Event handler — input order must match get_pollinations_response's
    # signature: (prompt, model, seed, use_json, system_prompt).
    submit_btn.click(
        fn=get_pollinations_response,
        inputs=[prompt_input, model_dropdown, seed_number, json_checkbox, system_input],
        outputs=output_text
    )

    # Also trigger on Enter key press in prompt
    prompt_input.submit(
        fn=get_pollinations_response,
        inputs=[prompt_input, model_dropdown, seed_number, json_checkbox, system_input],
        outputs=output_text
    )

    # Examples — each row fills the five inputs in the same order as above
    gr.Examples(
        examples=[
            ["Explain the theory of relativity simply", "openai", 42, False, ""],
            ["Write a haiku about coding", "openai", 42, False, "You are a creative poet"],
            ["What is machine learning?", "openai", 42, False, "Explain things like I'm five."],
        ],
        inputs=[prompt_input, model_dropdown, seed_number, json_checkbox, system_input]
    )

if __name__ == "__main__":
    demo.launch()