hexgrad committed
Commit 0b561c5 · verified · 1 Parent(s): a385437

Upload app.py

Files changed (1): app.py (+6 -9)
app.py CHANGED
@@ -236,7 +236,7 @@ def _generate(text, voice, ps, speed, opening_cut, closing_cut, ease_in, ease_ou
 def toggle_autoplay(autoplay):
     return gr.Audio(interactive=False, label='Output Audio', autoplay=autoplay)
 
-USE_GPU_CHOICES = [('Auto 🔀', 'auto'), ('CPU 🚲', False), ('ZeroGPU 🏎️', True)]
+USE_GPU_CHOICES = [('Auto 🔀', 'auto'), ('CPU 💬', False), ('ZeroGPU 📝', True)]
 USE_GPU_INFOS = {
     'auto': 'Use CPU or GPU, whichever is faster',
     False: 'CPU is ~faster <100 tokens',
@@ -496,26 +496,23 @@ The average hourly cost for the 1x A100-class 80GB VRAM instances used for train
 ### Gradio API
 This Space can be used via API. The following code block can be copied and run in one Google Colab cell.
 ```
-# 1. Install the Gradio Python client
+# 1️⃣ Install the Gradio Python client
 !pip install -q gradio_client
-
-# 2. Initialize the client
+# 2️⃣ Initialize the client
 from gradio_client import Client
 client = Client('hexgrad/Kokoro-TTS')
-
-# 3. Call the generate endpoint, which returns a pair: an audio path and a string of output phonemes
+# 3️⃣ Call the generate endpoint, which returns a pair: an audio path and a string of output phonemes
 audio_path, out_ps = client.predict(
     text="How could I know? It's an unanswerable question. Like asking an unborn child if they'll lead a good life. They haven't even been born.",
     voice='af',
     api_name='/generate'
 )
-
-# 4. Display the audio and print the output phonemes
+# 4️⃣ Display the audio and print the output phonemes
 from IPython.display import display, Audio
 display(Audio(audio_path, autoplay=True))
 print(out_ps)
 ```
-Note that this Space and the underlying Kokoro model are both under development and subject to change. Reliability is not guaranteed. Hugging Face and/or Gradio might enforce their own rate limits.
+This Space and the underlying Kokoro model are both under development and subject to change. Reliability is not guaranteed. Hugging Face and Gradio might enforce their own rate limits.
 
 ### Model Version History
 | Version | Date | Val mel / dur / f0 Losses |
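
For context, a minimal sketch of how the `(label, value)` tuples in `USE_GPU_CHOICES` and the `USE_GPU_INFOS` lookup touched by this commit could drive a `gr.Radio` whose info text follows the selection. This is not taken from app.py: the `True` entry's wording, the handler name `change_use_gpu`, and the event wiring are assumptions for illustration.

```python
# Hypothetical sketch only; not the code in this commit.
import gradio as gr

USE_GPU_CHOICES = [('Auto 🔀', 'auto'), ('CPU 💬', False), ('ZeroGPU 📝', True)]
USE_GPU_INFOS = {
    'auto': 'Use CPU or GPU, whichever is faster',
    False: 'CPU is ~faster <100 tokens',
    True: 'ZeroGPU is ~faster >100 tokens',  # assumed wording; this entry is not shown in the hunk above
}

def change_use_gpu(use_gpu):
    # Return a Radio update whose info string matches the selected backend.
    return gr.Radio(choices=USE_GPU_CHOICES, value=use_gpu, info=USE_GPU_INFOS[use_gpu])

with gr.Blocks() as demo:
    use_gpu = gr.Radio(choices=USE_GPU_CHOICES, value='auto', label='Hardware',
                       info=USE_GPU_INFOS['auto'], interactive=True)
    use_gpu.change(fn=change_use_gpu, inputs=[use_gpu], outputs=[use_gpu])

demo.launch()
```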