gz412 committed
Commit 1a04b2a · 1 Parent(s): 57aaf95

test app.py

Files changed (1)
  1. app.py +34 -81
app.py CHANGED
@@ -1,94 +1,47 @@
-import spaces
-print("gz start")
+import os
 import sys
-print("gz start")
 import torch
-print("gz start")
-print("CUDA available:", torch.cuda.is_available())
-
-import gradio as gr
-import opencc
-
-# Add the third-party library path
-sys.path.append('third_party/Matcha-TTS')
+import spaces
 
-from cosyvoice.cli.cosyvoice import CosyVoice2
-from cosyvoice.utils.file_utils import load_wav
+print("===== Application Startup =====")
 
-from huggingface_hub import hf_hub_download
+# Do not force-disable CUDA; keep the line below commented out
+# os.environ["CUDA_VISIBLE_DEVICES"] = "-1"
 
-# Simplified-to-Traditional Chinese conversion
-converter = opencc.OpenCC('s2t.json')
+print("gz start")
 
-# Load the models
-cosyvoice_base = CosyVoice2(
-    'ASLP-lab/WSYue-TTS-Cosyvoice2',
-    load_jit=False, load_trt=False, load_vllm=False, fp16=False
-)
-print('load model 1')
-cosyvoice_zjg = cosyvoice_base
-# cosyvoice_zjg = CosyVoice2(
-#     'ASLP-lab/WSYue-TTS-Cosyvoice2-zjg',
-#     load_jit=False, load_trt=False, load_vllm=False, fp16=False
-# )
-# print('load model 2')
-# cosyvoice_biaobei = CosyVoice2(
-#     'pretrained_models/CosyVoice2-yue-biaobei',
-#     load_jit=False, load_trt=False, load_vllm=False, fp16=False
-# )
+print("Python version:", sys.version)
+print("Torch version:", torch.__version__)
+print("CUDA available:", torch.cuda.is_available())
 
-@spaces.GPU
-def tts_inference(model_choice, text, prompt_audio):
-    # Select the model and its default prompt audio
-    if model_choice == "CosyVoice2-张悦楷粤语评书":
-        model = cosyvoice_zjg
-        prompt_audio = "asset/sg_017_090.wav"
-    elif model_choice == "CosyVoice2-精品女音":
-        model = cosyvoice_base
-        prompt_audio = "asset/F01_中立_20054.wav"
-    elif model_choice == "CosyVoice2-base":
-        model = cosyvoice_base
-        if prompt_audio is None:
-            return None, "请上传参考音频"
+# Try allocating a tensor on the GPU
+try:
+    if torch.cuda.is_available():
+        device = torch.device("cuda")
+        x = torch.rand((2, 3), device=device)
+        y = torch.mm(x, x.T)
+        print("Tensor allocated on GPU successfully:")
+        print(y)
     else:
-        return None, "未知模型"
-
-    # Convert Simplified to Traditional Chinese
-    text = converter.convert(text)
-    prompt_speech_16k = load_wav(prompt_audio, 16000)
+        print("CUDA not available, fallback to CPU")
+        x = torch.rand((2, 3))
+        y = torch.mm(x, x.T)
+        print("Tensor allocated on CPU successfully:")
+        print(y)
+except Exception as e:
+    print("ERROR during CUDA tensor allocation:", str(e))
 
-    all_speech = []
-    for _, j in enumerate(
-        model.inference_instruct2(
-            text, "用粤语说这句话", prompt_speech_16k, stream=False
-        )
-    ):
-        all_speech.append(j['tts_speech'])
-
-    concatenated_speech = torch.cat(all_speech, dim=1)
-    audio_numpy = concatenated_speech.squeeze(0).cpu().numpy()
-    sample_rate = model.sample_rate
-
-    return (sample_rate, audio_numpy), f"生成成功:{text}"
 
+# ---- Gradio test interface ----
+import gradio as gr
 
-# ---- Gradio Interface ----
-demo = gr.Interface(
-    fn=tts_inference,
-    inputs=[
-        gr.Dropdown(
-            ["CosyVoice2-base", "CosyVoice2-张悦楷粤语评书"],
-            # ["CosyVoice2-base", "CosyVoice2-张悦楷粤语评书", "CosyVoice2-精品女音"],
-            label="选择模型", value="CosyVoice2-base"
-        ),
-        gr.Textbox(lines=2, label="输入文本"),
-        # gr.Audio(source="upload", type="filepath", label="上传参考音频(仅 CosyVoice2-base 必需)")
-        gr.Audio(sources=["upload"], type="filepath", label="上传参考音频(仅 CosyVoice2-base 必需)")
-    ],
-    outputs=[
-        gr.Audio(type="numpy", label="生成的语音"),
-        gr.Textbox(label="状态信息")
-    ]
-)
+@spaces.GPU
+def gpu_test():
+    if torch.cuda.is_available():
+        x = torch.rand((2, 3), device="cuda")
+        return f"GPU OK, tensor sum={x.sum().item()}"
+    else:
+        return "No GPU detected, using CPU"
 
+demo = gr.Interface(fn=gpu_test, inputs=[], outputs="text")
 demo.launch()