Traceback (most recent call last):
File "/home/anthony_metzidis/miniconda3/envs/voicecraft2/lib/python3.9/site-packages/gradio/queueing.py", line 407, in call_prediction
output = await route_utils.call_process_api(
File "/home/anthony_metzidis/miniconda3/envs/voicecraft2/lib/python3.9/site-packages/gradio/route_utils.py", line 226, in call_process_api
output = await app.get_blocks().process_api(
File "/home/anthony_metzidis/miniconda3/envs/voicecraft2/lib/python3.9/site-packages/gradio/blocks.py", line 1550, in process_api
result = await self.call_function(
File "/home/anthony_metzidis/miniconda3/envs/voicecraft2/lib/python3.9/site-packages/gradio/blocks.py", line 1185, in call_function
prediction = await anyio.to_thread.run_sync(
File "/home/anthony_metzidis/miniconda3/envs/voicecraft2/lib/python3.9/site-packages/anyio/to_thread.py", line 56, in run_sync
return await get_async_backend().run_sync_in_worker_thread(
File "/home/anthony_metzidis/miniconda3/envs/voicecraft2/lib/python3.9/site-packages/anyio/_backends/_asyncio.py", line 2470, in run_sync_in_worker_thread
return await future
File "/home/anthony_metzidis/miniconda3/envs/voicecraft2/lib/python3.9/site-packages/anyio/_backends/_asyncio.py", line 967, in run
result = context.run(func, *args)
File "/home/anthony_metzidis/miniconda3/envs/voicecraft2/lib/python3.9/site-packages/gradio/utils.py", line 661, in wrapper
response = f(*args, **kwargs)
File "/home/anthony_metzidis/VoiceCraft/gradio_app.py", line 309, in run
_, gen_audio = inference_one_sample(voicecraft_model["model"],
File "/home/anthony_metzidis/miniconda3/envs/voicecraft2/lib/python3.9/site-packages/torch/utils/_contextlib.py", line 116, in decorate_context
return func(*args, **kwargs)
File "/home/anthony_metzidis/VoiceCraft/inference_speech_editing_scale.py", line 58, in inference_one_sample
encoded_frames = model.inference(
File "/home/anthony_metzidis/VoiceCraft/models/voicecraft.py", line 820, in inference
samples, codebook_eog, prev_token, consec_silence_count = sample_helper(n_eog, logits, codebook_eog, top_k, top_p, temperature, prev_token, consec_silence_count, stop_repetition, silence_tokens, cur_num_gen)
File "/home/anthony_metzidis/VoiceCraft/models/voicecraft.py", line 742, in sample_helper
samples = topk_sampling(
File "/home/anthony_metzidis/VoiceCraft/models/voicecraft.py", line 83, in topk_sampling
logits = top_k_top_p_filtering(logits, top_k=top_k, top_p=top_p)
File "/home/anthony_metzidis/VoiceCraft/models/voicecraft.py", line 43, in top_k_top_p_filtering
indices_to_remove = logits < torch.topk(logits, top_k)[0][..., -1, None]
TypeError: topk(): argument 'k' (position 2) must be int, not float
This seems to be a regression introduced in commit 7121981; the prior commit works correctly. The likely cause is that `top_k` is now passed to `torch.topk()` as a float, but `torch.topk()` requires its `k` argument to be an int.
STEPS TO REPEAT

python gradio_app.py

ACTUAL RESULTS
TypeError: topk(): argument 'k' (position 2) must be int, not float

Workaround
git checkout 4873249ba3d6328ed60c652ef577c3c19d0a6b9a

Full Traceback