<?xml version="1.0" encoding="utf-8" standalone="yes"?>
<rss version="2.0" xmlns:atom="http://www.w3.org/2005/Atom">
    <channel>
        <title>Speech Recognition on Producthunt daily</title>
        <link>https://producthunt.programnotes.cn/en/tags/speech-recognition/</link>
        <description>Recent content in Speech Recognition on Producthunt daily</description>
        <generator>Hugo -- gohugo.io</generator>
        <language>en</language>
        <lastBuildDate>Thu, 02 Apr 2026 16:14:14 +0800</lastBuildDate><atom:link href="https://producthunt.programnotes.cn/en/tags/speech-recognition/index.xml" rel="self" type="application/rss+xml" /><item>
        <title>VibeVoice</title>
        <link>https://producthunt.programnotes.cn/en/p/vibevoice/</link>
        <pubDate>Thu, 02 Apr 2026 16:14:14 +0800</pubDate>
        
        <guid>https://producthunt.programnotes.cn/en/p/vibevoice/</guid>
        <description>&lt;img src="https://images.unsplash.com/photo-1564982547455-b9e810ae9223?ixid=M3w0NjAwMjJ8MHwxfHJhbmRvbXx8fHx8fHx8fDE3NzUxMTc2MzJ8&amp;ixlib=rb-4.1.0" alt="Featured image of post VibeVoice" /&gt;&lt;h1 id=&#34;microsoftvibevoice&#34;&gt;&lt;a class=&#34;link&#34; href=&#34;https://github.com/microsoft/VibeVoice&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;microsoft/VibeVoice&lt;/a&gt;
&lt;/h1&gt;&lt;div align=&#34;center&#34;&gt;
&lt;h2 id=&#34;-vibevoice-open-source-frontier-voice-ai&#34;&gt;🎙️ VibeVoice: Open-Source Frontier Voice AI
&lt;/h2&gt;&lt;p&gt;&lt;a class=&#34;link&#34; href=&#34;https://microsoft.github.io/VibeVoice&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;&lt;img src=&#34;https://img.shields.io/badge/Project-Page-blue?logo=githubpages&#34; loading=&#34;lazy&#34; alt=&#34;Project Page&#34;&gt;&lt;/a&gt;
&lt;a class=&#34;link&#34; href=&#34;https://huggingface.co/collections/microsoft/vibevoice-68a2ef24a875c44be47b034f&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;&lt;img src=&#34;https://img.shields.io/badge/HuggingFace-Collection-orange?logo=huggingface&#34; loading=&#34;lazy&#34; alt=&#34;Hugging Face&#34;&gt;&lt;/a&gt;
&lt;a class=&#34;link&#34; href=&#34;https://openreview.net/pdf?id=FihSkzyxdv&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;&lt;img src=&#34;https://img.shields.io/badge/TTS-Report-red?logo=arxiv&#34; loading=&#34;lazy&#34; alt=&#34;TTS Report&#34;&gt;&lt;/a&gt;
&lt;a class=&#34;link&#34; href=&#34;https://arxiv.org/pdf/2601.18184&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;&lt;img src=&#34;https://img.shields.io/badge/ASR-Report-yellow?logo=arxiv&#34; loading=&#34;lazy&#34; alt=&#34;ASR Report&#34;&gt;&lt;/a&gt;
&lt;a class=&#34;link&#34; href=&#34;https://colab.research.google.com/github/microsoft/VibeVoice/blob/main/demo/VibeVoice_colab.ipynb&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;&lt;img src=&#34;https://img.shields.io/badge/StreamingTTS-Colab-green?logo=googlecolab&#34; loading=&#34;lazy&#34; alt=&#34;Colab&#34;&gt;&lt;/a&gt;
&lt;a class=&#34;link&#34; href=&#34;https://aka.ms/vibevoice-asr&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;&lt;img src=&#34;https://img.shields.io/badge/ASR-Playground-6F42C1?logo=gradio&#34; loading=&#34;lazy&#34; alt=&#34;ASR Playground&#34;&gt;&lt;/a&gt;&lt;/p&gt;
&lt;p&gt;&lt;a class=&#34;link&#34; href=&#34;https://trendshift.io/repositories/15465&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;&lt;img src=&#34;https://trendshift.io/api/badge/repositories/15465&#34; loading=&#34;lazy&#34; alt=&#34;microsoft%2FVibeVoice | Trendshift&#34;&gt;&lt;/a&gt;&lt;/p&gt;
&lt;/div&gt;
&lt;div align=&#34;center&#34;&gt;
&lt;picture&gt;
  &lt;source media=&#34;(prefers-color-scheme: dark)&#34; srcset=&#34;Figures/VibeVoice_logo_white.png&#34;&gt;
  &lt;img src=&#34;Figures/VibeVoice_logo.png&#34; alt=&#34;VibeVoice Logo&#34; width=&#34;300&#34;&gt;
&lt;/picture&gt;
&lt;/div&gt;
&lt;div align=&#34;left&#34;&gt;
&lt;h3&gt;📰 News&lt;/h3&gt;
&lt;p&gt;&lt;strong&gt;🎉 VibeVoice-ASR is being adopted by the open-source community! &lt;a href=&#34;https://vibingjustspeakit.github.io/Vibing/&#34;&gt;Vibing&lt;/a&gt;, a voice-powered input method, is now built on top of VibeVoice-ASR. Download: &lt;a class=&#34;link&#34; href=&#34;https://github.com/VibingJustSpeakIt/Vibing/releases/download/v0.1.0/Vibing-v0.1.0-mac.dmg&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;macOS&lt;/a&gt; | &lt;a class=&#34;link&#34; href=&#34;https://github.com/VibingJustSpeakIt/Vibing/releases/download/v0.1.0/Vibing-v0.1.0-windows.exe&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;Windows&lt;/a&gt;&lt;/strong&gt;&lt;/p&gt;
&lt;p&gt;&lt;a class=&#34;link&#34; href=&#34;https://github.com/user-attachments/assets/db0bb23f-ae06-4135-a66a-1ff1669f4f84&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;https://github.com/user-attachments/assets/db0bb23f-ae06-4135-a66a-1ff1669f4f84&lt;/a&gt;&lt;/p&gt;
&lt;p&gt;&lt;strong&gt;2026-03-06: 🚀 VibeVoice ASR is now part of a &lt;a href=&#34;https://github.com/huggingface/transformers/releases/tag/v5.3.0&#34;&gt;Transformers release&lt;/a&gt;! You can now use our speech recognition model directly through the Hugging Face Transformers library for seamless integration into your projects.&lt;/strong&gt;&lt;/p&gt;
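&lt;p&gt;&lt;em&gt;A minimal sketch of loading the model through the generic Transformers ASR pipeline, assuming that pipeline supports this checkpoint; the Hugging Face model card is the authoritative reference.&lt;/em&gt;&lt;/p&gt;
&lt;pre&gt;&lt;code class=&#34;language-python&#34;&gt;# Sketch only: assumes the &#34;automatic-speech-recognition&#34; pipeline
# works with the microsoft/VibeVoice-ASR checkpoint linked below;
# consult the model card for the exact entry point.
from transformers import pipeline

asr = pipeline(&#34;automatic-speech-recognition&#34;, model=&#34;microsoft/VibeVoice-ASR&#34;)
result = asr(&#34;meeting.wav&#34;)  # hypothetical local audio file
print(result[&#34;text&#34;])
&lt;/code&gt;&lt;/pre&gt;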
&lt;p&gt;&lt;strong&gt;2026-01-21:&lt;/strong&gt; 📣 We open-sourced &lt;a href=&#34;docs/vibevoice-asr.md&#34;&gt;&lt;strong&gt;VibeVoice-ASR&lt;/strong&gt;&lt;/a&gt;, a unified speech-to-text model designed to handle 60-minute long-form audio in a single pass, generating structured transcriptions containing Who (Speaker), When (Timestamps), and What (Content), with support for User-Customized Context. Try it in &lt;a class=&#34;link&#34; href=&#34;https://aka.ms/vibevoice-asr&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;Playground&lt;/a&gt;.&lt;/p&gt;
&lt;ul&gt;
&lt;li&gt;⭐️ VibeVoice-ASR is natively multilingual, supporting over 50 languages — check the &lt;a class=&#34;link&#34; href=&#34;docs/vibevoice-asr.md#language-distribution&#34; &gt;supported languages&lt;/a&gt; for details.&lt;/li&gt;
&lt;li&gt;🔥 The VibeVoice-ASR &lt;a class=&#34;link&#34; href=&#34;finetuning-asr/README.md&#34; &gt;finetuning code&lt;/a&gt; is now available!&lt;/li&gt;
&lt;li&gt;⚡️ &lt;strong&gt;vLLM inference&lt;/strong&gt; is now supported for faster inference; see &lt;a class=&#34;link&#34; href=&#34;docs/vibevoice-vllm-asr.md&#34; &gt;vllm-asr&lt;/a&gt; for more details.&lt;/li&gt;
&lt;li&gt;📑 &lt;a class=&#34;link&#34; href=&#34;https://arxiv.org/pdf/2601.18184&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;VibeVoice-ASR Technical Report&lt;/a&gt; is available.&lt;/li&gt;
&lt;/ul&gt;
&lt;p&gt;2025-12-16: 📣 We added experimental speakers to &lt;a href=&#34;docs/vibevoice-realtime-0.5b.md&#34;&gt;&lt;strong&gt;VibeVoice‑Realtime‑0.5B&lt;/strong&gt;&lt;/a&gt; for exploration, including multilingual voices in nine languages (DE, FR, IT, JP, KR, NL, PL, PT, ES) and 11 distinct English style voices. &lt;a class=&#34;link&#34; href=&#34;docs/vibevoice-realtime-0.5b.md#optional-more-experimental-voices&#34; &gt;Try it&lt;/a&gt;. More speaker types will be added over time.&lt;/p&gt;
&lt;p&gt;2025-12-03: 📣 We open-sourced &lt;a href=&#34;docs/vibevoice-realtime-0.5b.md&#34;&gt;&lt;strong&gt;VibeVoice‑Realtime‑0.5B&lt;/strong&gt;&lt;/a&gt;, a real‑time text‑to‑speech model that supports streaming text input and robust long-form speech generation. Try it on &lt;a class=&#34;link&#34; href=&#34;https://colab.research.google.com/github/microsoft/VibeVoice/blob/main/demo/vibevoice_realtime_colab.ipynb&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;Colab&lt;/a&gt;.&lt;/p&gt;
&lt;p&gt;2025-09-05: VibeVoice is an open-source research framework intended to advance collaboration in the speech synthesis community. After release, we discovered instances where the tool was used in ways inconsistent with the stated intent. Since responsible use of AI is one of Microsoft’s guiding principles, we have removed the VibeVoice-TTS code from this repository.&lt;/p&gt;
&lt;p&gt;2025-08-25: 📣 We open-sourced &lt;a href=&#34;docs/vibevoice-tts.md&#34;&gt;&lt;strong&gt;VibeVoice-TTS&lt;/strong&gt;&lt;/a&gt;, a long-form multi-speaker text-to-speech model that can synthesize speech up to 90 minutes long with up to 4 distinct speakers, accepted as an &lt;a class=&#34;link&#34; href=&#34;https://openreview.net/forum?id=FihSkzyxdv&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;Oral&lt;/a&gt; at ICLR 2026! 🔥&lt;/p&gt;
&lt;/div&gt;
&lt;h2 id=&#34;overview&#34;&gt;Overview
&lt;/h2&gt;&lt;p&gt;VibeVoice is a &lt;strong&gt;family of open-source frontier voice AI models&lt;/strong&gt; that includes both Text-to-Speech (TTS) and Automatic Speech Recognition (ASR) models.&lt;/p&gt;
&lt;p&gt;A core innovation of VibeVoice is its use of continuous speech tokenizers (Acoustic and Semantic) operating at an ultra-low frame rate of &lt;strong&gt;7.5 Hz&lt;/strong&gt;. These tokenizers efficiently preserve audio fidelity while significantly boosting computational efficiency for processing long sequences. VibeVoice employs a &lt;a class=&#34;link&#34; href=&#34;https://arxiv.org/abs/2412.08635&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;next-token diffusion&lt;/a&gt; framework, leveraging a Large Language Model (LLM) to understand textual context and dialogue flow, and a diffusion head to generate high-fidelity acoustic details.&lt;/p&gt;
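&lt;p&gt;&lt;em&gt;A back-of-envelope check of why the 7.5 Hz frame rate matters: one hour of audio becomes only 27,000 frames, which fits comfortably inside the 64K-token context used by the ASR model described below.&lt;/em&gt;&lt;/p&gt;
&lt;pre&gt;&lt;code class=&#34;language-python&#34;&gt;# Frames produced by a 7.5 Hz tokenizer for 60 minutes of audio.
frame_rate_hz = 7.5
seconds = 60 * 60
frames = frame_rate_hz * seconds    # 27,000 frames per hour
print(frames, frames &lt; 64_000)      # 27000.0 True
&lt;/code&gt;&lt;/pre&gt;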
&lt;p&gt;For more information, demos, and examples, please visit our &lt;a class=&#34;link&#34; href=&#34;https://microsoft.github.io/VibeVoice&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;Project Page&lt;/a&gt;.&lt;/p&gt;
&lt;div align=&#34;center&#34;&gt;
&lt;table&gt;
  &lt;thead&gt;
      &lt;tr&gt;
          &lt;th&gt;Model&lt;/th&gt;
          &lt;th&gt;Weight&lt;/th&gt;
          &lt;th&gt;Quick Try&lt;/th&gt;
      &lt;/tr&gt;
  &lt;/thead&gt;
  &lt;tbody&gt;
      &lt;tr&gt;
          &lt;td&gt;VibeVoice-ASR-7B&lt;/td&gt;
          &lt;td&gt;&lt;a class=&#34;link&#34; href=&#34;https://huggingface.co/microsoft/VibeVoice-ASR&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;HF Link&lt;/a&gt;&lt;/td&gt;
          &lt;td&gt;&lt;a class=&#34;link&#34; href=&#34;https://aka.ms/vibevoice-asr&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;Playground&lt;/a&gt;&lt;/td&gt;
      &lt;/tr&gt;
      &lt;tr&gt;
          &lt;td&gt;VibeVoice-TTS-1.5B&lt;/td&gt;
          &lt;td&gt;&lt;a class=&#34;link&#34; href=&#34;https://huggingface.co/microsoft/VibeVoice-1.5B&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;HF Link&lt;/a&gt;&lt;/td&gt;
          &lt;td&gt;Disabled&lt;/td&gt;
      &lt;/tr&gt;
      &lt;tr&gt;
          &lt;td&gt;VibeVoice-Realtime-0.5B&lt;/td&gt;
          &lt;td&gt;&lt;a class=&#34;link&#34; href=&#34;https://huggingface.co/microsoft/VibeVoice-Realtime-0.5B&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;HF Link&lt;/a&gt;&lt;/td&gt;
          &lt;td&gt;&lt;a class=&#34;link&#34; href=&#34;https://colab.research.google.com/github/microsoft/VibeVoice/blob/main/demo/vibevoice_realtime_colab.ipynb&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;Colab&lt;/a&gt;&lt;/td&gt;
      &lt;/tr&gt;
  &lt;/tbody&gt;
&lt;/table&gt;
&lt;/div&gt;
&lt;h2 id=&#34;models&#34;&gt;Models
&lt;/h2&gt;&lt;h3 id=&#34;1--vibevoice-asr---long-form-speech-recognition&#34;&gt;1. 📖 &lt;a class=&#34;link&#34; href=&#34;docs/vibevoice-asr.md&#34; &gt;VibeVoice-ASR&lt;/a&gt; - Long-form Speech Recognition
&lt;/h3&gt;&lt;p&gt;&lt;strong&gt;VibeVoice-ASR&lt;/strong&gt; is a unified speech-to-text model designed to handle &lt;strong&gt;60-minute long-form audio&lt;/strong&gt; in a single pass, generating structured transcriptions containing &lt;strong&gt;Who (Speaker), When (Timestamps), and What (Content)&lt;/strong&gt;, with support for &lt;strong&gt;Customized Hotwords&lt;/strong&gt;.&lt;/p&gt;
&lt;ul&gt;
&lt;li&gt;
&lt;p&gt;&lt;strong&gt;🕒 60-minute Single-Pass Processing&lt;/strong&gt;:
Unlike conventional ASR models that slice audio into short chunks (often losing global context), VibeVoice ASR accepts up to &lt;strong&gt;60 minutes&lt;/strong&gt; of continuous audio input within a 64K-token context window. This ensures consistent speaker tracking and semantic coherence across the entire hour.&lt;/p&gt;
&lt;/li&gt;
&lt;li&gt;
&lt;p&gt;&lt;strong&gt;👤 Customized Hotwords&lt;/strong&gt;:
Users can provide customized hotwords (e.g., specific names, technical terms, or background info) to guide the recognition process, significantly improving accuracy on domain-specific content.&lt;/p&gt;
&lt;/li&gt;
&lt;li&gt;
&lt;p&gt;&lt;strong&gt;📝 Rich Transcription (Who, When, What)&lt;/strong&gt;:
The model jointly performs ASR, diarization, and timestamping, producing a structured output that indicates &lt;em&gt;who&lt;/em&gt; said &lt;em&gt;what&lt;/em&gt; and &lt;em&gt;when&lt;/em&gt; (see the sketch after this list).&lt;/p&gt;
&lt;/li&gt;
&lt;/ul&gt;
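&lt;p&gt;&lt;em&gt;The structured output can be consumed as plain records. The field names below are hypothetical, used purely for illustration; the model&amp;rsquo;s actual output schema is documented in docs/vibevoice-asr.md.&lt;/em&gt;&lt;/p&gt;
&lt;pre&gt;&lt;code class=&#34;language-python&#34;&gt;# Hypothetical (speaker, start, end, text) records standing in for the
# model&#39;s Who/When/What output; the real schema is in the ASR docs.
segments = [
    (&#34;Speaker 1&#34;, 0.0, 4.2, &#34;Welcome to the meeting.&#34;),
    (&#34;Speaker 2&#34;, 4.2, 7.9, &#34;Thanks, let&#39;s review the agenda.&#34;),
]
for who, start, end, what in segments:
    print(f&#34;[{start:6.1f}s to {end:6.1f}s] {who}: {what}&#34;)
&lt;/code&gt;&lt;/pre&gt;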
&lt;p&gt;&lt;a class=&#34;link&#34; href=&#34;docs/vibevoice-asr.md&#34; &gt;📖 Documentation&lt;/a&gt; | &lt;a class=&#34;link&#34; href=&#34;https://huggingface.co/microsoft/VibeVoice-ASR&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;🤗 Hugging Face&lt;/a&gt; | &lt;a class=&#34;link&#34; href=&#34;https://aka.ms/vibevoice-asr&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;🎮 Playground&lt;/a&gt; | &lt;a class=&#34;link&#34; href=&#34;finetuning-asr/README.md&#34; &gt;🛠️ Finetuning&lt;/a&gt; |  &lt;a class=&#34;link&#34; href=&#34;docs/VibeVoice-ASR-Report.pdf&#34; &gt;📊 Paper&lt;/a&gt;&lt;/p&gt;
&lt;p align=&#34;center&#34;&gt;
  &lt;img src=&#34;Figures/DER.jpg&#34; alt=&#34;DER&#34; width=&#34;50%&#34;&gt;&lt;br&gt;
  &lt;img src=&#34;Figures/cpWER.jpg&#34; alt=&#34;cpWER&#34; width=&#34;50%&#34;&gt;&lt;br&gt;
  &lt;img src=&#34;Figures/tcpWER.jpg&#34; alt=&#34;tcpWER&#34; width=&#34;50%&#34;&gt;
&lt;/p&gt;
&lt;div align=&#34;center&#34; id=&#34;vibevoice-asr&#34;&gt;
&lt;p&gt;&lt;a class=&#34;link&#34; href=&#34;https://github.com/user-attachments/assets/acde5602-dc17-4314-9e3b-c630bc84aefa&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;https://github.com/user-attachments/assets/acde5602-dc17-4314-9e3b-c630bc84aefa&lt;/a&gt;&lt;/p&gt;
&lt;/div&gt;
&lt;br&gt;
&lt;h3 id=&#34;2--vibevoice-tts---long-form-multi-speaker-tts&#34;&gt;2. 🎙️ &lt;a class=&#34;link&#34; href=&#34;docs/vibevoice-tts.md&#34; &gt;VibeVoice-TTS&lt;/a&gt; - Long-form Multi-speaker TTS
&lt;/h3&gt;&lt;p&gt;&lt;strong&gt;Best for&lt;/strong&gt;: Long-form conversational audio, podcasts, multi-speaker dialogues&lt;/p&gt;
&lt;ul&gt;
&lt;li&gt;
&lt;p&gt;&lt;strong&gt;⏱️ 90-minute Long-form Generation&lt;/strong&gt;:
Synthesizes conversational/single-speaker speech up to &lt;strong&gt;90 minutes&lt;/strong&gt; in a single pass, maintaining speaker consistency and semantic coherence throughout.&lt;/p&gt;
&lt;/li&gt;
&lt;li&gt;
&lt;p&gt;&lt;strong&gt;👥 Multi-speaker Support&lt;/strong&gt;:
Supports up to &lt;strong&gt;4 distinct speakers&lt;/strong&gt; in a single conversation, with natural turn-taking and speaker consistency across long dialogues.&lt;/p&gt;
&lt;/li&gt;
&lt;li&gt;
&lt;p&gt;&lt;strong&gt;🎭 Expressive Speech&lt;/strong&gt;:
Generates expressive, natural-sounding speech that captures conversational dynamics and emotional nuances.&lt;/p&gt;
&lt;/li&gt;
&lt;li&gt;
&lt;p&gt;&lt;strong&gt;🌐 Multi-lingual Support&lt;/strong&gt;:
Supports English, Chinese and other languages.&lt;/p&gt;
&lt;/li&gt;
&lt;/ul&gt;
&lt;p&gt;&lt;a class=&#34;link&#34; href=&#34;docs/vibevoice-tts.md&#34; &gt;📖 Documentation&lt;/a&gt; | &lt;a class=&#34;link&#34; href=&#34;https://huggingface.co/microsoft/VibeVoice-1.5B&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;🤗 Hugging Face&lt;/a&gt;  |  &lt;a class=&#34;link&#34; href=&#34;https://arxiv.org/pdf/2508.19205&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;📊 Paper&lt;/a&gt;&lt;/p&gt;
&lt;div align=&#34;center&#34;&gt;
  &lt;img src=&#34;Figures/VibeVoice-TTS-results.jpg&#34; alt=&#34;VibeVoice Results&#34; width=&#34;80%&#34;&gt;
&lt;/div&gt;
&lt;p&gt;&lt;strong&gt;English&lt;/strong&gt;&lt;/p&gt;
&lt;div align=&#34;center&#34;&gt;
&lt;p&gt;&lt;a class=&#34;link&#34; href=&#34;https://github.com/user-attachments/assets/0967027c-141e-4909-bec8-091558b1b784&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;https://github.com/user-attachments/assets/0967027c-141e-4909-bec8-091558b1b784&lt;/a&gt;&lt;/p&gt;
&lt;/div&gt;
&lt;p&gt;&lt;strong&gt;Chinese&lt;/strong&gt;&lt;/p&gt;
&lt;div align=&#34;center&#34;&gt;
&lt;p&gt;&lt;a class=&#34;link&#34; href=&#34;https://github.com/user-attachments/assets/322280b7-3093-4c67-86e3-10be4746c88f&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;https://github.com/user-attachments/assets/322280b7-3093-4c67-86e3-10be4746c88f&lt;/a&gt;&lt;/p&gt;
&lt;/div&gt;
&lt;p&gt;&lt;strong&gt;Cross-Lingual&lt;/strong&gt;&lt;/p&gt;
&lt;div align=&#34;center&#34;&gt;
&lt;p&gt;&lt;a class=&#34;link&#34; href=&#34;https://github.com/user-attachments/assets/838d8ad9-a201-4dde-bb45-8cd3f59ce722&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;https://github.com/user-attachments/assets/838d8ad9-a201-4dde-bb45-8cd3f59ce722&lt;/a&gt;&lt;/p&gt;
&lt;/div&gt;
&lt;p&gt;&lt;strong&gt;Spontaneous Singing&lt;/strong&gt;&lt;/p&gt;
&lt;div align=&#34;center&#34;&gt;
&lt;p&gt;&lt;a class=&#34;link&#34; href=&#34;https://github.com/user-attachments/assets/6f27a8a5-0c60-4f57-87f3-7dea2e11c730&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;https://github.com/user-attachments/assets/6f27a8a5-0c60-4f57-87f3-7dea2e11c730&lt;/a&gt;&lt;/p&gt;
&lt;/div&gt;
&lt;p&gt;&lt;strong&gt;Long Conversation with 4 people&lt;/strong&gt;&lt;/p&gt;
&lt;div align=&#34;center&#34;&gt;
&lt;p&gt;&lt;a class=&#34;link&#34; href=&#34;https://github.com/user-attachments/assets/a357c4b6-9768-495c-a576-1618f6275727&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;https://github.com/user-attachments/assets/a357c4b6-9768-495c-a576-1618f6275727&lt;/a&gt;&lt;/p&gt;
&lt;/div&gt;
&lt;br&gt;
&lt;h3 id=&#34;3--vibevoice-streaming---real-time-streaming-tts&#34;&gt;3. ⚡ &lt;a class=&#34;link&#34; href=&#34;docs/vibevoice-realtime-0.5b.md&#34; &gt;VibeVoice-Streaming&lt;/a&gt; - Real-time Streaming TTS
&lt;/h3&gt;&lt;p&gt;VibeVoice-Realtime is a &lt;strong&gt;lightweight real‑time&lt;/strong&gt; text-to-speech model supporting &lt;strong&gt;streaming text input&lt;/strong&gt; and &lt;strong&gt;robust long-form speech generation&lt;/strong&gt;.&lt;/p&gt;
&lt;ul&gt;
&lt;li&gt;Parameter size: 0.5B (deployment-friendly)&lt;/li&gt;
&lt;li&gt;Real-time TTS (~300 ms latency to first audible output)&lt;/li&gt;
&lt;li&gt;Streaming text input&lt;/li&gt;
&lt;li&gt;Robust long-form speech generation (~10 minutes)&lt;/li&gt;
&lt;/ul&gt;
&lt;p&gt;&lt;a class=&#34;link&#34; href=&#34;docs/vibevoice-realtime-0.5b.md&#34; &gt;📖 Documentation&lt;/a&gt; | &lt;a class=&#34;link&#34; href=&#34;https://huggingface.co/microsoft/VibeVoice-Realtime-0.5B&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;🤗 Hugging Face&lt;/a&gt; | &lt;a class=&#34;link&#34; href=&#34;https://colab.research.google.com/github/microsoft/VibeVoice/blob/main/demo/vibevoice_realtime_colab.ipynb&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;🚀 Colab&lt;/a&gt;&lt;/p&gt;
&lt;div align=&#34;center&#34; id=&#34;generated-example-audio-vibevoice-realtime&#34;&gt;
&lt;p&gt;&lt;a class=&#34;link&#34; href=&#34;https://github.com/user-attachments/assets/0901d274-f6ae-46ef-a0fd-3c4fba4f76dc&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;https://github.com/user-attachments/assets/0901d274-f6ae-46ef-a0fd-3c4fba4f76dc&lt;/a&gt;&lt;/p&gt;
&lt;/div&gt;
&lt;br&gt;
&lt;h2 id=&#34;contributing&#34;&gt;Contributing
&lt;/h2&gt;&lt;p&gt;Please see &lt;a class=&#34;link&#34; href=&#34;CONTRIBUTING.md&#34; &gt;CONTRIBUTING.md&lt;/a&gt; for detailed contribution guidelines.&lt;/p&gt;
&lt;h2 id=&#34;-risks-and-limitations&#34;&gt;⚠️ Risks and Limitations
&lt;/h2&gt;&lt;p&gt;While efforts have been made to optimize VibeVoice through various techniques, it may still produce outputs that are unexpected, biased, or inaccurate. VibeVoice inherits any biases, errors, or omissions produced by its base model (specifically, Qwen2.5-1.5B in this release).&lt;/p&gt;
&lt;p&gt;&lt;strong&gt;Potential for deepfakes and disinformation&lt;/strong&gt;: high-quality synthetic speech can be misused to create convincing fake audio content for impersonation, fraud, or spreading disinformation. Users must ensure transcripts are reliable, check content accuracy, and avoid using generated content in misleading ways. Users are expected to use the generated content and to deploy the models in a lawful manner, in full compliance with all applicable laws and regulations in the relevant jurisdictions. It is best practice to disclose the use of AI when sharing AI-generated content.&lt;/p&gt;
&lt;p&gt;We do not recommend using VibeVoice in commercial or real-world applications without further testing and development. This model is intended for research and development purposes only. Please use responsibly.&lt;/p&gt;
&lt;h2 id=&#34;star-history&#34;&gt;Star History
&lt;/h2&gt;&lt;p&gt;&lt;img src=&#34;https://api.star-history.com/svg?repos=Microsoft/vibevoice&amp;amp;type=date&amp;amp;legend=top-left&#34; loading=&#34;lazy&#34; alt=&#34;Star History Chart&#34;&gt;&lt;/p&gt;
</description>
        </item>
        <item>
        <title>vosk-api</title>
        <link>https://producthunt.programnotes.cn/en/p/vosk-api/</link>
        <pubDate>Thu, 12 Jun 2025 15:29:28 +0800</pubDate>
        
        <guid>https://producthunt.programnotes.cn/en/p/vosk-api/</guid>
        <description>&lt;img src="https://images.unsplash.com/photo-1638825183364-01e8b64b00e0?ixid=M3w0NjAwMjJ8MHwxfHJhbmRvbXx8fHx8fHx8fDE3NDk3MTMzNDd8&amp;ixlib=rb-4.1.0" alt="Featured image of post vosk-api" /&gt;&lt;h1 id=&#34;alphacepvosk-api&#34;&gt;&lt;a class=&#34;link&#34; href=&#34;https://github.com/alphacep/vosk-api&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;alphacep/vosk-api&lt;/a&gt;
&lt;/h1&gt;&lt;h1 id=&#34;vosk-speech-recognition-toolkit&#34;&gt;Vosk Speech Recognition Toolkit
&lt;/h1&gt;&lt;p&gt;Vosk is an offline open source speech recognition toolkit. It enables
speech recognition for 20+ languages and dialects - English, Indian
English, German, French, Spanish, Portuguese, Chinese, Russian, Turkish,
Vietnamese, Italian, Dutch, Catalan, Arabic, Greek, Farsi, Filipino,
Ukrainian, Kazakh, Swedish, Japanese, Esperanto, Hindi, Czech, Polish.
More to come.&lt;/p&gt;
&lt;p&gt;Vosk models are small (about 50 MB) but provide continuous large-vocabulary
transcription, zero-latency response through a streaming API, a reconfigurable
vocabulary, and speaker identification.&lt;/p&gt;
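&lt;p&gt;A minimal sketch of that streaming API in the Python binding, assuming &lt;code&gt;model&lt;/code&gt; is the path to an unpacked Vosk model directory and &lt;code&gt;test.wav&lt;/code&gt; is a 16 kHz mono PCM file:&lt;/p&gt;
&lt;pre&gt;&lt;code class=&#34;language-python&#34;&gt;import json
import wave

from vosk import Model, KaldiRecognizer

wf = wave.open(&#34;test.wav&#34;, &#34;rb&#34;)          # assumed 16 kHz mono PCM
rec = KaldiRecognizer(Model(&#34;model&#34;), wf.getframerate())

while True:
    data = wf.readframes(4000)             # feed audio in small chunks
    if len(data) == 0:
        break
    if rec.AcceptWaveform(data):           # an utterance is complete
        print(json.loads(rec.Result())[&#34;text&#34;])

print(json.loads(rec.FinalResult())[&#34;text&#34;])
&lt;/code&gt;&lt;/pre&gt;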
&lt;p&gt;Speech recognition bindings are implemented for various programming languages,
including Python, Java, Node.js, C#, C++, Rust, and Go.&lt;/p&gt;
&lt;p&gt;Vosk supplies speech recognition for chatbots, smart home appliances, and
virtual assistants. It can also create subtitles for movies and
transcriptions for lectures and interviews.&lt;/p&gt;
&lt;p&gt;Vosk scales from small devices like the Raspberry Pi or an Android smartphone to
big clusters.&lt;/p&gt;
&lt;h1 id=&#34;documentation&#34;&gt;Documentation
&lt;/h1&gt;&lt;p&gt;For installation instructions, examples and documentation visit &lt;a class=&#34;link&#34; href=&#34;https://alphacephei.com/vosk&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;Vosk
Website&lt;/a&gt;.&lt;/p&gt;
</description>
        </item>
        <item>
        <title>KrillinAI</title>
        <link>https://producthunt.programnotes.cn/en/p/krillinai/</link>
        <pubDate>Wed, 16 Apr 2025 15:29:17 +0800</pubDate>
        
        <guid>https://producthunt.programnotes.cn/en/p/krillinai/</guid>
        <description>&lt;img src="https://images.unsplash.com/photo-1727175401108-6e8bf73ca114?ixid=M3w0NjAwMjJ8MHwxfHJhbmRvbXx8fHx8fHx8fDE3NDQ3ODg0NTh8&amp;ixlib=rb-4.0.3" alt="Featured image of post KrillinAI" /&gt;&lt;h1 id=&#34;krillinaikrillinai&#34;&gt;&lt;a class=&#34;link&#34; href=&#34;https://github.com/krillinai/KrillinAI&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;krillinai/KrillinAI&lt;/a&gt;
&lt;/h1&gt;&lt;div align=&#34;center&#34;&gt;
  &lt;img src=&#34;./docs/images/logo.png&#34; alt=&#34;KrillinAI&#34; height=&#34;90&#34;&gt;
&lt;h1 id=&#34;ai-audiovideo-translation-and-dubbing-tool&#34;&gt;AI Audio&amp;amp;Video Translation and Dubbing Tool
&lt;/h1&gt;&lt;p&gt;&lt;a href=&#34;https://trendshift.io/repositories/13360&#34; target=&#34;_blank&#34;&gt;&lt;img src=&#34;https://trendshift.io/api/badge/repositories/13360&#34; alt=&#34;krillinai%2FKrillinAI | Trendshift&#34; style=&#34;width: 250px; height: 55px;&#34; width=&#34;250&#34; height=&#34;55&#34;/&gt;&lt;/a&gt;&lt;/p&gt;
&lt;p&gt;&lt;strong&gt;&lt;a class=&#34;link&#34; href=&#34;./README.md&#34; &gt;English&lt;/a&gt;｜&lt;a class=&#34;link&#34; href=&#34;./docs/README_zh.md&#34; &gt;简体中文&lt;/a&gt;｜&lt;a class=&#34;link&#34; href=&#34;./docs/README_jp.md&#34; &gt;日本語&lt;/a&gt;｜&lt;a class=&#34;link&#34; href=&#34;./docs/README_kr.md&#34; &gt;한국어&lt;/a&gt;｜&lt;a class=&#34;link&#34; href=&#34;./docs/README_fr.md&#34; &gt;Français&lt;/a&gt;｜&lt;a class=&#34;link&#34; href=&#34;./docs/README_de.md&#34; &gt;Deutsch&lt;/a&gt;｜&lt;a class=&#34;link&#34; href=&#34;./docs/README_es.md&#34; &gt;Español&lt;/a&gt;｜&lt;a class=&#34;link&#34; href=&#34;./docs/README_pt.md&#34; &gt;Português&lt;/a&gt;｜&lt;a class=&#34;link&#34; href=&#34;./docs/README_rus.md&#34; &gt;Русский&lt;/a&gt;｜&lt;a class=&#34;link&#34; href=&#34;./docs/README_ar.md&#34; &gt;اللغة العربية&lt;/a&gt;&lt;/strong&gt;&lt;/p&gt;
&lt;p&gt;&lt;a class=&#34;link&#34; href=&#34;https://x.com/KrillinAI&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;&lt;img src=&#34;https://img.shields.io/badge/Twitter-KrillinAI-orange?logo=twitter&#34; loading=&#34;lazy&#34; alt=&#34;Twitter&#34;&gt;&lt;/a&gt;
&lt;a class=&#34;link&#34; href=&#34;https://space.bilibili.com/242124650&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;&lt;img src=&#34;https://img.shields.io/badge/dynamic/json?label=Bilibili&amp;amp;query=%24.data.follower&amp;amp;suffix=%20followers&amp;amp;url=https%3A%2F%2Fapi.bilibili.com%2Fx%2Frelation%2Fstat%3Fvmid%3D242124650&amp;amp;logo=bilibili&amp;amp;color=00A1D6&amp;amp;labelColor=FE7398&amp;amp;logoColor=FFFFFF&#34; loading=&#34;lazy&#34; alt=&#34;Bilibili&#34;&gt;&lt;/a&gt;
&lt;a class=&#34;link&#34; href=&#34;https://jq.qq.com/?_wv=1027&amp;amp;k=754069680&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;&lt;img src=&#34;https://img.shields.io/badge/QQ%20%e7%be%a4-754069680-green?logo=tencent-qq&#34; loading=&#34;lazy&#34; alt=&#34;QQ Group&#34;&gt;&lt;/a&gt;&lt;/p&gt;
&lt;/div&gt;
&lt;h3 id=&#34;-new-release-for-win--mac-desktop-version--welcome-to-test-and-provide-feedback&#34;&gt;📢 New Release for Win &amp;amp; Mac Desktop Version – Welcome to Test and Provide Feedback
&lt;/h3&gt;&lt;h2 id=&#34;overview&#34;&gt;Overview
&lt;/h2&gt;&lt;p&gt;Krillin AI is an all-in-one solution for effortless video localization and enhancement. This minimalist yet powerful tool handles everything from translation and dubbing to voice cloning and formatting, seamlessly converting videos between landscape and portrait modes for optimal display across all major content platforms (YouTube, TikTok, Bilibili, Douyin, WeChat Channel, RedNote, Kuaishou). With its end-to-end workflow, Krillin AI transforms raw footage into polished, platform-ready content in just a few clicks.&lt;/p&gt;
&lt;h2 id=&#34;key-features&#34;&gt;Key Features:
&lt;/h2&gt;&lt;p&gt;🎯 &lt;strong&gt;One-Click Start&lt;/strong&gt; - Launch your workflow instantly; the new desktop version is even easier to use!&lt;/p&gt;
&lt;p&gt;📥 &lt;strong&gt;Video Download&lt;/strong&gt; - supports yt-dlp downloads and local file uploads&lt;/p&gt;
&lt;p&gt;📜 &lt;strong&gt;Precise Subtitles&lt;/strong&gt; - Whisper-powered high-accuracy recognition&lt;/p&gt;
&lt;p&gt;🧠 &lt;strong&gt;Smart Segmentation&lt;/strong&gt; - LLM-based subtitle chunking &amp;amp; alignment&lt;/p&gt;
&lt;p&gt;🌍 &lt;strong&gt;Professional Translation&lt;/strong&gt; - Paragraph-level translation for consistency&lt;/p&gt;
&lt;p&gt;🔄 &lt;strong&gt;Term Replacement&lt;/strong&gt; - One-click domain-specific vocabulary swap&lt;/p&gt;
&lt;p&gt;🎙️ &lt;strong&gt;Dubbing and Voice Cloning&lt;/strong&gt; - choose a CosyVoice preset voice or clone your own&lt;/p&gt;
&lt;p&gt;🎬 &lt;strong&gt;Video Composition&lt;/strong&gt; - Auto-formatting for horizontal/vertical layouts&lt;/p&gt;
&lt;h2 id=&#34;showcase&#34;&gt;Showcase
&lt;/h2&gt;&lt;p&gt;The following examples demonstrate the result of generating a subtitle file in one click from a 46-minute local video and inserting it into the track, with no manual adjustment at all. There are no missing or overlapping subtitles, the sentence segmentation is natural, and the translation quality is high.&lt;/p&gt;
&lt;table&gt;
&lt;tr&gt;
&lt;td width=&#34;33%&#34;&gt;
&lt;h3 id=&#34;subtitle-translation&#34;&gt;Subtitle Translation
&lt;/h3&gt;&lt;hr&gt;
&lt;p&gt;&lt;a class=&#34;link&#34; href=&#34;https://github.com/user-attachments/assets/bba1ac0a-fe6b-4947-b58d-ba99306d0339&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;https://github.com/user-attachments/assets/bba1ac0a-fe6b-4947-b58d-ba99306d0339&lt;/a&gt;&lt;/p&gt;
&lt;/td&gt;
&lt;td width=&#34;33%&#34;&gt;
&lt;h3 id=&#34;dubbing&#34;&gt;Dubbing
&lt;/h3&gt;&lt;hr&gt;
&lt;p&gt;&lt;a class=&#34;link&#34; href=&#34;https://github.com/user-attachments/assets/0b32fad3-c3ad-4b6a-abf0-0865f0dd2385&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;https://github.com/user-attachments/assets/0b32fad3-c3ad-4b6a-abf0-0865f0dd2385&lt;/a&gt;&lt;/p&gt;
&lt;/td&gt;
&lt;td width=&#34;33%&#34;&gt;
&lt;h3 id=&#34;portrait&#34;&gt;Portrait
&lt;/h3&gt;&lt;hr&gt;
&lt;p&gt;&lt;a class=&#34;link&#34; href=&#34;https://github.com/user-attachments/assets/c2c7b528-0ef8-4ba9-b8ac-f9f92f6d4e71&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;https://github.com/user-attachments/assets/c2c7b528-0ef8-4ba9-b8ac-f9f92f6d4e71&lt;/a&gt;&lt;/p&gt;
&lt;/td&gt;
&lt;/tr&gt;
&lt;/table&gt;
&lt;h2 id=&#34;-speech-recognition-support&#34;&gt;🔍 Speech Recognition Support
&lt;/h2&gt;&lt;p&gt;&lt;em&gt;&lt;strong&gt;All local models in the table below support automatic installation of executable files + model files. Just make your selection, and KrillinAI will handle everything else for you.&lt;/strong&gt;&lt;/em&gt;&lt;/p&gt;
&lt;table&gt;
  &lt;thead&gt;
      &lt;tr&gt;
          &lt;th&gt;Service&lt;/th&gt;
          &lt;th&gt;Supported Platforms&lt;/th&gt;
          &lt;th&gt;Model Options&lt;/th&gt;
          &lt;th&gt;Local/Cloud&lt;/th&gt;
          &lt;th&gt;Notes&lt;/th&gt;
      &lt;/tr&gt;
  &lt;/thead&gt;
  &lt;tbody&gt;
      &lt;tr&gt;
          &lt;td&gt;&lt;strong&gt;OpenAI Whisper&lt;/strong&gt;&lt;/td&gt;
          &lt;td&gt;Cross-platform&lt;/td&gt;
          &lt;td&gt;-&lt;/td&gt;
          &lt;td&gt;Cloud&lt;/td&gt;
          &lt;td&gt;Fast with excellent results&lt;/td&gt;
      &lt;/tr&gt;
      &lt;tr&gt;
          &lt;td&gt;&lt;strong&gt;FasterWhisper&lt;/strong&gt;&lt;/td&gt;
          &lt;td&gt;Windows/Linux&lt;/td&gt;
          &lt;td&gt;&lt;code&gt;tiny&lt;/code&gt;/&lt;code&gt;medium&lt;/code&gt;/&lt;code&gt;large-v2&lt;/code&gt; (medium or larger recommended)&lt;/td&gt;
          &lt;td&gt;Local&lt;/td&gt;
          &lt;td&gt;Faster speed, no cloud service overhead&lt;/td&gt;
      &lt;/tr&gt;
      &lt;tr&gt;
          &lt;td&gt;&lt;strong&gt;WhisperKit&lt;/strong&gt;&lt;/td&gt;
          &lt;td&gt;macOS (Apple Silicon only)&lt;/td&gt;
          &lt;td&gt;&lt;code&gt;large-v2&lt;/code&gt;&lt;/td&gt;
          &lt;td&gt;Local&lt;/td&gt;
          &lt;td&gt;Native optimization for Apple chips&lt;/td&gt;
      &lt;/tr&gt;
      &lt;tr&gt;
          &lt;td&gt;&lt;strong&gt;Alibaba Cloud ASR&lt;/strong&gt;&lt;/td&gt;
          &lt;td&gt;Cross-platform&lt;/td&gt;
          &lt;td&gt;-&lt;/td&gt;
          &lt;td&gt;Cloud&lt;/td&gt;
          &lt;td&gt;Bypasses China mainland network issues&lt;/td&gt;
      &lt;/tr&gt;
  &lt;/tbody&gt;
&lt;/table&gt;
&lt;h2 id=&#34;-large-language-model-support&#34;&gt;🚀 Large Language Model Support
&lt;/h2&gt;&lt;p&gt;✅ Compatible with all &lt;strong&gt;OpenAI API-compatible&lt;/strong&gt; cloud/local LLM services (a minimal client sketch follows this list), including but not limited to:&lt;/p&gt;
&lt;ul&gt;
&lt;li&gt;OpenAI&lt;/li&gt;
&lt;li&gt;DeepSeek&lt;/li&gt;
&lt;li&gt;Qwen (Tongyi Qianwen)&lt;/li&gt;
&lt;li&gt;Self-hosted open-source models&lt;/li&gt;
&lt;li&gt;Other OpenAI-format compatible API services&lt;/li&gt;
&lt;/ul&gt;
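&lt;p&gt;KrillinAI itself reads these settings from &lt;code&gt;config.toml&lt;/code&gt;; the sketch below only illustrates what &amp;ldquo;OpenAI API-compatible&amp;rdquo; means, using the official &lt;code&gt;openai&lt;/code&gt; Python client against a custom endpoint (the URL and model name are placeholders):&lt;/p&gt;
&lt;pre&gt;&lt;code class=&#34;language-python&#34;&gt;# Any service exposing the OpenAI chat-completions API shape works:
# point the client at its base URL and pass its model name.
from openai import OpenAI

client = OpenAI(
    base_url=&#34;https://example.com/v1&#34;,  # placeholder compatible endpoint
    api_key=&#34;sk-...&#34;,                   # placeholder key
)
resp = client.chat.completions.create(
    model=&#34;your-model-name&#34;,            # placeholder
    messages=[{&#34;role&#34;: &#34;user&#34;, &#34;content&#34;: &#34;Translate this subtitle line.&#34;}],
)
print(resp.choices[0].message.content)
&lt;/code&gt;&lt;/pre&gt;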
&lt;h2 id=&#34;-language-support&#34;&gt;🌍 Language Support
&lt;/h2&gt;&lt;p&gt;Input languages: Chinese, English, Japanese, German, and Turkish are supported (more languages being added)&lt;br&gt;
Translation languages: 56 languages supported, including English, Chinese, Russian, Spanish, French, etc.&lt;/p&gt;
&lt;h2 id=&#34;interface-preview&#34;&gt;Interface Preview
&lt;/h2&gt;&lt;h2 id=&#34;-quick-start&#34;&gt;🚀 Quick Start
&lt;/h2&gt;&lt;h3 id=&#34;basic-steps&#34;&gt;Basic Steps
&lt;/h3&gt;&lt;p&gt;First, download the Release executable file that matches your device&amp;rsquo;s system. Follow the instructions below to choose between the desktop or non-desktop version, then place the software in an empty folder. Running the program will generate some directories, so keeping it in an empty folder makes management easier.&lt;/p&gt;
&lt;p&gt;[For the desktop version (release files with &amp;ldquo;desktop&amp;rdquo; in the name), refer here]&lt;br&gt;
&lt;em&gt;The desktop version is newly released to address the difficulty beginners face in editing configuration files correctly. It still has some bugs and is being continuously updated.&lt;/em&gt;&lt;/p&gt;
&lt;p&gt;Double-click the file to start using it.&lt;/p&gt;
&lt;p&gt;[For the non-desktop version (release files without &amp;ldquo;desktop&amp;rdquo; in the name), refer here]&lt;br&gt;
&lt;em&gt;The non-desktop version is the original release, with more complex configuration but stable functionality. It is also suitable for server deployment, as it provides a web-based UI.&lt;/em&gt;&lt;/p&gt;
&lt;p&gt;Create a &lt;code&gt;config&lt;/code&gt; folder in the directory, then create a &lt;code&gt;config.toml&lt;/code&gt; file inside it. Copy the contents of the &lt;code&gt;config-example.toml&lt;/code&gt; file from the source code&amp;rsquo;s &lt;code&gt;config&lt;/code&gt; directory into your &lt;code&gt;config.toml&lt;/code&gt; and fill in your configuration details. (If you want to use OpenAI models but don’t know how to get a key, you can join the group for free trial access.)&lt;/p&gt;
&lt;p&gt;Double-click the executable or run it in the terminal to start the service.&lt;/p&gt;
&lt;p&gt;Open your browser and enter http://127.0.0.1:8888 to begin using it. (Replace 8888 with the port number you specified in the config file.)&lt;/p&gt;
&lt;h3 id=&#34;to-macos-users&#34;&gt;To: macOS Users
&lt;/h3&gt;&lt;p&gt;[For the desktop version, i.e., release files with &amp;ldquo;desktop&amp;rdquo; in the name, refer here]&lt;br&gt;
The current packaging method for the desktop version cannot support direct double-click execution or DMG installation due to signing issues. Manual trust configuration is required as follows:&lt;/p&gt;
&lt;ol&gt;
&lt;li&gt;
&lt;p&gt;Open the directory containing the executable file (assuming the filename is KrillinAI_1.0.0_desktop_macOS_arm64) in Terminal&lt;/p&gt;
&lt;/li&gt;
&lt;li&gt;
&lt;p&gt;Execute the following commands sequentially:&lt;/p&gt;
&lt;/li&gt;
&lt;/ol&gt;
&lt;div class=&#34;highlight&#34;&gt;&lt;div class=&#34;chroma&#34;&gt;
&lt;table class=&#34;lntable&#34;&gt;&lt;tr&gt;&lt;td class=&#34;lntd&#34;&gt;
&lt;pre tabindex=&#34;0&#34; class=&#34;chroma&#34;&gt;&lt;code&gt;&lt;span class=&#34;lnt&#34;&gt;1
&lt;/span&gt;&lt;span class=&#34;lnt&#34;&gt;2
&lt;/span&gt;&lt;span class=&#34;lnt&#34;&gt;3
&lt;/span&gt;&lt;/code&gt;&lt;/pre&gt;&lt;/td&gt;
&lt;td class=&#34;lntd&#34;&gt;
&lt;pre tabindex=&#34;0&#34; class=&#34;chroma&#34;&gt;&lt;code class=&#34;language-fallback&#34; data-lang=&#34;fallback&#34;&gt;&lt;span class=&#34;line&#34;&gt;&lt;span class=&#34;cl&#34;&gt;sudo xattr -cr ./KrillinAI_1.0.0_desktop_macOS_arm64  
&lt;/span&gt;&lt;/span&gt;&lt;span class=&#34;line&#34;&gt;&lt;span class=&#34;cl&#34;&gt;sudo chmod +x ./KrillinAI_1.0.0_desktop_macOS_arm64  
&lt;/span&gt;&lt;/span&gt;&lt;span class=&#34;line&#34;&gt;&lt;span class=&#34;cl&#34;&gt;./KrillinAI_1.0.0_desktop_macOS_arm64  
&lt;/span&gt;&lt;/span&gt;&lt;/code&gt;&lt;/pre&gt;&lt;/td&gt;&lt;/tr&gt;&lt;/table&gt;
&lt;/div&gt;
&lt;/div&gt;&lt;p&gt;[For the non-desktop version, i.e., release files without &amp;ldquo;desktop&amp;rdquo; in the name, refer here]&lt;br&gt;
This software is not signed, so after completing the file configuration in the &amp;ldquo;Basic Steps,&amp;rdquo; you will need to manually trust the application on macOS. Follow these steps:&lt;/p&gt;
&lt;ol&gt;
&lt;li&gt;Open the terminal and navigate to the directory where the executable file (assuming the file name is &lt;code&gt;KrillinAI_1.0.0_macOS_arm64&lt;/code&gt;) is located.&lt;/li&gt;
&lt;li&gt;Execute the following commands in sequence:&lt;/li&gt;
&lt;/ol&gt;
&lt;div class=&#34;highlight&#34;&gt;&lt;div class=&#34;chroma&#34;&gt;
&lt;table class=&#34;lntable&#34;&gt;&lt;tr&gt;&lt;td class=&#34;lntd&#34;&gt;
&lt;pre tabindex=&#34;0&#34; class=&#34;chroma&#34;&gt;&lt;code&gt;&lt;span class=&#34;lnt&#34;&gt;1
&lt;/span&gt;&lt;span class=&#34;lnt&#34;&gt;2
&lt;/span&gt;&lt;span class=&#34;lnt&#34;&gt;3
&lt;/span&gt;&lt;/code&gt;&lt;/pre&gt;&lt;/td&gt;
&lt;td class=&#34;lntd&#34;&gt;
&lt;pre tabindex=&#34;0&#34; class=&#34;chroma&#34;&gt;&lt;code class=&#34;language-fallback&#34; data-lang=&#34;fallback&#34;&gt;&lt;span class=&#34;line&#34;&gt;&lt;span class=&#34;cl&#34;&gt;sudo xattr -rd com.apple.quarantine ./KrillinAI_1.0.0_macOS_arm64
&lt;/span&gt;&lt;/span&gt;&lt;span class=&#34;line&#34;&gt;&lt;span class=&#34;cl&#34;&gt;sudo chmod +x ./KrillinAI_1.0.0_macOS_arm64
&lt;/span&gt;&lt;/span&gt;&lt;span class=&#34;line&#34;&gt;&lt;span class=&#34;cl&#34;&gt;./KrillinAI_1.0.0_macOS_arm64
&lt;/span&gt;&lt;/span&gt;&lt;/code&gt;&lt;/pre&gt;&lt;/td&gt;&lt;/tr&gt;&lt;/table&gt;
&lt;/div&gt;
&lt;/div&gt;&lt;p&gt;This will start the service.&lt;/p&gt;
&lt;h3 id=&#34;docker-deployment&#34;&gt;Docker Deployment
&lt;/h3&gt;&lt;p&gt;This project supports Docker deployment. Please refer to the &lt;a class=&#34;link&#34; href=&#34;./docs/docker.md&#34; &gt;Docker Deployment Instructions&lt;/a&gt;.&lt;/p&gt;
&lt;h3 id=&#34;cookie-configuration-instructions&#34;&gt;Cookie Configuration Instructions
&lt;/h3&gt;&lt;p&gt;If you encounter video download failures, please refer to the &lt;a class=&#34;link&#34; href=&#34;./docs/get_cookies.md&#34; &gt;Cookie Configuration Instructions&lt;/a&gt; to configure your cookie information.&lt;/p&gt;
&lt;h3 id=&#34;configuration-help&#34;&gt;Configuration Help
&lt;/h3&gt;&lt;p&gt;The quickest and most convenient configuration method:&lt;/p&gt;
&lt;ul&gt;
&lt;li&gt;Select &lt;code&gt;openai&lt;/code&gt; for both &lt;code&gt;transcription_provider&lt;/code&gt; and &lt;code&gt;llm_provider&lt;/code&gt;. Then, of the three major configuration categories (&lt;code&gt;openai&lt;/code&gt;, &lt;code&gt;local_model&lt;/code&gt;, and &lt;code&gt;aliyun&lt;/code&gt;), you only need to fill in &lt;code&gt;openai.apikey&lt;/code&gt; to start translating subtitles. Fill in &lt;code&gt;app.proxy&lt;/code&gt;, &lt;code&gt;model&lt;/code&gt;, and &lt;code&gt;openai.base_url&lt;/code&gt; as your own situation requires; see the sketch after this list.&lt;/li&gt;
&lt;/ul&gt;
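&lt;p&gt;A hypothetical minimal &lt;code&gt;config.toml&lt;/code&gt; for this quickest setup, using only the key names mentioned above; the authoritative layout is &lt;code&gt;config-example.toml&lt;/code&gt; in the source repository:&lt;/p&gt;
&lt;pre&gt;&lt;code class=&#34;language-toml&#34;&gt;# Sketch only: the section layout is inferred from the key names above;
# copy config-example.toml for the real structure.
transcription_provider = &#34;openai&#34;
llm_provider = &#34;openai&#34;

[app]
proxy = &#34;&#34;              # fill in only if you need a proxy

[openai]
apikey = &#34;sk-...&#34;       # your OpenAI API key (placeholder)
base_url = &#34;&#34;           # leave empty for the default endpoint
model = &#34;gpt-4o&#34;        # placeholder; set per your own situation
&lt;/code&gt;&lt;/pre&gt;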
&lt;p&gt;To use a local speech recognition model instead (not yet supported on macOS), a choice that balances cost, speed, and quality:&lt;/p&gt;
&lt;ul&gt;
&lt;li&gt;Set &lt;code&gt;transcription_provider&lt;/code&gt; to &lt;code&gt;fasterwhisper&lt;/code&gt; and &lt;code&gt;llm_provider&lt;/code&gt; to &lt;code&gt;openai&lt;/code&gt;. Then you only need to fill in &lt;code&gt;openai.apikey&lt;/code&gt; and &lt;code&gt;local_model.faster_whisper&lt;/code&gt; (in the &lt;code&gt;openai&lt;/code&gt; and &lt;code&gt;local_model&lt;/code&gt; categories) to start translating subtitles; the local model is downloaded automatically. (&lt;code&gt;app.proxy&lt;/code&gt; and &lt;code&gt;openai.base_url&lt;/code&gt; work as above.)&lt;/li&gt;
&lt;/ul&gt;
&lt;p&gt;The following situations require Alibaba Cloud configuration:&lt;/p&gt;
&lt;ul&gt;
&lt;li&gt;If &lt;code&gt;llm_provider&lt;/code&gt; is set to &lt;code&gt;aliyun&lt;/code&gt;, Alibaba Cloud&amp;rsquo;s large-model service is used, so the &lt;code&gt;aliyun.bailian&lt;/code&gt; item must be configured.&lt;/li&gt;
&lt;li&gt;If &lt;code&gt;transcription_provider&lt;/code&gt; is set to &lt;code&gt;aliyun&lt;/code&gt;, or if the &amp;ldquo;voice dubbing&amp;rdquo; function is enabled when starting a task, Alibaba Cloud&amp;rsquo;s voice service is used, so the &lt;code&gt;aliyun.speech&lt;/code&gt; item must be configured.&lt;/li&gt;
&lt;li&gt;If the &amp;ldquo;voice dubbing&amp;rdquo; function is enabled and local audio files are uploaded for voice timbre cloning, Alibaba Cloud&amp;rsquo;s OSS storage service is also used, so the &lt;code&gt;aliyun.oss&lt;/code&gt; item must be configured.
Configuration guide: &lt;a class=&#34;link&#34; href=&#34;./docs/aliyun.md&#34; &gt;Alibaba Cloud Configuration Instructions&lt;/a&gt;&lt;/li&gt;
&lt;/ul&gt;
&lt;h2 id=&#34;frequently-asked-questions&#34;&gt;Frequently Asked Questions
&lt;/h2&gt;&lt;p&gt;Please refer to &lt;a class=&#34;link&#34; href=&#34;./docs/faq.md&#34; &gt;Frequently Asked Questions&lt;/a&gt;&lt;/p&gt;
&lt;h2 id=&#34;contribution-guidelines&#34;&gt;Contribution Guidelines
&lt;/h2&gt;&lt;ul&gt;
&lt;li&gt;Do not submit unnecessary files like &lt;code&gt;.vscode&lt;/code&gt;, &lt;code&gt;.idea&lt;/code&gt;, etc. Please make good use of &lt;code&gt;.gitignore&lt;/code&gt; to filter them.&lt;/li&gt;
&lt;li&gt;Do not submit &lt;code&gt;config.toml&lt;/code&gt;; instead, submit &lt;code&gt;config-example.toml&lt;/code&gt;.&lt;/li&gt;
&lt;/ul&gt;
&lt;h2 id=&#34;star-history&#34;&gt;Star History
&lt;/h2&gt;&lt;p&gt;&lt;a class=&#34;link&#34; href=&#34;https://star-history.com/#krillinai/KrillinAI&amp;amp;Date&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;&lt;img src=&#34;https://api.star-history.com/svg?repos=krillinai/KrillinAI&amp;amp;type=Date&#34; loading=&#34;lazy&#34; alt=&#34;Star History Chart&#34;&gt;&lt;/a&gt;&lt;/p&gt;
</description>
        </item>
        <item>
        <title>FunASR</title>
        <link>https://producthunt.programnotes.cn/en/p/funasr/</link>
        <pubDate>Wed, 09 Apr 2025 15:29:01 +0800</pubDate>
        
        <guid>https://producthunt.programnotes.cn/en/p/funasr/</guid>
        <description>&lt;img src="https://images.unsplash.com/photo-1695807216937-fddfaa1f63ac?ixid=M3w0NjAwMjJ8MHwxfHJhbmRvbXx8fHx8fHx8fDE3NDQxODM2NTd8&amp;ixlib=rb-4.0.3" alt="Featured image of post FunASR" /&gt;&lt;h1 id=&#34;modelscopefunasr&#34;&gt;&lt;a class=&#34;link&#34; href=&#34;https://github.com/modelscope/FunASR&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;modelscope/FunASR&lt;/a&gt;
&lt;/h1&gt;&lt;p&gt;(&lt;a class=&#34;link&#34; href=&#34;./README_zh.md&#34; &gt;简体中文&lt;/a&gt;|English)&lt;/p&gt;
&lt;p&gt;&lt;a class=&#34;link&#34; href=&#34;https://github.com/Akshay090/svg-banners&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;&lt;img src=&#34;https://svg-banners.vercel.app/api?type=origin&amp;amp;text1=FunASR%f0%9f%a4%a0&amp;amp;text2=%f0%9f%92%96%20A%20Fundamental%20End-to-End%20Speech%20Recognition%20Toolkit&amp;amp;width=800&amp;amp;height=210&#34; loading=&#34;lazy&#34; alt=&#34;SVG Banners&#34;&gt;&lt;/a&gt;&lt;/p&gt;
&lt;p&gt;&lt;a class=&#34;link&#34; href=&#34;https://pypi.org/project/funasr/&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;&lt;img src=&#34;https://img.shields.io/pypi/v/funasr&#34; loading=&#34;lazy&#34; alt=&#34;PyPI&#34;&gt;&lt;/a&gt;&lt;/p&gt;
&lt;p align=&#34;center&#34;&gt;
&lt;a href=&#34;https://trendshift.io/repositories/3839&#34; target=&#34;_blank&#34;&gt;&lt;img src=&#34;https://trendshift.io/api/badge/repositories/3839&#34; alt=&#34;alibaba-damo-academy%2FFunASR | Trendshift&#34; style=&#34;width: 250px; height: 55px;&#34; width=&#34;250&#34; height=&#34;55&#34;/&gt;&lt;/a&gt;
&lt;/p&gt;
&lt;p&gt;&lt;strong&gt;FunASR&lt;/strong&gt; hopes to build a bridge between academic research and industrial applications of speech recognition. By supporting the training and finetuning of industrial-grade speech recognition models, it lets researchers and developers conduct research and production of speech recognition models more conveniently, promoting the development of the speech recognition ecosystem. ASR for fun!&lt;/p&gt;
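&lt;p&gt;As a small illustration of that convenience, here is a minimal inference sketch with the &lt;code&gt;funasr&lt;/code&gt; Python package, assuming the FunASR 1.0 &lt;code&gt;AutoModel&lt;/code&gt; interface and a local &lt;code&gt;audio.wav&lt;/code&gt; file; the Quick Start and Tutorial links below are the authoritative reference:&lt;/p&gt;
&lt;pre&gt;&lt;code class=&#34;language-python&#34;&gt;# Sketch assuming the FunASR 1.0 AutoModel API: Paraformer for ASR,
# plus VAD and punctuation-restoration models in one pipeline.
from funasr import AutoModel

model = AutoModel(
    model=&#34;paraformer-zh&#34;,   # ASR model name on ModelScope
    vad_model=&#34;fsmn-vad&#34;,    # voice activity detection
    punc_model=&#34;ct-punc&#34;,    # punctuation restoration
)
result = model.generate(input=&#34;audio.wav&#34;)  # hypothetical local file
print(result)
&lt;/code&gt;&lt;/pre&gt;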
&lt;p&gt;&lt;a class=&#34;link&#34; href=&#34;#highlights&#34; &gt;&lt;strong&gt;Highlights&lt;/strong&gt;&lt;/a&gt;
| &lt;a class=&#34;link&#34; href=&#34;https://github.com/alibaba-damo-academy/FunASR#whats-new&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;&lt;strong&gt;News&lt;/strong&gt;&lt;/a&gt;
| &lt;a class=&#34;link&#34; href=&#34;#installation&#34; &gt;&lt;strong&gt;Installation&lt;/strong&gt;&lt;/a&gt;
| &lt;a class=&#34;link&#34; href=&#34;#quick-start&#34; &gt;&lt;strong&gt;Quick Start&lt;/strong&gt;&lt;/a&gt;
| &lt;a class=&#34;link&#34; href=&#34;https://github.com/alibaba-damo-academy/FunASR/blob/main/docs/tutorial/README.md&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;&lt;strong&gt;Tutorial&lt;/strong&gt;&lt;/a&gt;
| &lt;a class=&#34;link&#34; href=&#34;./runtime/readme.md&#34; &gt;&lt;strong&gt;Runtime&lt;/strong&gt;&lt;/a&gt;
| &lt;a class=&#34;link&#34; href=&#34;#model-zoo&#34; &gt;&lt;strong&gt;Model Zoo&lt;/strong&gt;&lt;/a&gt;
| &lt;a class=&#34;link&#34; href=&#34;#contact&#34; &gt;&lt;strong&gt;Contact&lt;/strong&gt;&lt;/a&gt;&lt;/p&gt;
&lt;p&gt;&lt;a name=&#34;highlights&#34;&gt;&lt;/a&gt;&lt;/p&gt;
&lt;h2 id=&#34;highlights&#34;&gt;Highlights
&lt;/h2&gt;&lt;ul&gt;
&lt;li&gt;FunASR is a fundamental speech recognition toolkit that offers a variety of features, including speech recognition (ASR), Voice Activity Detection (VAD), Punctuation Restoration, Language Models, Speaker Verification, Speaker Diarization and multi-talker ASR. FunASR provides convenient scripts and tutorials, supporting inference and fine-tuning of pre-trained models.&lt;/li&gt;
&lt;li&gt;We have released a vast collection of academic and industrial pretrained models on the &lt;a class=&#34;link&#34; href=&#34;https://www.modelscope.cn/models?page=1&amp;amp;tasks=auto-speech-recognition&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;ModelScope&lt;/a&gt; and &lt;a class=&#34;link&#34; href=&#34;https://huggingface.co/FunASR&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;huggingface&lt;/a&gt;, which can be accessed through our &lt;a class=&#34;link&#34; href=&#34;https://github.com/alibaba-damo-academy/FunASR/blob/main/docs/model_zoo/modelscope_models.md&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;Model Zoo&lt;/a&gt;. The representative &lt;a class=&#34;link&#34; href=&#34;https://www.modelscope.cn/models/damo/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch/summary&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;Paraformer-large&lt;/a&gt;, a non-autoregressive end-to-end speech recognition model, has the advantages of high accuracy, high efficiency, and convenient deployment, supporting the rapid construction of speech recognition services. For more details on service deployment, please refer to the &lt;a class=&#34;link&#34; href=&#34;runtime/readme_cn.md&#34; &gt;service deployment document&lt;/a&gt;.&lt;/li&gt;
&lt;/ul&gt;
&lt;p&gt;&lt;a name=&#34;whats-new&#34;&gt;&lt;/a&gt;&lt;/p&gt;
&lt;h2 id=&#34;whats-new&#34;&gt;What&amp;rsquo;s new:
&lt;/h2&gt;&lt;ul&gt;
&lt;li&gt;2024/10/29: Real-time Transcription Service 1.12 released; the 2pass-offline mode supports the SenseVoiceSmall model (&lt;a class=&#34;link&#34; href=&#34;runtime/readme.md&#34; &gt;docs&lt;/a&gt;);&lt;/li&gt;
&lt;li&gt;2024/10/10: Added support for the Whisper-large-v3-turbo model, a multitasking model that can perform multilingual speech recognition, speech translation, and language identification. It can be downloaded from &lt;a class=&#34;link&#34; href=&#34;examples/industrial_data_pretraining/whisper/demo.py&#34; &gt;modelscope&lt;/a&gt; and &lt;a class=&#34;link&#34; href=&#34;examples/industrial_data_pretraining/whisper/demo_from_openai.py&#34; &gt;openai&lt;/a&gt;.&lt;/li&gt;
&lt;li&gt;2024/09/26: Offline File Transcription Service 4.6, Offline File Transcription Service of English 1.7, and Real-time Transcription Service 1.11 released, fixing a memory leak and adding support for the SenseVoiceSmall ONNX model; File Transcription Service 2.0 GPU released, fixing a GPU memory leak (&lt;a class=&#34;link&#34; href=&#34;runtime/readme.md&#34; &gt;docs&lt;/a&gt;);&lt;/li&gt;
&lt;li&gt;2024/09/25: Keyword spotting models are newly supported, with fine-tuning and inference for four models: &lt;a class=&#34;link&#34; href=&#34;https://modelscope.cn/models/iic/speech_sanm_kws_phone-xiaoyun-commands-online&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;fsmn_kws&lt;/a&gt;, &lt;a class=&#34;link&#34; href=&#34;https://modelscope.cn/models/iic/speech_sanm_kws_phone-xiaoyun-commands-online&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;fsmn_kws_mt&lt;/a&gt;, &lt;a class=&#34;link&#34; href=&#34;https://modelscope.cn/models/iic/speech_sanm_kws_phone-xiaoyun-commands-offline&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;sanm_kws&lt;/a&gt;, &lt;a class=&#34;link&#34; href=&#34;https://modelscope.cn/models/iic/speech_sanm_kws_phone-xiaoyun-commands-online&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;sanm_kws_streaming&lt;/a&gt;.&lt;/li&gt;
&lt;li&gt;2024/07/04: &lt;a class=&#34;link&#34; href=&#34;https://github.com/FunAudioLLM/SenseVoice&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;SenseVoice&lt;/a&gt; is a speech foundation model with multiple speech understanding capabilities, including ASR, LID, SER, and AED.&lt;/li&gt;
&lt;li&gt;2024/07/01: Offline File Transcription Service GPU 1.1 released, optimizing BladeDISC model compatibility issues; refer to (&lt;a class=&#34;link&#34; href=&#34;runtime/readme.md&#34; &gt;docs&lt;/a&gt;)&lt;/li&gt;
&lt;li&gt;2024/06/27: Offline File Transcription Service GPU 1.0 released, supporting dynamic batch processing and multi-threaded concurrency. On the long-audio test set, the single-thread RTF is 0.0076, and the multi-threaded speedup is 1200+ (compared to 330+ on CPU); refer to (&lt;a class=&#34;link&#34; href=&#34;runtime/readme.md&#34; &gt;docs&lt;/a&gt;)&lt;/li&gt;
&lt;li&gt;2024/05/15: Emotion recognition models are newly supported: &lt;a class=&#34;link&#34; href=&#34;https://modelscope.cn/models/iic/emotion2vec_plus_large/summary&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;emotion2vec+large&lt;/a&gt;, &lt;a class=&#34;link&#34; href=&#34;https://modelscope.cn/models/iic/emotion2vec_plus_base/summary&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;emotion2vec+base&lt;/a&gt;, &lt;a class=&#34;link&#34; href=&#34;https://modelscope.cn/models/iic/emotion2vec_plus_seed/summary&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;emotion2vec+seed&lt;/a&gt;. They currently support the following categories: 0: angry, 1: happy, 2: neutral, 3: sad, 4: unknown.&lt;/li&gt;
&lt;li&gt;2024/05/15: Offline File Transcription Service 4.5, Offline File Transcription Service of English 1.6, and Real-time Transcription Service 1.10 released, adapting to the FunASR 1.0 model structure (&lt;a class=&#34;link&#34; href=&#34;runtime/readme.md&#34; &gt;docs&lt;/a&gt;)&lt;/li&gt;
&lt;/ul&gt;
&lt;details&gt;&lt;summary&gt;Full Changelog&lt;/summary&gt;
&lt;ul&gt;
&lt;li&gt;2024/03/05: Added the Qwen-Audio and Qwen-Audio-Chat large-scale audio-text multimodal models, which have topped multiple audio-domain leaderboards. These models support speech dialogue; see &lt;a class=&#34;link&#34; href=&#34;examples/industrial_data_pretraining/qwen_audio&#34; &gt;usage&lt;/a&gt;.&lt;/li&gt;
&lt;li&gt;2024/03/05: Added support for the Whisper-large-v3 model, a multitasking model that can perform multilingual speech recognition, speech translation, and language identification. It can be downloaded from &lt;a class=&#34;link&#34; href=&#34;examples/industrial_data_pretraining/whisper/demo.py&#34; &gt;modelscope&lt;/a&gt; and &lt;a class=&#34;link&#34; href=&#34;examples/industrial_data_pretraining/whisper/demo_from_openai.py&#34; &gt;openai&lt;/a&gt;.&lt;/li&gt;
&lt;li&gt;2024/03/05: Offline File Transcription Service 4.4, Offline File Transcription Service of English 1.5, and Real-time Transcription Service 1.9 released; the docker image now supports the ARM64 platform, and modelscope is updated (&lt;a class=&#34;link&#34; href=&#34;runtime/readme.md&#34; &gt;docs&lt;/a&gt;)&lt;/li&gt;
&lt;li&gt;2024/01/30: funasr-1.0 has been released (&lt;a class=&#34;link&#34; href=&#34;https://github.com/alibaba-damo-academy/FunASR/discussions/1319&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;docs&lt;/a&gt;)&lt;/li&gt;
&lt;li&gt;2024/01/30: Emotion recognition models are newly supported: &lt;a class=&#34;link&#34; href=&#34;https://www.modelscope.cn/models/iic/emotion2vec_base_finetuned/summary&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;model link&lt;/a&gt;, modified from &lt;a class=&#34;link&#34; href=&#34;https://github.com/ddlBoJack/emotion2vec&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;repo&lt;/a&gt;.&lt;/li&gt;
&lt;li&gt;2024/01/25: Offline File Transcription Service 4.2 and Offline File Transcription Service of English 1.3 released, optimizing the VAD (Voice Activity Detection) data processing method, significantly reducing peak memory usage, with memory-leak fixes; Real-time Transcription Service 1.7 released, optimizing the client side (&lt;a class=&#34;link&#34; href=&#34;runtime/readme.md&#34; &gt;docs&lt;/a&gt;)&lt;/li&gt;
&lt;li&gt;2024/01/09: The FunASR SDK for Windows version 2.0 has been released, featuring support for the offline file transcription service (CPU) of Mandarin 4.1, the offline file transcription service (CPU) of English 1.2, and the real-time transcription service (CPU) of Mandarin 1.6. For more details, please refer to the official documentation or release notes (&lt;a class=&#34;link&#34; href=&#34;https://www.modelscope.cn/models/damo/funasr-runtime-win-cpu-x64/summary&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;FunASR-Runtime-Windows&lt;/a&gt;)&lt;/li&gt;
&lt;li&gt;2024/01/03: File Transcription Service 4.0 released, adding support for 8k models, optimizing timestamp mismatch issues, adding sentence-level timestamps, improving the effectiveness of English-word FST hotwords, supporting automated configuration of thread parameters, and fixing known crash issues as well as memory-leak problems; refer to (&lt;a class=&#34;link&#34; href=&#34;runtime/readme.md#file-transcription-service-mandarin-cpu&#34; &gt;docs&lt;/a&gt;).&lt;/li&gt;
&lt;li&gt;2024/01/03: Real-time Transcription Service 1.6 released; the 2pass-offline mode supports Ngram language-model decoding and WFST hotwords, while also addressing known crash issues and memory-leak problems (&lt;a class=&#34;link&#34; href=&#34;runtime/readme.md#the-real-time-transcription-service-mandarin-cpu&#34; &gt;docs&lt;/a&gt;)&lt;/li&gt;
&lt;li&gt;2024/01/03: Fixed known crash issues as well as memory-leak problems (&lt;a class=&#34;link&#34; href=&#34;runtime/readme.md#file-transcription-service-english-cpu&#34; &gt;docs&lt;/a&gt;).&lt;/li&gt;
&lt;li&gt;2023/12/04: The FunASR SDK for Windows version 1.0 has been released, featuring support for the offline file transcription service (CPU) of Mandarin, the offline file transcription service (CPU) of English, and the real-time transcription service (CPU) of Mandarin. For more details, please refer to the official documentation or release notes (&lt;a class=&#34;link&#34; href=&#34;https://www.modelscope.cn/models/damo/funasr-runtime-win-cpu-x64/summary&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;FunASR-Runtime-Windows&lt;/a&gt;)&lt;/li&gt;
&lt;li&gt;2023/11/08: The offline file transcription service 3.0 (CPU) of Mandarin has been released, adding a large punctuation model, an Ngram language model, and WFST hotwords. For detailed information, please refer to &lt;a class=&#34;link&#34; href=&#34;runtime#file-transcription-service-mandarin-cpu&#34; &gt;docs&lt;/a&gt;.&lt;/li&gt;
&lt;li&gt;2023/10/17: The offline file transcription service (CPU) of English has been released. For more details, please refer to (&lt;a class=&#34;link&#34; href=&#34;runtime#file-transcription-service-english-cpu&#34; &gt;docs&lt;/a&gt;).&lt;/li&gt;
&lt;li&gt;2023/10/13: &lt;a class=&#34;link&#34; href=&#34;https://slidespeech.github.io/&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;SlideSpeech&lt;/a&gt;: A large-scale multi-modal audio-visual corpus with a significant amount of real-time synchronized slides.&lt;/li&gt;
&lt;li&gt;2023/10/10: The combined ASR and speaker-diarization pipeline &lt;a class=&#34;link&#34; href=&#34;https://github.com/alibaba-damo-academy/FunASR/blob/main/egs_modelscope/asr_vad_spk/speech_paraformer-large-vad-punc-spk_asr_nat-zh-cn/demo.py&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;Paraformer-VAD-SPK&lt;/a&gt; is now released. Try the model to get recognition results with speaker information.&lt;/li&gt;
&lt;li&gt;2023/10/07: &lt;a class=&#34;link&#34; href=&#34;https://github.com/alibaba-damo-academy/FunCodec&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;FunCodec&lt;/a&gt;: A Fundamental, Reproducible and Integrable Open-source Toolkit for Neural Speech Codec.&lt;/li&gt;
&lt;li&gt;2023/09/01: The offline file transcription service 2.0 (CPU) of Mandarin has been released, with added support for ffmpeg, timestamp, and hotword models. For more details, please refer to (&lt;a class=&#34;link&#34; href=&#34;runtime#file-transcription-service-mandarin-cpu&#34; &gt;docs&lt;/a&gt;).&lt;/li&gt;
&lt;li&gt;2023/08/07: The real-time transcription service (CPU) of Mandarin has been released. For more details, please refer to (&lt;a class=&#34;link&#34; href=&#34;runtime#the-real-time-transcription-service-mandarin-cpu&#34; &gt;docs&lt;/a&gt;).&lt;/li&gt;
&lt;li&gt;2023/07/17: BAT, a low-latency, low-memory-consumption RNN-T model, has been released. For more details, please refer to (&lt;a class=&#34;link&#34; href=&#34;egs/aishell/bat&#34; &gt;BAT&lt;/a&gt;).&lt;/li&gt;
&lt;li&gt;2023/06/26: The ASRU2023 Multi-Channel Multi-Party Meeting Transcription Challenge 2.0 has concluded and its results have been announced. For more details, please refer to (&lt;a class=&#34;link&#34; href=&#34;https://alibaba-damo-academy.github.io/FunASR/m2met2/index.html&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;M2MeT2.0&lt;/a&gt;).&lt;/li&gt;
&lt;/ul&gt;
&lt;/details&gt;
&lt;p&gt;&lt;a name=&#34;Installation&#34;&gt;&lt;/a&gt;&lt;/p&gt;
&lt;h2 id=&#34;installation&#34;&gt;Installation
&lt;/h2&gt;&lt;ul&gt;
&lt;li&gt;Requirements&lt;/li&gt;
&lt;/ul&gt;
&lt;div class=&#34;highlight&#34;&gt;&lt;div class=&#34;chroma&#34;&gt;
&lt;table class=&#34;lntable&#34;&gt;&lt;tr&gt;&lt;td class=&#34;lntd&#34;&gt;
&lt;pre tabindex=&#34;0&#34; class=&#34;chroma&#34;&gt;&lt;code&gt;&lt;span class=&#34;lnt&#34;&gt;1
&lt;/span&gt;&lt;span class=&#34;lnt&#34;&gt;2
&lt;/span&gt;&lt;span class=&#34;lnt&#34;&gt;3
&lt;/span&gt;&lt;/code&gt;&lt;/pre&gt;&lt;/td&gt;
&lt;td class=&#34;lntd&#34;&gt;
&lt;pre tabindex=&#34;0&#34; class=&#34;chroma&#34;&gt;&lt;code class=&#34;language-text&#34; data-lang=&#34;text&#34;&gt;&lt;span class=&#34;line&#34;&gt;&lt;span class=&#34;cl&#34;&gt;python&amp;gt;=3.8
&lt;/span&gt;&lt;/span&gt;&lt;span class=&#34;line&#34;&gt;&lt;span class=&#34;cl&#34;&gt;torch&amp;gt;=1.13
&lt;/span&gt;&lt;/span&gt;&lt;span class=&#34;line&#34;&gt;&lt;span class=&#34;cl&#34;&gt;torchaudio
&lt;/span&gt;&lt;/span&gt;&lt;/code&gt;&lt;/pre&gt;&lt;/td&gt;&lt;/tr&gt;&lt;/table&gt;
&lt;/div&gt;
&lt;/div&gt;&lt;ul&gt;
&lt;li&gt;Install from PyPI&lt;/li&gt;
&lt;/ul&gt;
&lt;div class=&#34;highlight&#34;&gt;&lt;div class=&#34;chroma&#34;&gt;
&lt;table class=&#34;lntable&#34;&gt;&lt;tr&gt;&lt;td class=&#34;lntd&#34;&gt;
&lt;pre tabindex=&#34;0&#34; class=&#34;chroma&#34;&gt;&lt;code&gt;&lt;span class=&#34;lnt&#34;&gt;1
&lt;/span&gt;&lt;/code&gt;&lt;/pre&gt;&lt;/td&gt;
&lt;td class=&#34;lntd&#34;&gt;
&lt;pre tabindex=&#34;0&#34; class=&#34;chroma&#34;&gt;&lt;code class=&#34;language-shell&#34; data-lang=&#34;shell&#34;&gt;&lt;span class=&#34;line&#34;&gt;&lt;span class=&#34;cl&#34;&gt;pip3 install -U funasr
&lt;/span&gt;&lt;/span&gt;&lt;/code&gt;&lt;/pre&gt;&lt;/td&gt;&lt;/tr&gt;&lt;/table&gt;
&lt;/div&gt;
&lt;/div&gt;&lt;ul&gt;
&lt;li&gt;Or install from source code&lt;/li&gt;
&lt;/ul&gt;
&lt;div class=&#34;highlight&#34;&gt;&lt;div class=&#34;chroma&#34;&gt;
&lt;table class=&#34;lntable&#34;&gt;&lt;tr&gt;&lt;td class=&#34;lntd&#34;&gt;
&lt;pre tabindex=&#34;0&#34; class=&#34;chroma&#34;&gt;&lt;code&gt;&lt;span class=&#34;lnt&#34;&gt;1
&lt;/span&gt;&lt;span class=&#34;lnt&#34;&gt;2
&lt;/span&gt;&lt;/code&gt;&lt;/pre&gt;&lt;/td&gt;
&lt;td class=&#34;lntd&#34;&gt;
&lt;pre tabindex=&#34;0&#34; class=&#34;chroma&#34;&gt;&lt;code class=&#34;language-sh&#34; data-lang=&#34;sh&#34;&gt;&lt;span class=&#34;line&#34;&gt;&lt;span class=&#34;cl&#34;&gt;git clone https://github.com/alibaba/FunASR.git &lt;span class=&#34;o&#34;&gt;&amp;amp;&amp;amp;&lt;/span&gt; &lt;span class=&#34;nb&#34;&gt;cd&lt;/span&gt; FunASR
&lt;/span&gt;&lt;/span&gt;&lt;span class=&#34;line&#34;&gt;&lt;span class=&#34;cl&#34;&gt;pip3 install -e ./
&lt;/span&gt;&lt;/span&gt;&lt;/code&gt;&lt;/pre&gt;&lt;/td&gt;&lt;/tr&gt;&lt;/table&gt;
&lt;/div&gt;
&lt;/div&gt;&lt;ul&gt;
&lt;li&gt;Install modelscope or huggingface_hub to download the pretrained models (optional)&lt;/li&gt;
&lt;/ul&gt;
&lt;div class=&#34;highlight&#34;&gt;&lt;div class=&#34;chroma&#34;&gt;
&lt;table class=&#34;lntable&#34;&gt;&lt;tr&gt;&lt;td class=&#34;lntd&#34;&gt;
&lt;pre tabindex=&#34;0&#34; class=&#34;chroma&#34;&gt;&lt;code&gt;&lt;span class=&#34;lnt&#34;&gt;1
&lt;/span&gt;&lt;/code&gt;&lt;/pre&gt;&lt;/td&gt;
&lt;td class=&#34;lntd&#34;&gt;
&lt;pre tabindex=&#34;0&#34; class=&#34;chroma&#34;&gt;&lt;code class=&#34;language-shell&#34; data-lang=&#34;shell&#34;&gt;&lt;span class=&#34;line&#34;&gt;&lt;span class=&#34;cl&#34;&gt;pip3 install -U modelscope huggingface_hub
&lt;/span&gt;&lt;/span&gt;&lt;/code&gt;&lt;/pre&gt;&lt;/td&gt;&lt;/tr&gt;&lt;/table&gt;
&lt;/div&gt;
&lt;/div&gt;&lt;h2 id=&#34;model-zoo&#34;&gt;Model Zoo
&lt;/h2&gt;&lt;p&gt;FunASR has open-sourced a large number of pre-trained models on industrial data. You are free to use, copy, modify, and share FunASR models under the &lt;a class=&#34;link&#34; href=&#34;./MODEL_LICENSE&#34; &gt;Model License Agreement&lt;/a&gt;. Below are some representative models, for more models please refer to the &lt;a class=&#34;link&#34; href=&#34;./model_zoo&#34; &gt;Model Zoo&lt;/a&gt;.&lt;/p&gt;
&lt;p&gt;(Note: ⭐ represents the ModelScope model zoo, 🤗 represents the Huggingface model zoo, 🍀 represents the OpenAI model zoo)&lt;/p&gt;
&lt;table&gt;
  &lt;thead&gt;
      &lt;tr&gt;
          &lt;th style=&#34;text-align: center&#34;&gt;Model Name&lt;/th&gt;
          &lt;th style=&#34;text-align: center&#34;&gt;Task Details&lt;/th&gt;
          &lt;th style=&#34;text-align: center&#34;&gt;Training Data&lt;/th&gt;
          &lt;th style=&#34;text-align: center&#34;&gt;Parameters&lt;/th&gt;
      &lt;/tr&gt;
  &lt;/thead&gt;
  &lt;tbody&gt;
      &lt;tr&gt;
          &lt;td style=&#34;text-align: center&#34;&gt;SenseVoiceSmall &lt;br&gt; (&lt;a class=&#34;link&#34; href=&#34;https://www.modelscope.cn/models/iic/SenseVoiceSmall&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;⭐&lt;/a&gt;  &lt;a class=&#34;link&#34; href=&#34;https://huggingface.co/FunAudioLLM/SenseVoiceSmall&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;🤗&lt;/a&gt; )&lt;/td&gt;
          &lt;td style=&#34;text-align: center&#34;&gt;multiple speech understanding capabilities, including ASR, ITN, LID, SER, and AED; supports languages such as zh, yue, en, ja, ko&lt;/td&gt;
          &lt;td style=&#34;text-align: center&#34;&gt;300000 hours&lt;/td&gt;
          &lt;td style=&#34;text-align: center&#34;&gt;234M&lt;/td&gt;
      &lt;/tr&gt;
      &lt;tr&gt;
          &lt;td style=&#34;text-align: center&#34;&gt;paraformer-zh &lt;br&gt; (&lt;a class=&#34;link&#34; href=&#34;https://www.modelscope.cn/models/damo/speech_paraformer-large-vad-punc_asr_nat-zh-cn-16k-common-vocab8404-pytorch/summary&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;⭐&lt;/a&gt;  &lt;a class=&#34;link&#34; href=&#34;https://huggingface.co/funasr/paraformer-zh&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;🤗&lt;/a&gt; )&lt;/td&gt;
          &lt;td style=&#34;text-align: center&#34;&gt;speech recognition, with timestamps, non-streaming&lt;/td&gt;
          &lt;td style=&#34;text-align: center&#34;&gt;60000 hours, Mandarin&lt;/td&gt;
          &lt;td style=&#34;text-align: center&#34;&gt;220M&lt;/td&gt;
      &lt;/tr&gt;
      &lt;tr&gt;
          &lt;td style=&#34;text-align: center&#34;&gt;&lt;nobr&gt;paraformer-zh-streaming &lt;br&gt; ( &lt;a class=&#34;link&#34; href=&#34;https://modelscope.cn/models/damo/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-online/summary&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;⭐&lt;/a&gt; &lt;a class=&#34;link&#34; href=&#34;https://huggingface.co/funasr/paraformer-zh-streaming&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;🤗&lt;/a&gt; )&lt;/nobr&gt;&lt;/td&gt;
          &lt;td style=&#34;text-align: center&#34;&gt;speech recognition, streaming&lt;/td&gt;
          &lt;td style=&#34;text-align: center&#34;&gt;60000 hours, Mandarin&lt;/td&gt;
          &lt;td style=&#34;text-align: center&#34;&gt;220M&lt;/td&gt;
      &lt;/tr&gt;
      &lt;tr&gt;
          &lt;td style=&#34;text-align: center&#34;&gt;paraformer-en &lt;br&gt; ( &lt;a class=&#34;link&#34; href=&#34;https://www.modelscope.cn/models/damo/speech_paraformer-large-vad-punc_asr_nat-en-16k-common-vocab10020/summary&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;⭐&lt;/a&gt; &lt;a class=&#34;link&#34; href=&#34;https://huggingface.co/funasr/paraformer-en&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;🤗&lt;/a&gt; )&lt;/td&gt;
          &lt;td style=&#34;text-align: center&#34;&gt;speech recognition, without timestamps, non-streaming&lt;/td&gt;
          &lt;td style=&#34;text-align: center&#34;&gt;50000 hours, English&lt;/td&gt;
          &lt;td style=&#34;text-align: center&#34;&gt;220M&lt;/td&gt;
      &lt;/tr&gt;
      &lt;tr&gt;
          &lt;td style=&#34;text-align: center&#34;&gt;conformer-en &lt;br&gt; ( &lt;a class=&#34;link&#34; href=&#34;https://modelscope.cn/models/damo/speech_conformer_asr-en-16k-vocab4199-pytorch/summary&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;⭐&lt;/a&gt; &lt;a class=&#34;link&#34; href=&#34;https://huggingface.co/funasr/conformer-en&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;🤗&lt;/a&gt; )&lt;/td&gt;
          &lt;td style=&#34;text-align: center&#34;&gt;speech recognition, non-streaming&lt;/td&gt;
          &lt;td style=&#34;text-align: center&#34;&gt;50000 hours, English&lt;/td&gt;
          &lt;td style=&#34;text-align: center&#34;&gt;220M&lt;/td&gt;
      &lt;/tr&gt;
      &lt;tr&gt;
          &lt;td style=&#34;text-align: center&#34;&gt;ct-punc &lt;br&gt; ( &lt;a class=&#34;link&#34; href=&#34;https://modelscope.cn/models/damo/punc_ct-transformer_cn-en-common-vocab471067-large/summary&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;⭐&lt;/a&gt; &lt;a class=&#34;link&#34; href=&#34;https://huggingface.co/funasr/ct-punc&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;🤗&lt;/a&gt; )&lt;/td&gt;
          &lt;td style=&#34;text-align: center&#34;&gt;punctuation restoration&lt;/td&gt;
          &lt;td style=&#34;text-align: center&#34;&gt;100M, Mandarin and English&lt;/td&gt;
          &lt;td style=&#34;text-align: center&#34;&gt;290M&lt;/td&gt;
      &lt;/tr&gt;
      &lt;tr&gt;
          &lt;td style=&#34;text-align: center&#34;&gt;fsmn-vad &lt;br&gt; ( &lt;a class=&#34;link&#34; href=&#34;https://modelscope.cn/models/damo/speech_fsmn_vad_zh-cn-16k-common-pytorch/summary&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;⭐&lt;/a&gt; &lt;a class=&#34;link&#34; href=&#34;https://huggingface.co/funasr/fsmn-vad&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;🤗&lt;/a&gt; )&lt;/td&gt;
          &lt;td style=&#34;text-align: center&#34;&gt;voice activity detection&lt;/td&gt;
          &lt;td style=&#34;text-align: center&#34;&gt;5000 hours, Mandarin and English&lt;/td&gt;
          &lt;td style=&#34;text-align: center&#34;&gt;0.4M&lt;/td&gt;
      &lt;/tr&gt;
      &lt;tr&gt;
          &lt;td style=&#34;text-align: center&#34;&gt;fsmn-kws &lt;br&gt; ( &lt;a class=&#34;link&#34; href=&#34;https://modelscope.cn/models/iic/speech_charctc_kws_phone-xiaoyun/summary&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;⭐&lt;/a&gt; )&lt;/td&gt;
          &lt;td style=&#34;text-align: center&#34;&gt;keyword spotting, streaming&lt;/td&gt;
          &lt;td style=&#34;text-align: center&#34;&gt;5000 hours, Mandarin&lt;/td&gt;
          &lt;td style=&#34;text-align: center&#34;&gt;0.7M&lt;/td&gt;
      &lt;/tr&gt;
      &lt;tr&gt;
          &lt;td style=&#34;text-align: center&#34;&gt;fa-zh &lt;br&gt; ( &lt;a class=&#34;link&#34; href=&#34;https://modelscope.cn/models/damo/speech_timestamp_prediction-v1-16k-offline/summary&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;⭐&lt;/a&gt; &lt;a class=&#34;link&#34; href=&#34;https://huggingface.co/funasr/fa-zh&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;🤗&lt;/a&gt; )&lt;/td&gt;
          &lt;td style=&#34;text-align: center&#34;&gt;timestamp prediction&lt;/td&gt;
          &lt;td style=&#34;text-align: center&#34;&gt;5000 hours, Mandarin&lt;/td&gt;
          &lt;td style=&#34;text-align: center&#34;&gt;38M&lt;/td&gt;
      &lt;/tr&gt;
      &lt;tr&gt;
          &lt;td style=&#34;text-align: center&#34;&gt;cam++ &lt;br&gt; ( &lt;a class=&#34;link&#34; href=&#34;https://modelscope.cn/models/iic/speech_campplus_sv_zh-cn_16k-common/summary&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;⭐&lt;/a&gt; &lt;a class=&#34;link&#34; href=&#34;https://huggingface.co/funasr/campplus&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;🤗&lt;/a&gt; )&lt;/td&gt;
          &lt;td style=&#34;text-align: center&#34;&gt;speaker verification/diarization&lt;/td&gt;
          &lt;td style=&#34;text-align: center&#34;&gt;5000 hours&lt;/td&gt;
          &lt;td style=&#34;text-align: center&#34;&gt;7.2M&lt;/td&gt;
      &lt;/tr&gt;
      &lt;tr&gt;
          &lt;td style=&#34;text-align: center&#34;&gt;Whisper-large-v3 &lt;br&gt; (&lt;a class=&#34;link&#34; href=&#34;https://www.modelscope.cn/models/iic/Whisper-large-v3/summary&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;⭐&lt;/a&gt;  &lt;a class=&#34;link&#34; href=&#34;https://github.com/openai/whisper&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;🍀&lt;/a&gt; )&lt;/td&gt;
          &lt;td style=&#34;text-align: center&#34;&gt;speech recognition, with timestamps, non-streaming&lt;/td&gt;
          &lt;td style=&#34;text-align: center&#34;&gt;multilingual&lt;/td&gt;
          &lt;td style=&#34;text-align: center&#34;&gt;1550M&lt;/td&gt;
      &lt;/tr&gt;
      &lt;tr&gt;
          &lt;td style=&#34;text-align: center&#34;&gt;Whisper-large-v3-turbo &lt;br&gt; (&lt;a class=&#34;link&#34; href=&#34;https://www.modelscope.cn/models/iic/Whisper-large-v3-turbo/summary&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;⭐&lt;/a&gt;  &lt;a class=&#34;link&#34; href=&#34;https://github.com/openai/whisper&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;🍀&lt;/a&gt; )&lt;/td&gt;
          &lt;td style=&#34;text-align: center&#34;&gt;speech recognition, with timestamps, non-streaming&lt;/td&gt;
          &lt;td style=&#34;text-align: center&#34;&gt;multilingual&lt;/td&gt;
          &lt;td style=&#34;text-align: center&#34;&gt;809M&lt;/td&gt;
      &lt;/tr&gt;
      &lt;tr&gt;
          &lt;td style=&#34;text-align: center&#34;&gt;Qwen-Audio &lt;br&gt; (&lt;a class=&#34;link&#34; href=&#34;examples/industrial_data_pretraining/qwen_audio/demo.py&#34; &gt;⭐&lt;/a&gt;  &lt;a class=&#34;link&#34; href=&#34;https://huggingface.co/Qwen/Qwen-Audio&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;🤗&lt;/a&gt; )&lt;/td&gt;
          &lt;td style=&#34;text-align: center&#34;&gt;audio-text multimodal models (pretraining)&lt;/td&gt;
          &lt;td style=&#34;text-align: center&#34;&gt;multilingual&lt;/td&gt;
          &lt;td style=&#34;text-align: center&#34;&gt;8B&lt;/td&gt;
      &lt;/tr&gt;
      &lt;tr&gt;
          &lt;td style=&#34;text-align: center&#34;&gt;Qwen-Audio-Chat &lt;br&gt; (&lt;a class=&#34;link&#34; href=&#34;examples/industrial_data_pretraining/qwen_audio/demo_chat.py&#34; &gt;⭐&lt;/a&gt;  &lt;a class=&#34;link&#34; href=&#34;https://huggingface.co/Qwen/Qwen-Audio-Chat&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;🤗&lt;/a&gt; )&lt;/td&gt;
          &lt;td style=&#34;text-align: center&#34;&gt;audio-text multimodal models (chat)&lt;/td&gt;
          &lt;td style=&#34;text-align: center&#34;&gt;multilingual&lt;/td&gt;
          &lt;td style=&#34;text-align: center&#34;&gt;8B&lt;/td&gt;
      &lt;/tr&gt;
      &lt;tr&gt;
          &lt;td style=&#34;text-align: center&#34;&gt;emotion2vec+large &lt;br&gt; (&lt;a class=&#34;link&#34; href=&#34;https://modelscope.cn/models/iic/emotion2vec_plus_large/summary&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;⭐&lt;/a&gt;  &lt;a class=&#34;link&#34; href=&#34;https://huggingface.co/emotion2vec/emotion2vec_plus_large&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;🤗&lt;/a&gt; )&lt;/td&gt;
          &lt;td style=&#34;text-align: center&#34;&gt;speech emotion recognition&lt;/td&gt;
          &lt;td style=&#34;text-align: center&#34;&gt;40000 hours&lt;/td&gt;
          &lt;td style=&#34;text-align: center&#34;&gt;300M&lt;/td&gt;
      &lt;/tr&gt;
  &lt;/tbody&gt;
&lt;/table&gt;
&lt;p&gt;&lt;a name=&#34;quick-start&#34;&gt;&lt;/a&gt;&lt;/p&gt;
&lt;h2 id=&#34;quick-start&#34;&gt;Quick Start
&lt;/h2&gt;&lt;p&gt;Below is a quick start tutorial. Test audio files (&lt;a class=&#34;link&#34; href=&#34;https://isv-data.oss-cn-hangzhou.aliyuncs.com/ics/MaaS/ASR/test_audio/vad_example.wav&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;Mandarin&lt;/a&gt;, &lt;a class=&#34;link&#34; href=&#34;https://isv-data.oss-cn-hangzhou.aliyuncs.com/ics/MaaS/ASR/test_audio/asr_example_en.wav&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;English&lt;/a&gt;).&lt;/p&gt;
&lt;h3 id=&#34;command-line-usage&#34;&gt;Command-line usage
&lt;/h3&gt;&lt;div class=&#34;highlight&#34;&gt;&lt;div class=&#34;chroma&#34;&gt;
&lt;table class=&#34;lntable&#34;&gt;&lt;tr&gt;&lt;td class=&#34;lntd&#34;&gt;
&lt;pre tabindex=&#34;0&#34; class=&#34;chroma&#34;&gt;&lt;code&gt;&lt;span class=&#34;lnt&#34;&gt;1
&lt;/span&gt;&lt;/code&gt;&lt;/pre&gt;&lt;/td&gt;
&lt;td class=&#34;lntd&#34;&gt;
&lt;pre tabindex=&#34;0&#34; class=&#34;chroma&#34;&gt;&lt;code class=&#34;language-shell&#34; data-lang=&#34;shell&#34;&gt;&lt;span class=&#34;line&#34;&gt;&lt;span class=&#34;cl&#34;&gt;funasr ++model&lt;span class=&#34;o&#34;&gt;=&lt;/span&gt;paraformer-zh ++vad_model&lt;span class=&#34;o&#34;&gt;=&lt;/span&gt;&lt;span class=&#34;s2&#34;&gt;&amp;#34;fsmn-vad&amp;#34;&lt;/span&gt; ++punc_model&lt;span class=&#34;o&#34;&gt;=&lt;/span&gt;&lt;span class=&#34;s2&#34;&gt;&amp;#34;ct-punc&amp;#34;&lt;/span&gt; ++input&lt;span class=&#34;o&#34;&gt;=&lt;/span&gt;asr_example_zh.wav
&lt;/span&gt;&lt;/span&gt;&lt;/code&gt;&lt;/pre&gt;&lt;/td&gt;&lt;/tr&gt;&lt;/table&gt;
&lt;/div&gt;
&lt;/div&gt;&lt;p&gt;Note: Supports recognition of a single audio file, as well as a file list in Kaldi-style wav.scp format: &lt;code&gt;wav_id wav_path&lt;/code&gt;&lt;/p&gt;
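&lt;p&gt;For example, a hypothetical &lt;code&gt;wav.scp&lt;/code&gt; with two entries could look like this (the IDs and paths below are placeholders):&lt;/p&gt;
&lt;pre&gt;&lt;code class=&#34;language-text&#34;&gt;utt_001 /data/audio/asr_example_zh.wav
utt_002 /data/audio/asr_example_en.wav
&lt;/code&gt;&lt;/pre&gt;
&lt;p&gt;Passing such a file via &lt;code&gt;++input=wav.scp&lt;/code&gt; should transcribe each listed file in turn.&lt;/p&gt;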
&lt;h3 id=&#34;speech-recognition-non-streaming&#34;&gt;Speech Recognition (Non-streaming)
&lt;/h3&gt;&lt;h4 id=&#34;sensevoice&#34;&gt;SenseVoice
&lt;/h4&gt;&lt;div class=&#34;highlight&#34;&gt;&lt;div class=&#34;chroma&#34;&gt;
&lt;table class=&#34;lntable&#34;&gt;&lt;tr&gt;&lt;td class=&#34;lntd&#34;&gt;
&lt;pre tabindex=&#34;0&#34; class=&#34;chroma&#34;&gt;&lt;code&gt;&lt;span class=&#34;lnt&#34;&gt; 1
&lt;/span&gt;&lt;span class=&#34;lnt&#34;&gt; 2
&lt;/span&gt;&lt;span class=&#34;lnt&#34;&gt; 3
&lt;/span&gt;&lt;span class=&#34;lnt&#34;&gt; 4
&lt;/span&gt;&lt;span class=&#34;lnt&#34;&gt; 5
&lt;/span&gt;&lt;span class=&#34;lnt&#34;&gt; 6
&lt;/span&gt;&lt;span class=&#34;lnt&#34;&gt; 7
&lt;/span&gt;&lt;span class=&#34;lnt&#34;&gt; 8
&lt;/span&gt;&lt;span class=&#34;lnt&#34;&gt; 9
&lt;/span&gt;&lt;span class=&#34;lnt&#34;&gt;10
&lt;/span&gt;&lt;span class=&#34;lnt&#34;&gt;11
&lt;/span&gt;&lt;span class=&#34;lnt&#34;&gt;12
&lt;/span&gt;&lt;span class=&#34;lnt&#34;&gt;13
&lt;/span&gt;&lt;span class=&#34;lnt&#34;&gt;14
&lt;/span&gt;&lt;span class=&#34;lnt&#34;&gt;15
&lt;/span&gt;&lt;span class=&#34;lnt&#34;&gt;16
&lt;/span&gt;&lt;span class=&#34;lnt&#34;&gt;17
&lt;/span&gt;&lt;span class=&#34;lnt&#34;&gt;18
&lt;/span&gt;&lt;span class=&#34;lnt&#34;&gt;19
&lt;/span&gt;&lt;span class=&#34;lnt&#34;&gt;20
&lt;/span&gt;&lt;span class=&#34;lnt&#34;&gt;21
&lt;/span&gt;&lt;span class=&#34;lnt&#34;&gt;22
&lt;/span&gt;&lt;span class=&#34;lnt&#34;&gt;23
&lt;/span&gt;&lt;span class=&#34;lnt&#34;&gt;24
&lt;/span&gt;&lt;/code&gt;&lt;/pre&gt;&lt;/td&gt;
&lt;td class=&#34;lntd&#34;&gt;
&lt;pre tabindex=&#34;0&#34; class=&#34;chroma&#34;&gt;&lt;code class=&#34;language-python&#34; data-lang=&#34;python&#34;&gt;&lt;span class=&#34;line&#34;&gt;&lt;span class=&#34;cl&#34;&gt;&lt;span class=&#34;kn&#34;&gt;from&lt;/span&gt; &lt;span class=&#34;nn&#34;&gt;funasr&lt;/span&gt; &lt;span class=&#34;kn&#34;&gt;import&lt;/span&gt; &lt;span class=&#34;n&#34;&gt;AutoModel&lt;/span&gt;
&lt;/span&gt;&lt;/span&gt;&lt;span class=&#34;line&#34;&gt;&lt;span class=&#34;cl&#34;&gt;&lt;span class=&#34;kn&#34;&gt;from&lt;/span&gt; &lt;span class=&#34;nn&#34;&gt;funasr.utils.postprocess_utils&lt;/span&gt; &lt;span class=&#34;kn&#34;&gt;import&lt;/span&gt; &lt;span class=&#34;n&#34;&gt;rich_transcription_postprocess&lt;/span&gt;
&lt;/span&gt;&lt;/span&gt;&lt;span class=&#34;line&#34;&gt;&lt;span class=&#34;cl&#34;&gt;
&lt;/span&gt;&lt;/span&gt;&lt;span class=&#34;line&#34;&gt;&lt;span class=&#34;cl&#34;&gt;&lt;span class=&#34;n&#34;&gt;model_dir&lt;/span&gt; &lt;span class=&#34;o&#34;&gt;=&lt;/span&gt; &lt;span class=&#34;s2&#34;&gt;&amp;#34;iic/SenseVoiceSmall&amp;#34;&lt;/span&gt;
&lt;/span&gt;&lt;/span&gt;&lt;span class=&#34;line&#34;&gt;&lt;span class=&#34;cl&#34;&gt;
&lt;/span&gt;&lt;/span&gt;&lt;span class=&#34;line&#34;&gt;&lt;span class=&#34;cl&#34;&gt;&lt;span class=&#34;n&#34;&gt;model&lt;/span&gt; &lt;span class=&#34;o&#34;&gt;=&lt;/span&gt; &lt;span class=&#34;n&#34;&gt;AutoModel&lt;/span&gt;&lt;span class=&#34;p&#34;&gt;(&lt;/span&gt;
&lt;/span&gt;&lt;/span&gt;&lt;span class=&#34;line&#34;&gt;&lt;span class=&#34;cl&#34;&gt;    &lt;span class=&#34;n&#34;&gt;model&lt;/span&gt;&lt;span class=&#34;o&#34;&gt;=&lt;/span&gt;&lt;span class=&#34;n&#34;&gt;model_dir&lt;/span&gt;&lt;span class=&#34;p&#34;&gt;,&lt;/span&gt;
&lt;/span&gt;&lt;/span&gt;&lt;span class=&#34;line&#34;&gt;&lt;span class=&#34;cl&#34;&gt;    &lt;span class=&#34;n&#34;&gt;vad_model&lt;/span&gt;&lt;span class=&#34;o&#34;&gt;=&lt;/span&gt;&lt;span class=&#34;s2&#34;&gt;&amp;#34;fsmn-vad&amp;#34;&lt;/span&gt;&lt;span class=&#34;p&#34;&gt;,&lt;/span&gt;
&lt;/span&gt;&lt;/span&gt;&lt;span class=&#34;line&#34;&gt;&lt;span class=&#34;cl&#34;&gt;    &lt;span class=&#34;n&#34;&gt;vad_kwargs&lt;/span&gt;&lt;span class=&#34;o&#34;&gt;=&lt;/span&gt;&lt;span class=&#34;p&#34;&gt;{&lt;/span&gt;&lt;span class=&#34;s2&#34;&gt;&amp;#34;max_single_segment_time&amp;#34;&lt;/span&gt;&lt;span class=&#34;p&#34;&gt;:&lt;/span&gt; &lt;span class=&#34;mi&#34;&gt;30000&lt;/span&gt;&lt;span class=&#34;p&#34;&gt;},&lt;/span&gt;
&lt;/span&gt;&lt;/span&gt;&lt;span class=&#34;line&#34;&gt;&lt;span class=&#34;cl&#34;&gt;    &lt;span class=&#34;n&#34;&gt;device&lt;/span&gt;&lt;span class=&#34;o&#34;&gt;=&lt;/span&gt;&lt;span class=&#34;s2&#34;&gt;&amp;#34;cuda:0&amp;#34;&lt;/span&gt;&lt;span class=&#34;p&#34;&gt;,&lt;/span&gt;
&lt;/span&gt;&lt;/span&gt;&lt;span class=&#34;line&#34;&gt;&lt;span class=&#34;cl&#34;&gt;&lt;span class=&#34;p&#34;&gt;)&lt;/span&gt;
&lt;/span&gt;&lt;/span&gt;&lt;span class=&#34;line&#34;&gt;&lt;span class=&#34;cl&#34;&gt;
&lt;/span&gt;&lt;/span&gt;&lt;span class=&#34;line&#34;&gt;&lt;span class=&#34;cl&#34;&gt;&lt;span class=&#34;c1&#34;&gt;# en&lt;/span&gt;
&lt;/span&gt;&lt;/span&gt;&lt;span class=&#34;line&#34;&gt;&lt;span class=&#34;cl&#34;&gt;&lt;span class=&#34;n&#34;&gt;res&lt;/span&gt; &lt;span class=&#34;o&#34;&gt;=&lt;/span&gt; &lt;span class=&#34;n&#34;&gt;model&lt;/span&gt;&lt;span class=&#34;o&#34;&gt;.&lt;/span&gt;&lt;span class=&#34;n&#34;&gt;generate&lt;/span&gt;&lt;span class=&#34;p&#34;&gt;(&lt;/span&gt;
&lt;/span&gt;&lt;/span&gt;&lt;span class=&#34;line&#34;&gt;&lt;span class=&#34;cl&#34;&gt;    &lt;span class=&#34;nb&#34;&gt;input&lt;/span&gt;&lt;span class=&#34;o&#34;&gt;=&lt;/span&gt;&lt;span class=&#34;sa&#34;&gt;f&lt;/span&gt;&lt;span class=&#34;s2&#34;&gt;&amp;#34;&lt;/span&gt;&lt;span class=&#34;si&#34;&gt;{&lt;/span&gt;&lt;span class=&#34;n&#34;&gt;model&lt;/span&gt;&lt;span class=&#34;o&#34;&gt;.&lt;/span&gt;&lt;span class=&#34;n&#34;&gt;model_path&lt;/span&gt;&lt;span class=&#34;si&#34;&gt;}&lt;/span&gt;&lt;span class=&#34;s2&#34;&gt;/example/en.mp3&amp;#34;&lt;/span&gt;&lt;span class=&#34;p&#34;&gt;,&lt;/span&gt;
&lt;/span&gt;&lt;/span&gt;&lt;span class=&#34;line&#34;&gt;&lt;span class=&#34;cl&#34;&gt;    &lt;span class=&#34;n&#34;&gt;cache&lt;/span&gt;&lt;span class=&#34;o&#34;&gt;=&lt;/span&gt;&lt;span class=&#34;p&#34;&gt;{},&lt;/span&gt;
&lt;/span&gt;&lt;/span&gt;&lt;span class=&#34;line&#34;&gt;&lt;span class=&#34;cl&#34;&gt;    &lt;span class=&#34;n&#34;&gt;language&lt;/span&gt;&lt;span class=&#34;o&#34;&gt;=&lt;/span&gt;&lt;span class=&#34;s2&#34;&gt;&amp;#34;auto&amp;#34;&lt;/span&gt;&lt;span class=&#34;p&#34;&gt;,&lt;/span&gt;  &lt;span class=&#34;c1&#34;&gt;# &amp;#34;zh&amp;#34;, &amp;#34;en&amp;#34;, &amp;#34;yue&amp;#34;, &amp;#34;ja&amp;#34;, &amp;#34;ko&amp;#34;, &amp;#34;nospeech&amp;#34;&lt;/span&gt;
&lt;/span&gt;&lt;/span&gt;&lt;span class=&#34;line&#34;&gt;&lt;span class=&#34;cl&#34;&gt;    &lt;span class=&#34;n&#34;&gt;use_itn&lt;/span&gt;&lt;span class=&#34;o&#34;&gt;=&lt;/span&gt;&lt;span class=&#34;kc&#34;&gt;True&lt;/span&gt;&lt;span class=&#34;p&#34;&gt;,&lt;/span&gt;
&lt;/span&gt;&lt;/span&gt;&lt;span class=&#34;line&#34;&gt;&lt;span class=&#34;cl&#34;&gt;    &lt;span class=&#34;n&#34;&gt;batch_size_s&lt;/span&gt;&lt;span class=&#34;o&#34;&gt;=&lt;/span&gt;&lt;span class=&#34;mi&#34;&gt;60&lt;/span&gt;&lt;span class=&#34;p&#34;&gt;,&lt;/span&gt;
&lt;/span&gt;&lt;/span&gt;&lt;span class=&#34;line&#34;&gt;&lt;span class=&#34;cl&#34;&gt;    &lt;span class=&#34;n&#34;&gt;merge_vad&lt;/span&gt;&lt;span class=&#34;o&#34;&gt;=&lt;/span&gt;&lt;span class=&#34;kc&#34;&gt;True&lt;/span&gt;&lt;span class=&#34;p&#34;&gt;,&lt;/span&gt;  &lt;span class=&#34;c1&#34;&gt;# merge short VAD segments up to merge_length_s&lt;/span&gt;
&lt;/span&gt;&lt;/span&gt;&lt;span class=&#34;line&#34;&gt;&lt;span class=&#34;cl&#34;&gt;    &lt;span class=&#34;n&#34;&gt;merge_length_s&lt;/span&gt;&lt;span class=&#34;o&#34;&gt;=&lt;/span&gt;&lt;span class=&#34;mi&#34;&gt;15&lt;/span&gt;&lt;span class=&#34;p&#34;&gt;,&lt;/span&gt;
&lt;/span&gt;&lt;/span&gt;&lt;span class=&#34;line&#34;&gt;&lt;span class=&#34;cl&#34;&gt;&lt;span class=&#34;p&#34;&gt;)&lt;/span&gt;
&lt;/span&gt;&lt;/span&gt;&lt;span class=&#34;line&#34;&gt;&lt;span class=&#34;cl&#34;&gt;&lt;span class=&#34;n&#34;&gt;text&lt;/span&gt; &lt;span class=&#34;o&#34;&gt;=&lt;/span&gt; &lt;span class=&#34;n&#34;&gt;rich_transcription_postprocess&lt;/span&gt;&lt;span class=&#34;p&#34;&gt;(&lt;/span&gt;&lt;span class=&#34;n&#34;&gt;res&lt;/span&gt;&lt;span class=&#34;p&#34;&gt;[&lt;/span&gt;&lt;span class=&#34;mi&#34;&gt;0&lt;/span&gt;&lt;span class=&#34;p&#34;&gt;][&lt;/span&gt;&lt;span class=&#34;s2&#34;&gt;&amp;#34;text&amp;#34;&lt;/span&gt;&lt;span class=&#34;p&#34;&gt;])&lt;/span&gt;
&lt;/span&gt;&lt;/span&gt;&lt;span class=&#34;line&#34;&gt;&lt;span class=&#34;cl&#34;&gt;&lt;span class=&#34;nb&#34;&gt;print&lt;/span&gt;&lt;span class=&#34;p&#34;&gt;(&lt;/span&gt;&lt;span class=&#34;n&#34;&gt;text&lt;/span&gt;&lt;span class=&#34;p&#34;&gt;)&lt;/span&gt;
&lt;/span&gt;&lt;/span&gt;&lt;/code&gt;&lt;/pre&gt;&lt;/td&gt;&lt;/tr&gt;&lt;/table&gt;
&lt;/div&gt;
&lt;/div&gt;&lt;p&gt;Parameter Description:&lt;/p&gt;
&lt;ul&gt;
&lt;li&gt;&lt;code&gt;model_dir&lt;/code&gt;: The name of the model, or the path to the model on the local disk.&lt;/li&gt;
&lt;li&gt;&lt;code&gt;vad_model&lt;/code&gt;: This indicates the activation of VAD (Voice Activity Detection). The purpose of VAD is to split long audio into shorter clips. In this case, the inference time includes the total consumption of both VAD and SenseVoice, and represents the end-to-end latency. If you wish to test the SenseVoice model&amp;rsquo;s inference time separately, the VAD model can be disabled (see the sketch after this list).&lt;/li&gt;
&lt;li&gt;&lt;code&gt;vad_kwargs&lt;/code&gt;: Specifies the configurations for the VAD model. &lt;code&gt;max_single_segment_time&lt;/code&gt;: denotes the maximum duration for audio segmentation by the &lt;code&gt;vad_model&lt;/code&gt;, with the unit being milliseconds (ms).&lt;/li&gt;
&lt;li&gt;&lt;code&gt;use_itn&lt;/code&gt;: Whether the output result includes punctuation and inverse text normalization.&lt;/li&gt;
&lt;li&gt;&lt;code&gt;batch_size_s&lt;/code&gt;: Indicates the use of dynamic batching, where the total duration of audio in the batch is measured in seconds (s).&lt;/li&gt;
&lt;li&gt;&lt;code&gt;merge_vad&lt;/code&gt;: Whether to merge short audio fragments segmented by the VAD model, with the merged length being &lt;code&gt;merge_length_s&lt;/code&gt;, in seconds (s).&lt;/li&gt;
&lt;li&gt;&lt;code&gt;ban_emo_unk&lt;/code&gt;: Whether to ban the output of the &lt;code&gt;emo_unk&lt;/code&gt; token.&lt;/li&gt;
&lt;/ul&gt;
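&lt;p&gt;As noted for &lt;code&gt;vad_model&lt;/code&gt; above, the VAD front-end can be left out to time the SenseVoice model on its own. A minimal sketch, reusing the model name and bundled example audio from the snippet above (long audio must then fit in a single pass):&lt;/p&gt;
&lt;pre&gt;&lt;code class=&#34;language-python&#34;&gt;from funasr import AutoModel
from funasr.utils.postprocess_utils import rich_transcription_postprocess

# No vad_model here: generate() measures the recognition model alone.
model = AutoModel(model=&amp;#34;iic/SenseVoiceSmall&amp;#34;, device=&amp;#34;cuda:0&amp;#34;)

res = model.generate(
    input=f&amp;#34;{model.model_path}/example/en.mp3&amp;#34;,
    language=&amp;#34;auto&amp;#34;,
    use_itn=True,
)
print(rich_transcription_postprocess(res[0][&amp;#34;text&amp;#34;]))
&lt;/code&gt;&lt;/pre&gt;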
&lt;h4 id=&#34;paraformer&#34;&gt;Paraformer
&lt;/h4&gt;&lt;div class=&#34;highlight&#34;&gt;&lt;div class=&#34;chroma&#34;&gt;
&lt;table class=&#34;lntable&#34;&gt;&lt;tr&gt;&lt;td class=&#34;lntd&#34;&gt;
&lt;pre tabindex=&#34;0&#34; class=&#34;chroma&#34;&gt;&lt;code&gt;&lt;span class=&#34;lnt&#34;&gt; 1
&lt;/span&gt;&lt;span class=&#34;lnt&#34;&gt; 2
&lt;/span&gt;&lt;span class=&#34;lnt&#34;&gt; 3
&lt;/span&gt;&lt;span class=&#34;lnt&#34;&gt; 4
&lt;/span&gt;&lt;span class=&#34;lnt&#34;&gt; 5
&lt;/span&gt;&lt;span class=&#34;lnt&#34;&gt; 6
&lt;/span&gt;&lt;span class=&#34;lnt&#34;&gt; 7
&lt;/span&gt;&lt;span class=&#34;lnt&#34;&gt; 8
&lt;/span&gt;&lt;span class=&#34;lnt&#34;&gt; 9
&lt;/span&gt;&lt;span class=&#34;lnt&#34;&gt;10
&lt;/span&gt;&lt;/code&gt;&lt;/pre&gt;&lt;/td&gt;
&lt;td class=&#34;lntd&#34;&gt;
&lt;pre tabindex=&#34;0&#34; class=&#34;chroma&#34;&gt;&lt;code class=&#34;language-python&#34; data-lang=&#34;python&#34;&gt;&lt;span class=&#34;line&#34;&gt;&lt;span class=&#34;cl&#34;&gt;&lt;span class=&#34;kn&#34;&gt;from&lt;/span&gt; &lt;span class=&#34;nn&#34;&gt;funasr&lt;/span&gt; &lt;span class=&#34;kn&#34;&gt;import&lt;/span&gt; &lt;span class=&#34;n&#34;&gt;AutoModel&lt;/span&gt;
&lt;/span&gt;&lt;/span&gt;&lt;span class=&#34;line&#34;&gt;&lt;span class=&#34;cl&#34;&gt;&lt;span class=&#34;c1&#34;&gt;# paraformer-zh is a multi-functional asr model&lt;/span&gt;
&lt;/span&gt;&lt;/span&gt;&lt;span class=&#34;line&#34;&gt;&lt;span class=&#34;cl&#34;&gt;&lt;span class=&#34;c1&#34;&gt;# use vad, punc, spk or not as you need&lt;/span&gt;
&lt;/span&gt;&lt;/span&gt;&lt;span class=&#34;line&#34;&gt;&lt;span class=&#34;cl&#34;&gt;&lt;span class=&#34;n&#34;&gt;model&lt;/span&gt; &lt;span class=&#34;o&#34;&gt;=&lt;/span&gt; &lt;span class=&#34;n&#34;&gt;AutoModel&lt;/span&gt;&lt;span class=&#34;p&#34;&gt;(&lt;/span&gt;&lt;span class=&#34;n&#34;&gt;model&lt;/span&gt;&lt;span class=&#34;o&#34;&gt;=&lt;/span&gt;&lt;span class=&#34;s2&#34;&gt;&amp;#34;paraformer-zh&amp;#34;&lt;/span&gt;&lt;span class=&#34;p&#34;&gt;,&lt;/span&gt;  &lt;span class=&#34;n&#34;&gt;vad_model&lt;/span&gt;&lt;span class=&#34;o&#34;&gt;=&lt;/span&gt;&lt;span class=&#34;s2&#34;&gt;&amp;#34;fsmn-vad&amp;#34;&lt;/span&gt;&lt;span class=&#34;p&#34;&gt;,&lt;/span&gt;  &lt;span class=&#34;n&#34;&gt;punc_model&lt;/span&gt;&lt;span class=&#34;o&#34;&gt;=&lt;/span&gt;&lt;span class=&#34;s2&#34;&gt;&amp;#34;ct-punc&amp;#34;&lt;/span&gt;&lt;span class=&#34;p&#34;&gt;,&lt;/span&gt; 
&lt;/span&gt;&lt;/span&gt;&lt;span class=&#34;line&#34;&gt;&lt;span class=&#34;cl&#34;&gt;                  &lt;span class=&#34;c1&#34;&gt;# spk_model=&amp;#34;cam++&amp;#34;, &lt;/span&gt;
&lt;/span&gt;&lt;/span&gt;&lt;span class=&#34;line&#34;&gt;&lt;span class=&#34;cl&#34;&gt;                  &lt;span class=&#34;p&#34;&gt;)&lt;/span&gt;
&lt;/span&gt;&lt;/span&gt;&lt;span class=&#34;line&#34;&gt;&lt;span class=&#34;cl&#34;&gt;&lt;span class=&#34;n&#34;&gt;res&lt;/span&gt; &lt;span class=&#34;o&#34;&gt;=&lt;/span&gt; &lt;span class=&#34;n&#34;&gt;model&lt;/span&gt;&lt;span class=&#34;o&#34;&gt;.&lt;/span&gt;&lt;span class=&#34;n&#34;&gt;generate&lt;/span&gt;&lt;span class=&#34;p&#34;&gt;(&lt;/span&gt;&lt;span class=&#34;nb&#34;&gt;input&lt;/span&gt;&lt;span class=&#34;o&#34;&gt;=&lt;/span&gt;&lt;span class=&#34;sa&#34;&gt;f&lt;/span&gt;&lt;span class=&#34;s2&#34;&gt;&amp;#34;&lt;/span&gt;&lt;span class=&#34;si&#34;&gt;{&lt;/span&gt;&lt;span class=&#34;n&#34;&gt;model&lt;/span&gt;&lt;span class=&#34;o&#34;&gt;.&lt;/span&gt;&lt;span class=&#34;n&#34;&gt;model_path&lt;/span&gt;&lt;span class=&#34;si&#34;&gt;}&lt;/span&gt;&lt;span class=&#34;s2&#34;&gt;/example/asr_example.wav&amp;#34;&lt;/span&gt;&lt;span class=&#34;p&#34;&gt;,&lt;/span&gt; 
&lt;/span&gt;&lt;/span&gt;&lt;span class=&#34;line&#34;&gt;&lt;span class=&#34;cl&#34;&gt;                     &lt;span class=&#34;n&#34;&gt;batch_size_s&lt;/span&gt;&lt;span class=&#34;o&#34;&gt;=&lt;/span&gt;&lt;span class=&#34;mi&#34;&gt;300&lt;/span&gt;&lt;span class=&#34;p&#34;&gt;,&lt;/span&gt; 
&lt;/span&gt;&lt;/span&gt;&lt;span class=&#34;line&#34;&gt;&lt;span class=&#34;cl&#34;&gt;                     &lt;span class=&#34;n&#34;&gt;hotword&lt;/span&gt;&lt;span class=&#34;o&#34;&gt;=&lt;/span&gt;&lt;span class=&#34;s1&#34;&gt;&amp;#39;魔搭&amp;#39;&lt;/span&gt;&lt;span class=&#34;p&#34;&gt;)&lt;/span&gt;
&lt;/span&gt;&lt;/span&gt;&lt;span class=&#34;line&#34;&gt;&lt;span class=&#34;cl&#34;&gt;&lt;span class=&#34;nb&#34;&gt;print&lt;/span&gt;&lt;span class=&#34;p&#34;&gt;(&lt;/span&gt;&lt;span class=&#34;n&#34;&gt;res&lt;/span&gt;&lt;span class=&#34;p&#34;&gt;)&lt;/span&gt;
&lt;/span&gt;&lt;/span&gt;&lt;/code&gt;&lt;/pre&gt;&lt;/td&gt;&lt;/tr&gt;&lt;/table&gt;
&lt;/div&gt;
&lt;/div&gt;&lt;p&gt;Note: &lt;code&gt;hub&lt;/code&gt; specifies the model repository: &lt;code&gt;ms&lt;/code&gt; selects download from ModelScope, while &lt;code&gt;hf&lt;/code&gt; selects download from Huggingface.&lt;/p&gt;
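&lt;p&gt;As a minimal sketch of that option (the parameter values follow the note above):&lt;/p&gt;
&lt;pre&gt;&lt;code class=&#34;language-python&#34;&gt;from funasr import AutoModel

# hub=&amp;#34;ms&amp;#34; downloads from ModelScope; hub=&amp;#34;hf&amp;#34; downloads from Huggingface.
model = AutoModel(model=&amp;#34;paraformer-zh&amp;#34;, hub=&amp;#34;ms&amp;#34;)
&lt;/code&gt;&lt;/pre&gt;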
&lt;h3 id=&#34;speech-recognition-streaming&#34;&gt;Speech Recognition (Streaming)
&lt;/h3&gt;&lt;div class=&#34;highlight&#34;&gt;&lt;div class=&#34;chroma&#34;&gt;
&lt;table class=&#34;lntable&#34;&gt;&lt;tr&gt;&lt;td class=&#34;lntd&#34;&gt;
&lt;pre tabindex=&#34;0&#34; class=&#34;chroma&#34;&gt;&lt;code&gt;&lt;span class=&#34;lnt&#34;&gt; 1
&lt;/span&gt;&lt;span class=&#34;lnt&#34;&gt; 2
&lt;/span&gt;&lt;span class=&#34;lnt&#34;&gt; 3
&lt;/span&gt;&lt;span class=&#34;lnt&#34;&gt; 4
&lt;/span&gt;&lt;span class=&#34;lnt&#34;&gt; 5
&lt;/span&gt;&lt;span class=&#34;lnt&#34;&gt; 6
&lt;/span&gt;&lt;span class=&#34;lnt&#34;&gt; 7
&lt;/span&gt;&lt;span class=&#34;lnt&#34;&gt; 8
&lt;/span&gt;&lt;span class=&#34;lnt&#34;&gt; 9
&lt;/span&gt;&lt;span class=&#34;lnt&#34;&gt;10
&lt;/span&gt;&lt;span class=&#34;lnt&#34;&gt;11
&lt;/span&gt;&lt;span class=&#34;lnt&#34;&gt;12
&lt;/span&gt;&lt;span class=&#34;lnt&#34;&gt;13
&lt;/span&gt;&lt;span class=&#34;lnt&#34;&gt;14
&lt;/span&gt;&lt;span class=&#34;lnt&#34;&gt;15
&lt;/span&gt;&lt;span class=&#34;lnt&#34;&gt;16
&lt;/span&gt;&lt;span class=&#34;lnt&#34;&gt;17
&lt;/span&gt;&lt;span class=&#34;lnt&#34;&gt;18
&lt;/span&gt;&lt;span class=&#34;lnt&#34;&gt;19
&lt;/span&gt;&lt;span class=&#34;lnt&#34;&gt;20
&lt;/span&gt;&lt;span class=&#34;lnt&#34;&gt;21
&lt;/span&gt;&lt;span class=&#34;lnt&#34;&gt;22
&lt;/span&gt;&lt;/code&gt;&lt;/pre&gt;&lt;/td&gt;
&lt;td class=&#34;lntd&#34;&gt;
&lt;pre tabindex=&#34;0&#34; class=&#34;chroma&#34;&gt;&lt;code class=&#34;language-python&#34; data-lang=&#34;python&#34;&gt;&lt;span class=&#34;line&#34;&gt;&lt;span class=&#34;cl&#34;&gt;&lt;span class=&#34;kn&#34;&gt;from&lt;/span&gt; &lt;span class=&#34;nn&#34;&gt;funasr&lt;/span&gt; &lt;span class=&#34;kn&#34;&gt;import&lt;/span&gt; &lt;span class=&#34;n&#34;&gt;AutoModel&lt;/span&gt;
&lt;/span&gt;&lt;/span&gt;&lt;span class=&#34;line&#34;&gt;&lt;span class=&#34;cl&#34;&gt;
&lt;/span&gt;&lt;/span&gt;&lt;span class=&#34;line&#34;&gt;&lt;span class=&#34;cl&#34;&gt;&lt;span class=&#34;n&#34;&gt;chunk_size&lt;/span&gt; &lt;span class=&#34;o&#34;&gt;=&lt;/span&gt; &lt;span class=&#34;p&#34;&gt;[&lt;/span&gt;&lt;span class=&#34;mi&#34;&gt;0&lt;/span&gt;&lt;span class=&#34;p&#34;&gt;,&lt;/span&gt; &lt;span class=&#34;mi&#34;&gt;10&lt;/span&gt;&lt;span class=&#34;p&#34;&gt;,&lt;/span&gt; &lt;span class=&#34;mi&#34;&gt;5&lt;/span&gt;&lt;span class=&#34;p&#34;&gt;]&lt;/span&gt; &lt;span class=&#34;c1&#34;&gt;#[0, 10, 5] 600ms, [0, 8, 4] 480ms&lt;/span&gt;
&lt;/span&gt;&lt;/span&gt;&lt;span class=&#34;line&#34;&gt;&lt;span class=&#34;cl&#34;&gt;&lt;span class=&#34;n&#34;&gt;encoder_chunk_look_back&lt;/span&gt; &lt;span class=&#34;o&#34;&gt;=&lt;/span&gt; &lt;span class=&#34;mi&#34;&gt;4&lt;/span&gt; &lt;span class=&#34;c1&#34;&gt;#number of chunks to lookback for encoder self-attention&lt;/span&gt;
&lt;/span&gt;&lt;/span&gt;&lt;span class=&#34;line&#34;&gt;&lt;span class=&#34;cl&#34;&gt;&lt;span class=&#34;n&#34;&gt;decoder_chunk_look_back&lt;/span&gt; &lt;span class=&#34;o&#34;&gt;=&lt;/span&gt; &lt;span class=&#34;mi&#34;&gt;1&lt;/span&gt; &lt;span class=&#34;c1&#34;&gt;#number of encoder chunks to lookback for decoder cross-attention&lt;/span&gt;
&lt;/span&gt;&lt;/span&gt;&lt;span class=&#34;line&#34;&gt;&lt;span class=&#34;cl&#34;&gt;
&lt;/span&gt;&lt;/span&gt;&lt;span class=&#34;line&#34;&gt;&lt;span class=&#34;cl&#34;&gt;&lt;span class=&#34;n&#34;&gt;model&lt;/span&gt; &lt;span class=&#34;o&#34;&gt;=&lt;/span&gt; &lt;span class=&#34;n&#34;&gt;AutoModel&lt;/span&gt;&lt;span class=&#34;p&#34;&gt;(&lt;/span&gt;&lt;span class=&#34;n&#34;&gt;model&lt;/span&gt;&lt;span class=&#34;o&#34;&gt;=&lt;/span&gt;&lt;span class=&#34;s2&#34;&gt;&amp;#34;paraformer-zh-streaming&amp;#34;&lt;/span&gt;&lt;span class=&#34;p&#34;&gt;)&lt;/span&gt;
&lt;/span&gt;&lt;/span&gt;&lt;span class=&#34;line&#34;&gt;&lt;span class=&#34;cl&#34;&gt;
&lt;/span&gt;&lt;/span&gt;&lt;span class=&#34;line&#34;&gt;&lt;span class=&#34;cl&#34;&gt;&lt;span class=&#34;kn&#34;&gt;import&lt;/span&gt; &lt;span class=&#34;nn&#34;&gt;soundfile&lt;/span&gt;
&lt;/span&gt;&lt;/span&gt;&lt;span class=&#34;line&#34;&gt;&lt;span class=&#34;cl&#34;&gt;&lt;span class=&#34;kn&#34;&gt;import&lt;/span&gt; &lt;span class=&#34;nn&#34;&gt;os&lt;/span&gt;
&lt;/span&gt;&lt;/span&gt;&lt;span class=&#34;line&#34;&gt;&lt;span class=&#34;cl&#34;&gt;
&lt;/span&gt;&lt;/span&gt;&lt;span class=&#34;line&#34;&gt;&lt;span class=&#34;cl&#34;&gt;&lt;span class=&#34;n&#34;&gt;wav_file&lt;/span&gt; &lt;span class=&#34;o&#34;&gt;=&lt;/span&gt; &lt;span class=&#34;n&#34;&gt;os&lt;/span&gt;&lt;span class=&#34;o&#34;&gt;.&lt;/span&gt;&lt;span class=&#34;n&#34;&gt;path&lt;/span&gt;&lt;span class=&#34;o&#34;&gt;.&lt;/span&gt;&lt;span class=&#34;n&#34;&gt;join&lt;/span&gt;&lt;span class=&#34;p&#34;&gt;(&lt;/span&gt;&lt;span class=&#34;n&#34;&gt;model&lt;/span&gt;&lt;span class=&#34;o&#34;&gt;.&lt;/span&gt;&lt;span class=&#34;n&#34;&gt;model_path&lt;/span&gt;&lt;span class=&#34;p&#34;&gt;,&lt;/span&gt; &lt;span class=&#34;s2&#34;&gt;&amp;#34;example/asr_example.wav&amp;#34;&lt;/span&gt;&lt;span class=&#34;p&#34;&gt;)&lt;/span&gt;
&lt;/span&gt;&lt;/span&gt;&lt;span class=&#34;line&#34;&gt;&lt;span class=&#34;cl&#34;&gt;&lt;span class=&#34;n&#34;&gt;speech&lt;/span&gt;&lt;span class=&#34;p&#34;&gt;,&lt;/span&gt; &lt;span class=&#34;n&#34;&gt;sample_rate&lt;/span&gt; &lt;span class=&#34;o&#34;&gt;=&lt;/span&gt; &lt;span class=&#34;n&#34;&gt;soundfile&lt;/span&gt;&lt;span class=&#34;o&#34;&gt;.&lt;/span&gt;&lt;span class=&#34;n&#34;&gt;read&lt;/span&gt;&lt;span class=&#34;p&#34;&gt;(&lt;/span&gt;&lt;span class=&#34;n&#34;&gt;wav_file&lt;/span&gt;&lt;span class=&#34;p&#34;&gt;)&lt;/span&gt;
&lt;/span&gt;&lt;/span&gt;&lt;span class=&#34;line&#34;&gt;&lt;span class=&#34;cl&#34;&gt;&lt;span class=&#34;n&#34;&gt;chunk_stride&lt;/span&gt; &lt;span class=&#34;o&#34;&gt;=&lt;/span&gt; &lt;span class=&#34;n&#34;&gt;chunk_size&lt;/span&gt;&lt;span class=&#34;p&#34;&gt;[&lt;/span&gt;&lt;span class=&#34;mi&#34;&gt;1&lt;/span&gt;&lt;span class=&#34;p&#34;&gt;]&lt;/span&gt; &lt;span class=&#34;o&#34;&gt;*&lt;/span&gt; &lt;span class=&#34;mi&#34;&gt;960&lt;/span&gt; &lt;span class=&#34;c1&#34;&gt;# 600ms&lt;/span&gt;
&lt;/span&gt;&lt;/span&gt;&lt;span class=&#34;line&#34;&gt;&lt;span class=&#34;cl&#34;&gt;
&lt;/span&gt;&lt;/span&gt;&lt;span class=&#34;line&#34;&gt;&lt;span class=&#34;cl&#34;&gt;&lt;span class=&#34;n&#34;&gt;cache&lt;/span&gt; &lt;span class=&#34;o&#34;&gt;=&lt;/span&gt; &lt;span class=&#34;p&#34;&gt;{}&lt;/span&gt;
&lt;/span&gt;&lt;/span&gt;&lt;span class=&#34;line&#34;&gt;&lt;span class=&#34;cl&#34;&gt;&lt;span class=&#34;n&#34;&gt;total_chunk_num&lt;/span&gt; &lt;span class=&#34;o&#34;&gt;=&lt;/span&gt; &lt;span class=&#34;nb&#34;&gt;int&lt;/span&gt;&lt;span class=&#34;p&#34;&gt;(&lt;/span&gt;&lt;span class=&#34;nb&#34;&gt;len&lt;/span&gt;&lt;span class=&#34;p&#34;&gt;((&lt;/span&gt;&lt;span class=&#34;n&#34;&gt;speech&lt;/span&gt;&lt;span class=&#34;p&#34;&gt;)&lt;/span&gt;&lt;span class=&#34;o&#34;&gt;-&lt;/span&gt;&lt;span class=&#34;mi&#34;&gt;1&lt;/span&gt;&lt;span class=&#34;p&#34;&gt;)&lt;/span&gt;&lt;span class=&#34;o&#34;&gt;/&lt;/span&gt;&lt;span class=&#34;n&#34;&gt;chunk_stride&lt;/span&gt;&lt;span class=&#34;o&#34;&gt;+&lt;/span&gt;&lt;span class=&#34;mi&#34;&gt;1&lt;/span&gt;&lt;span class=&#34;p&#34;&gt;)&lt;/span&gt;
&lt;/span&gt;&lt;/span&gt;&lt;span class=&#34;line&#34;&gt;&lt;span class=&#34;cl&#34;&gt;&lt;span class=&#34;k&#34;&gt;for&lt;/span&gt; &lt;span class=&#34;n&#34;&gt;i&lt;/span&gt; &lt;span class=&#34;ow&#34;&gt;in&lt;/span&gt; &lt;span class=&#34;nb&#34;&gt;range&lt;/span&gt;&lt;span class=&#34;p&#34;&gt;(&lt;/span&gt;&lt;span class=&#34;n&#34;&gt;total_chunk_num&lt;/span&gt;&lt;span class=&#34;p&#34;&gt;):&lt;/span&gt;
&lt;/span&gt;&lt;/span&gt;&lt;span class=&#34;line&#34;&gt;&lt;span class=&#34;cl&#34;&gt;    &lt;span class=&#34;n&#34;&gt;speech_chunk&lt;/span&gt; &lt;span class=&#34;o&#34;&gt;=&lt;/span&gt; &lt;span class=&#34;n&#34;&gt;speech&lt;/span&gt;&lt;span class=&#34;p&#34;&gt;[&lt;/span&gt;&lt;span class=&#34;n&#34;&gt;i&lt;/span&gt;&lt;span class=&#34;o&#34;&gt;*&lt;/span&gt;&lt;span class=&#34;n&#34;&gt;chunk_stride&lt;/span&gt;&lt;span class=&#34;p&#34;&gt;:(&lt;/span&gt;&lt;span class=&#34;n&#34;&gt;i&lt;/span&gt;&lt;span class=&#34;o&#34;&gt;+&lt;/span&gt;&lt;span class=&#34;mi&#34;&gt;1&lt;/span&gt;&lt;span class=&#34;p&#34;&gt;)&lt;/span&gt;&lt;span class=&#34;o&#34;&gt;*&lt;/span&gt;&lt;span class=&#34;n&#34;&gt;chunk_stride&lt;/span&gt;&lt;span class=&#34;p&#34;&gt;]&lt;/span&gt;
&lt;/span&gt;&lt;/span&gt;&lt;span class=&#34;line&#34;&gt;&lt;span class=&#34;cl&#34;&gt;    &lt;span class=&#34;n&#34;&gt;is_final&lt;/span&gt; &lt;span class=&#34;o&#34;&gt;=&lt;/span&gt; &lt;span class=&#34;n&#34;&gt;i&lt;/span&gt; &lt;span class=&#34;o&#34;&gt;==&lt;/span&gt; &lt;span class=&#34;n&#34;&gt;total_chunk_num&lt;/span&gt; &lt;span class=&#34;o&#34;&gt;-&lt;/span&gt; &lt;span class=&#34;mi&#34;&gt;1&lt;/span&gt;
&lt;/span&gt;&lt;/span&gt;&lt;span class=&#34;line&#34;&gt;&lt;span class=&#34;cl&#34;&gt;    &lt;span class=&#34;n&#34;&gt;res&lt;/span&gt; &lt;span class=&#34;o&#34;&gt;=&lt;/span&gt; &lt;span class=&#34;n&#34;&gt;model&lt;/span&gt;&lt;span class=&#34;o&#34;&gt;.&lt;/span&gt;&lt;span class=&#34;n&#34;&gt;generate&lt;/span&gt;&lt;span class=&#34;p&#34;&gt;(&lt;/span&gt;&lt;span class=&#34;nb&#34;&gt;input&lt;/span&gt;&lt;span class=&#34;o&#34;&gt;=&lt;/span&gt;&lt;span class=&#34;n&#34;&gt;speech_chunk&lt;/span&gt;&lt;span class=&#34;p&#34;&gt;,&lt;/span&gt; &lt;span class=&#34;n&#34;&gt;cache&lt;/span&gt;&lt;span class=&#34;o&#34;&gt;=&lt;/span&gt;&lt;span class=&#34;n&#34;&gt;cache&lt;/span&gt;&lt;span class=&#34;p&#34;&gt;,&lt;/span&gt; &lt;span class=&#34;n&#34;&gt;is_final&lt;/span&gt;&lt;span class=&#34;o&#34;&gt;=&lt;/span&gt;&lt;span class=&#34;n&#34;&gt;is_final&lt;/span&gt;&lt;span class=&#34;p&#34;&gt;,&lt;/span&gt; &lt;span class=&#34;n&#34;&gt;chunk_size&lt;/span&gt;&lt;span class=&#34;o&#34;&gt;=&lt;/span&gt;&lt;span class=&#34;n&#34;&gt;chunk_size&lt;/span&gt;&lt;span class=&#34;p&#34;&gt;,&lt;/span&gt; &lt;span class=&#34;n&#34;&gt;encoder_chunk_look_back&lt;/span&gt;&lt;span class=&#34;o&#34;&gt;=&lt;/span&gt;&lt;span class=&#34;n&#34;&gt;encoder_chunk_look_back&lt;/span&gt;&lt;span class=&#34;p&#34;&gt;,&lt;/span&gt; &lt;span class=&#34;n&#34;&gt;decoder_chunk_look_back&lt;/span&gt;&lt;span class=&#34;o&#34;&gt;=&lt;/span&gt;&lt;span class=&#34;n&#34;&gt;decoder_chunk_look_back&lt;/span&gt;&lt;span class=&#34;p&#34;&gt;)&lt;/span&gt;
&lt;/span&gt;&lt;/span&gt;&lt;span class=&#34;line&#34;&gt;&lt;span class=&#34;cl&#34;&gt;    &lt;span class=&#34;nb&#34;&gt;print&lt;/span&gt;&lt;span class=&#34;p&#34;&gt;(&lt;/span&gt;&lt;span class=&#34;n&#34;&gt;res&lt;/span&gt;&lt;span class=&#34;p&#34;&gt;)&lt;/span&gt;
&lt;/span&gt;&lt;/span&gt;&lt;/code&gt;&lt;/pre&gt;&lt;/td&gt;&lt;/tr&gt;&lt;/table&gt;
&lt;/div&gt;
&lt;/div&gt;&lt;p&gt;Note: &lt;code&gt;chunk_size&lt;/code&gt; is the configuration for streaming latency. &lt;code&gt;[0,10,5]&lt;/code&gt; indicates that the real-time display granularity is &lt;code&gt;10*60=600ms&lt;/code&gt;, and the lookahead information is &lt;code&gt;5*60=300ms&lt;/code&gt;. Each inference input is &lt;code&gt;600ms&lt;/code&gt; (&lt;code&gt;16000*0.6=9600&lt;/code&gt; sample points), and the output is the corresponding text. For the last speech segment, &lt;code&gt;is_final=True&lt;/code&gt; must be set to force the output of the last word.&lt;/p&gt;
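&lt;p&gt;To make the stride arithmetic concrete, a worked check (assuming 16 kHz audio, as in the example):&lt;/p&gt;
&lt;pre&gt;&lt;code class=&#34;language-python&#34;&gt;sample_rate = 16000      # Hz
frame_ms = 60            # each chunk_size unit corresponds to 60 ms

chunk_size = [0, 10, 5]  # 10 * 60 ms = 600 ms per inference step
chunk_stride = chunk_size[1] * frame_ms * sample_rate // 1000
assert chunk_stride == chunk_size[1] * 960 == 9600  # samples, i.e. 600 ms

# With chunk_size = [0, 8, 4], the step would be 8 * 60 ms = 480 ms
# (7680 samples), with 4 * 60 = 240 ms of lookahead.
&lt;/code&gt;&lt;/pre&gt;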
&lt;details&gt;&lt;summary&gt;More Examples&lt;/summary&gt;
&lt;h3 id=&#34;voice-activity-detection-non-streaming&#34;&gt;Voice Activity Detection (Non-Streaming)
&lt;/h3&gt;&lt;div class=&#34;highlight&#34;&gt;&lt;div class=&#34;chroma&#34;&gt;
&lt;table class=&#34;lntable&#34;&gt;&lt;tr&gt;&lt;td class=&#34;lntd&#34;&gt;
&lt;pre tabindex=&#34;0&#34; class=&#34;chroma&#34;&gt;&lt;code&gt;&lt;span class=&#34;lnt&#34;&gt;1
&lt;/span&gt;&lt;span class=&#34;lnt&#34;&gt;2
&lt;/span&gt;&lt;span class=&#34;lnt&#34;&gt;3
&lt;/span&gt;&lt;span class=&#34;lnt&#34;&gt;4
&lt;/span&gt;&lt;span class=&#34;lnt&#34;&gt;5
&lt;/span&gt;&lt;span class=&#34;lnt&#34;&gt;6
&lt;/span&gt;&lt;/code&gt;&lt;/pre&gt;&lt;/td&gt;
&lt;td class=&#34;lntd&#34;&gt;
&lt;pre tabindex=&#34;0&#34; class=&#34;chroma&#34;&gt;&lt;code class=&#34;language-python&#34; data-lang=&#34;python&#34;&gt;&lt;span class=&#34;line&#34;&gt;&lt;span class=&#34;cl&#34;&gt;&lt;span class=&#34;kn&#34;&gt;from&lt;/span&gt; &lt;span class=&#34;nn&#34;&gt;funasr&lt;/span&gt; &lt;span class=&#34;kn&#34;&gt;import&lt;/span&gt; &lt;span class=&#34;n&#34;&gt;AutoModel&lt;/span&gt;
&lt;/span&gt;&lt;/span&gt;&lt;span class=&#34;line&#34;&gt;&lt;span class=&#34;cl&#34;&gt;
&lt;/span&gt;&lt;/span&gt;&lt;span class=&#34;line&#34;&gt;&lt;span class=&#34;cl&#34;&gt;&lt;span class=&#34;n&#34;&gt;model&lt;/span&gt; &lt;span class=&#34;o&#34;&gt;=&lt;/span&gt; &lt;span class=&#34;n&#34;&gt;AutoModel&lt;/span&gt;&lt;span class=&#34;p&#34;&gt;(&lt;/span&gt;&lt;span class=&#34;n&#34;&gt;model&lt;/span&gt;&lt;span class=&#34;o&#34;&gt;=&lt;/span&gt;&lt;span class=&#34;s2&#34;&gt;&amp;#34;fsmn-vad&amp;#34;&lt;/span&gt;&lt;span class=&#34;p&#34;&gt;)&lt;/span&gt;
&lt;/span&gt;&lt;/span&gt;&lt;span class=&#34;line&#34;&gt;&lt;span class=&#34;cl&#34;&gt;&lt;span class=&#34;n&#34;&gt;wav_file&lt;/span&gt; &lt;span class=&#34;o&#34;&gt;=&lt;/span&gt; &lt;span class=&#34;sa&#34;&gt;f&lt;/span&gt;&lt;span class=&#34;s2&#34;&gt;&amp;#34;&lt;/span&gt;&lt;span class=&#34;si&#34;&gt;{&lt;/span&gt;&lt;span class=&#34;n&#34;&gt;model&lt;/span&gt;&lt;span class=&#34;o&#34;&gt;.&lt;/span&gt;&lt;span class=&#34;n&#34;&gt;model_path&lt;/span&gt;&lt;span class=&#34;si&#34;&gt;}&lt;/span&gt;&lt;span class=&#34;s2&#34;&gt;/example/vad_example.wav&amp;#34;&lt;/span&gt;
&lt;/span&gt;&lt;/span&gt;&lt;span class=&#34;line&#34;&gt;&lt;span class=&#34;cl&#34;&gt;&lt;span class=&#34;n&#34;&gt;res&lt;/span&gt; &lt;span class=&#34;o&#34;&gt;=&lt;/span&gt; &lt;span class=&#34;n&#34;&gt;model&lt;/span&gt;&lt;span class=&#34;o&#34;&gt;.&lt;/span&gt;&lt;span class=&#34;n&#34;&gt;generate&lt;/span&gt;&lt;span class=&#34;p&#34;&gt;(&lt;/span&gt;&lt;span class=&#34;nb&#34;&gt;input&lt;/span&gt;&lt;span class=&#34;o&#34;&gt;=&lt;/span&gt;&lt;span class=&#34;n&#34;&gt;wav_file&lt;/span&gt;&lt;span class=&#34;p&#34;&gt;)&lt;/span&gt;
&lt;/span&gt;&lt;/span&gt;&lt;span class=&#34;line&#34;&gt;&lt;span class=&#34;cl&#34;&gt;&lt;span class=&#34;nb&#34;&gt;print&lt;/span&gt;&lt;span class=&#34;p&#34;&gt;(&lt;/span&gt;&lt;span class=&#34;n&#34;&gt;res&lt;/span&gt;&lt;span class=&#34;p&#34;&gt;)&lt;/span&gt;
&lt;/span&gt;&lt;/span&gt;&lt;/code&gt;&lt;/pre&gt;&lt;/td&gt;&lt;/tr&gt;&lt;/table&gt;
&lt;/div&gt;
&lt;/div&gt;&lt;p&gt;Note: The output format of the VAD model is: &lt;code&gt;[[beg1, end1], [beg2, end2], ..., [begN, endN]]&lt;/code&gt;, where &lt;code&gt;begN/endN&lt;/code&gt; indicates the starting/ending point of the &lt;code&gt;N-th&lt;/code&gt; valid audio segment, measured in milliseconds.&lt;/p&gt;
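&lt;p&gt;Since the boundaries are in milliseconds, extracting the detected speech from the waveform is a small conversion. A minimal sketch, assuming the segment list is returned under the &lt;code&gt;value&lt;/code&gt; key as in the streaming example below:&lt;/p&gt;
&lt;pre&gt;&lt;code class=&#34;language-python&#34;&gt;import soundfile

from funasr import AutoModel

model = AutoModel(model=&amp;#34;fsmn-vad&amp;#34;)
wav_file = f&amp;#34;{model.model_path}/example/vad_example.wav&amp;#34;
res = model.generate(input=wav_file)

speech, sample_rate = soundfile.read(wav_file)
for beg_ms, end_ms in res[0][&amp;#34;value&amp;#34;]:
    # Convert millisecond boundaries to sample indices before slicing.
    segment = speech[int(beg_ms * sample_rate / 1000):int(end_ms * sample_rate / 1000)]
    print(beg_ms, end_ms, len(segment))
&lt;/code&gt;&lt;/pre&gt;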
&lt;h3 id=&#34;voice-activity-detection-streaming&#34;&gt;Voice Activity Detection (Streaming)
&lt;/h3&gt;&lt;div class=&#34;highlight&#34;&gt;&lt;div class=&#34;chroma&#34;&gt;
&lt;table class=&#34;lntable&#34;&gt;&lt;tr&gt;&lt;td class=&#34;lntd&#34;&gt;
&lt;pre tabindex=&#34;0&#34; class=&#34;chroma&#34;&gt;&lt;code&gt;&lt;span class=&#34;lnt&#34;&gt; 1
&lt;/span&gt;&lt;span class=&#34;lnt&#34;&gt; 2
&lt;/span&gt;&lt;span class=&#34;lnt&#34;&gt; 3
&lt;/span&gt;&lt;span class=&#34;lnt&#34;&gt; 4
&lt;/span&gt;&lt;span class=&#34;lnt&#34;&gt; 5
&lt;/span&gt;&lt;span class=&#34;lnt&#34;&gt; 6
&lt;/span&gt;&lt;span class=&#34;lnt&#34;&gt; 7
&lt;/span&gt;&lt;span class=&#34;lnt&#34;&gt; 8
&lt;/span&gt;&lt;span class=&#34;lnt&#34;&gt; 9
&lt;/span&gt;&lt;span class=&#34;lnt&#34;&gt;10
&lt;/span&gt;&lt;span class=&#34;lnt&#34;&gt;11
&lt;/span&gt;&lt;span class=&#34;lnt&#34;&gt;12
&lt;/span&gt;&lt;span class=&#34;lnt&#34;&gt;13
&lt;/span&gt;&lt;span class=&#34;lnt&#34;&gt;14
&lt;/span&gt;&lt;span class=&#34;lnt&#34;&gt;15
&lt;/span&gt;&lt;span class=&#34;lnt&#34;&gt;16
&lt;/span&gt;&lt;span class=&#34;lnt&#34;&gt;17
&lt;/span&gt;&lt;span class=&#34;lnt&#34;&gt;18
&lt;/span&gt;&lt;span class=&#34;lnt&#34;&gt;19
&lt;/span&gt;&lt;/code&gt;&lt;/pre&gt;&lt;/td&gt;
&lt;td class=&#34;lntd&#34;&gt;
&lt;pre tabindex=&#34;0&#34; class=&#34;chroma&#34;&gt;&lt;code class=&#34;language-python&#34; data-lang=&#34;python&#34;&gt;&lt;span class=&#34;line&#34;&gt;&lt;span class=&#34;cl&#34;&gt;&lt;span class=&#34;kn&#34;&gt;from&lt;/span&gt; &lt;span class=&#34;nn&#34;&gt;funasr&lt;/span&gt; &lt;span class=&#34;kn&#34;&gt;import&lt;/span&gt; &lt;span class=&#34;n&#34;&gt;AutoModel&lt;/span&gt;
&lt;/span&gt;&lt;/span&gt;&lt;span class=&#34;line&#34;&gt;&lt;span class=&#34;cl&#34;&gt;
&lt;/span&gt;&lt;/span&gt;&lt;span class=&#34;line&#34;&gt;&lt;span class=&#34;cl&#34;&gt;&lt;span class=&#34;n&#34;&gt;chunk_size&lt;/span&gt; &lt;span class=&#34;o&#34;&gt;=&lt;/span&gt; &lt;span class=&#34;mi&#34;&gt;200&lt;/span&gt; &lt;span class=&#34;c1&#34;&gt;# ms&lt;/span&gt;
&lt;/span&gt;&lt;/span&gt;&lt;span class=&#34;line&#34;&gt;&lt;span class=&#34;cl&#34;&gt;&lt;span class=&#34;n&#34;&gt;model&lt;/span&gt; &lt;span class=&#34;o&#34;&gt;=&lt;/span&gt; &lt;span class=&#34;n&#34;&gt;AutoModel&lt;/span&gt;&lt;span class=&#34;p&#34;&gt;(&lt;/span&gt;&lt;span class=&#34;n&#34;&gt;model&lt;/span&gt;&lt;span class=&#34;o&#34;&gt;=&lt;/span&gt;&lt;span class=&#34;s2&#34;&gt;&amp;#34;fsmn-vad&amp;#34;&lt;/span&gt;&lt;span class=&#34;p&#34;&gt;)&lt;/span&gt;
&lt;/span&gt;&lt;/span&gt;&lt;span class=&#34;line&#34;&gt;&lt;span class=&#34;cl&#34;&gt;
&lt;/span&gt;&lt;/span&gt;&lt;span class=&#34;line&#34;&gt;&lt;span class=&#34;cl&#34;&gt;&lt;span class=&#34;kn&#34;&gt;import&lt;/span&gt; &lt;span class=&#34;nn&#34;&gt;soundfile&lt;/span&gt;
&lt;/span&gt;&lt;/span&gt;&lt;span class=&#34;line&#34;&gt;&lt;span class=&#34;cl&#34;&gt;
&lt;/span&gt;&lt;/span&gt;&lt;span class=&#34;line&#34;&gt;&lt;span class=&#34;cl&#34;&gt;&lt;span class=&#34;n&#34;&gt;wav_file&lt;/span&gt; &lt;span class=&#34;o&#34;&gt;=&lt;/span&gt; &lt;span class=&#34;sa&#34;&gt;f&lt;/span&gt;&lt;span class=&#34;s2&#34;&gt;&amp;#34;&lt;/span&gt;&lt;span class=&#34;si&#34;&gt;{&lt;/span&gt;&lt;span class=&#34;n&#34;&gt;model&lt;/span&gt;&lt;span class=&#34;o&#34;&gt;.&lt;/span&gt;&lt;span class=&#34;n&#34;&gt;model_path&lt;/span&gt;&lt;span class=&#34;si&#34;&gt;}&lt;/span&gt;&lt;span class=&#34;s2&#34;&gt;/example/vad_example.wav&amp;#34;&lt;/span&gt;
&lt;/span&gt;&lt;/span&gt;&lt;span class=&#34;line&#34;&gt;&lt;span class=&#34;cl&#34;&gt;&lt;span class=&#34;n&#34;&gt;speech&lt;/span&gt;&lt;span class=&#34;p&#34;&gt;,&lt;/span&gt; &lt;span class=&#34;n&#34;&gt;sample_rate&lt;/span&gt; &lt;span class=&#34;o&#34;&gt;=&lt;/span&gt; &lt;span class=&#34;n&#34;&gt;soundfile&lt;/span&gt;&lt;span class=&#34;o&#34;&gt;.&lt;/span&gt;&lt;span class=&#34;n&#34;&gt;read&lt;/span&gt;&lt;span class=&#34;p&#34;&gt;(&lt;/span&gt;&lt;span class=&#34;n&#34;&gt;wav_file&lt;/span&gt;&lt;span class=&#34;p&#34;&gt;)&lt;/span&gt;
&lt;/span&gt;&lt;/span&gt;&lt;span class=&#34;line&#34;&gt;&lt;span class=&#34;cl&#34;&gt;&lt;span class=&#34;n&#34;&gt;chunk_stride&lt;/span&gt; &lt;span class=&#34;o&#34;&gt;=&lt;/span&gt; &lt;span class=&#34;nb&#34;&gt;int&lt;/span&gt;&lt;span class=&#34;p&#34;&gt;(&lt;/span&gt;&lt;span class=&#34;n&#34;&gt;chunk_size&lt;/span&gt; &lt;span class=&#34;o&#34;&gt;*&lt;/span&gt; &lt;span class=&#34;n&#34;&gt;sample_rate&lt;/span&gt; &lt;span class=&#34;o&#34;&gt;/&lt;/span&gt; &lt;span class=&#34;mi&#34;&gt;1000&lt;/span&gt;&lt;span class=&#34;p&#34;&gt;)&lt;/span&gt;
&lt;/span&gt;&lt;/span&gt;&lt;span class=&#34;line&#34;&gt;&lt;span class=&#34;cl&#34;&gt;
&lt;/span&gt;&lt;/span&gt;&lt;span class=&#34;line&#34;&gt;&lt;span class=&#34;cl&#34;&gt;&lt;span class=&#34;n&#34;&gt;cache&lt;/span&gt; &lt;span class=&#34;o&#34;&gt;=&lt;/span&gt; &lt;span class=&#34;p&#34;&gt;{}&lt;/span&gt;
&lt;/span&gt;&lt;/span&gt;&lt;span class=&#34;line&#34;&gt;&lt;span class=&#34;cl&#34;&gt;&lt;span class=&#34;n&#34;&gt;total_chunk_num&lt;/span&gt; &lt;span class=&#34;o&#34;&gt;=&lt;/span&gt; &lt;span class=&#34;nb&#34;&gt;int&lt;/span&gt;&lt;span class=&#34;p&#34;&gt;(&lt;/span&gt;&lt;span class=&#34;nb&#34;&gt;len&lt;/span&gt;&lt;span class=&#34;p&#34;&gt;((&lt;/span&gt;&lt;span class=&#34;n&#34;&gt;speech&lt;/span&gt;&lt;span class=&#34;p&#34;&gt;)&lt;/span&gt;&lt;span class=&#34;o&#34;&gt;-&lt;/span&gt;&lt;span class=&#34;mi&#34;&gt;1&lt;/span&gt;&lt;span class=&#34;p&#34;&gt;)&lt;/span&gt;&lt;span class=&#34;o&#34;&gt;/&lt;/span&gt;&lt;span class=&#34;n&#34;&gt;chunk_stride&lt;/span&gt;&lt;span class=&#34;o&#34;&gt;+&lt;/span&gt;&lt;span class=&#34;mi&#34;&gt;1&lt;/span&gt;&lt;span class=&#34;p&#34;&gt;)&lt;/span&gt;
&lt;/span&gt;&lt;/span&gt;&lt;span class=&#34;line&#34;&gt;&lt;span class=&#34;cl&#34;&gt;&lt;span class=&#34;k&#34;&gt;for&lt;/span&gt; &lt;span class=&#34;n&#34;&gt;i&lt;/span&gt; &lt;span class=&#34;ow&#34;&gt;in&lt;/span&gt; &lt;span class=&#34;nb&#34;&gt;range&lt;/span&gt;&lt;span class=&#34;p&#34;&gt;(&lt;/span&gt;&lt;span class=&#34;n&#34;&gt;total_chunk_num&lt;/span&gt;&lt;span class=&#34;p&#34;&gt;):&lt;/span&gt;
&lt;/span&gt;&lt;/span&gt;&lt;span class=&#34;line&#34;&gt;&lt;span class=&#34;cl&#34;&gt;    &lt;span class=&#34;n&#34;&gt;speech_chunk&lt;/span&gt; &lt;span class=&#34;o&#34;&gt;=&lt;/span&gt; &lt;span class=&#34;n&#34;&gt;speech&lt;/span&gt;&lt;span class=&#34;p&#34;&gt;[&lt;/span&gt;&lt;span class=&#34;n&#34;&gt;i&lt;/span&gt;&lt;span class=&#34;o&#34;&gt;*&lt;/span&gt;&lt;span class=&#34;n&#34;&gt;chunk_stride&lt;/span&gt;&lt;span class=&#34;p&#34;&gt;:(&lt;/span&gt;&lt;span class=&#34;n&#34;&gt;i&lt;/span&gt;&lt;span class=&#34;o&#34;&gt;+&lt;/span&gt;&lt;span class=&#34;mi&#34;&gt;1&lt;/span&gt;&lt;span class=&#34;p&#34;&gt;)&lt;/span&gt;&lt;span class=&#34;o&#34;&gt;*&lt;/span&gt;&lt;span class=&#34;n&#34;&gt;chunk_stride&lt;/span&gt;&lt;span class=&#34;p&#34;&gt;]&lt;/span&gt;
&lt;/span&gt;&lt;/span&gt;&lt;span class=&#34;line&#34;&gt;&lt;span class=&#34;cl&#34;&gt;    &lt;span class=&#34;n&#34;&gt;is_final&lt;/span&gt; &lt;span class=&#34;o&#34;&gt;=&lt;/span&gt; &lt;span class=&#34;n&#34;&gt;i&lt;/span&gt; &lt;span class=&#34;o&#34;&gt;==&lt;/span&gt; &lt;span class=&#34;n&#34;&gt;total_chunk_num&lt;/span&gt; &lt;span class=&#34;o&#34;&gt;-&lt;/span&gt; &lt;span class=&#34;mi&#34;&gt;1&lt;/span&gt;
&lt;/span&gt;&lt;/span&gt;&lt;span class=&#34;line&#34;&gt;&lt;span class=&#34;cl&#34;&gt;    &lt;span class=&#34;n&#34;&gt;res&lt;/span&gt; &lt;span class=&#34;o&#34;&gt;=&lt;/span&gt; &lt;span class=&#34;n&#34;&gt;model&lt;/span&gt;&lt;span class=&#34;o&#34;&gt;.&lt;/span&gt;&lt;span class=&#34;n&#34;&gt;generate&lt;/span&gt;&lt;span class=&#34;p&#34;&gt;(&lt;/span&gt;&lt;span class=&#34;nb&#34;&gt;input&lt;/span&gt;&lt;span class=&#34;o&#34;&gt;=&lt;/span&gt;&lt;span class=&#34;n&#34;&gt;speech_chunk&lt;/span&gt;&lt;span class=&#34;p&#34;&gt;,&lt;/span&gt; &lt;span class=&#34;n&#34;&gt;cache&lt;/span&gt;&lt;span class=&#34;o&#34;&gt;=&lt;/span&gt;&lt;span class=&#34;n&#34;&gt;cache&lt;/span&gt;&lt;span class=&#34;p&#34;&gt;,&lt;/span&gt; &lt;span class=&#34;n&#34;&gt;is_final&lt;/span&gt;&lt;span class=&#34;o&#34;&gt;=&lt;/span&gt;&lt;span class=&#34;n&#34;&gt;is_final&lt;/span&gt;&lt;span class=&#34;p&#34;&gt;,&lt;/span&gt; &lt;span class=&#34;n&#34;&gt;chunk_size&lt;/span&gt;&lt;span class=&#34;o&#34;&gt;=&lt;/span&gt;&lt;span class=&#34;n&#34;&gt;chunk_size&lt;/span&gt;&lt;span class=&#34;p&#34;&gt;)&lt;/span&gt;
&lt;/span&gt;&lt;/span&gt;&lt;span class=&#34;line&#34;&gt;&lt;span class=&#34;cl&#34;&gt;    &lt;span class=&#34;k&#34;&gt;if&lt;/span&gt; &lt;span class=&#34;nb&#34;&gt;len&lt;/span&gt;&lt;span class=&#34;p&#34;&gt;(&lt;/span&gt;&lt;span class=&#34;n&#34;&gt;res&lt;/span&gt;&lt;span class=&#34;p&#34;&gt;[&lt;/span&gt;&lt;span class=&#34;mi&#34;&gt;0&lt;/span&gt;&lt;span class=&#34;p&#34;&gt;][&lt;/span&gt;&lt;span class=&#34;s2&#34;&gt;&amp;#34;value&amp;#34;&lt;/span&gt;&lt;span class=&#34;p&#34;&gt;]):&lt;/span&gt;
&lt;/span&gt;&lt;/span&gt;&lt;span class=&#34;line&#34;&gt;&lt;span class=&#34;cl&#34;&gt;        &lt;span class=&#34;nb&#34;&gt;print&lt;/span&gt;&lt;span class=&#34;p&#34;&gt;(&lt;/span&gt;&lt;span class=&#34;n&#34;&gt;res&lt;/span&gt;&lt;span class=&#34;p&#34;&gt;)&lt;/span&gt;
&lt;/span&gt;&lt;/span&gt;&lt;/code&gt;&lt;/pre&gt;&lt;/td&gt;&lt;/tr&gt;&lt;/table&gt;
&lt;/div&gt;
&lt;/div&gt;&lt;p&gt;Note: the output of the streaming VAD model takes one of four forms:&lt;/p&gt;
&lt;ul&gt;
&lt;li&gt;&lt;code&gt;[[beg1, end1], [beg2, end2], .., [begN, endN]]&lt;/code&gt;: the same as the offline VAD output described above.&lt;/li&gt;
&lt;li&gt;&lt;code&gt;[[beg, -1]]&lt;/code&gt;: only a starting point has been detected so far.&lt;/li&gt;
&lt;li&gt;&lt;code&gt;[[-1, end]]&lt;/code&gt;: only an ending point has been detected.&lt;/li&gt;
&lt;li&gt;&lt;code&gt;[]&lt;/code&gt;: neither a starting point nor an ending point has been detected in this chunk.&lt;/li&gt;
&lt;/ul&gt;
&lt;p&gt;All timestamps are in milliseconds, measured as absolute offsets from the beginning of the audio.&lt;/p&gt;
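&lt;p&gt;As an illustration of how a caller might consume these four shapes, here is a minimal sketch (not part of the FunASR API; the function name and input layout are assumptions for illustration) that stitches the per-chunk &lt;code&gt;value&lt;/code&gt; lists from the streaming loop above into complete segments:&lt;/p&gt;
&lt;pre tabindex=&#34;0&#34;&gt;&lt;code class=&#34;language-python&#34; data-lang=&#34;python&#34;&gt;# Illustrative sketch only: merge streaming VAD outputs into [beg, end] pairs.
# chunk_values is a list holding the per-chunk output lists shown above.
def merge_streaming_vad(chunk_values):
    segments = []       # completed [beg, end] segments, in ms
    pending_beg = None  # start time of a segment whose end has not arrived yet
    for values in chunk_values:
        for beg, end in values:
            if beg != -1 and end != -1:    # complete segment within one chunk
                segments.append([beg, end])
            elif end == -1:                # [[beg, -1]]: start point only
                pending_beg = beg
            elif pending_beg is not None:  # [[-1, end]]: end point only
                segments.append([pending_beg, end])
                pending_beg = None
    return segments
&lt;/code&gt;&lt;/pre&gt;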
&lt;h3 id=&#34;punctuation-restoration&#34;&gt;Punctuation Restoration
&lt;/h3&gt;&lt;div class=&#34;highlight&#34;&gt;&lt;div class=&#34;chroma&#34;&gt;
&lt;table class=&#34;lntable&#34;&gt;&lt;tr&gt;&lt;td class=&#34;lntd&#34;&gt;
&lt;pre tabindex=&#34;0&#34; class=&#34;chroma&#34;&gt;&lt;code&gt;&lt;span class=&#34;lnt&#34;&gt;1
&lt;/span&gt;&lt;span class=&#34;lnt&#34;&gt;2
&lt;/span&gt;&lt;span class=&#34;lnt&#34;&gt;3
&lt;/span&gt;&lt;span class=&#34;lnt&#34;&gt;4
&lt;/span&gt;&lt;span class=&#34;lnt&#34;&gt;5
&lt;/span&gt;&lt;/code&gt;&lt;/pre&gt;&lt;/td&gt;
&lt;td class=&#34;lntd&#34;&gt;
&lt;pre tabindex=&#34;0&#34; class=&#34;chroma&#34;&gt;&lt;code class=&#34;language-python&#34; data-lang=&#34;python&#34;&gt;&lt;span class=&#34;line&#34;&gt;&lt;span class=&#34;cl&#34;&gt;&lt;span class=&#34;kn&#34;&gt;from&lt;/span&gt; &lt;span class=&#34;nn&#34;&gt;funasr&lt;/span&gt; &lt;span class=&#34;kn&#34;&gt;import&lt;/span&gt; &lt;span class=&#34;n&#34;&gt;AutoModel&lt;/span&gt;
&lt;/span&gt;&lt;/span&gt;&lt;span class=&#34;line&#34;&gt;&lt;span class=&#34;cl&#34;&gt;
&lt;/span&gt;&lt;/span&gt;&lt;span class=&#34;line&#34;&gt;&lt;span class=&#34;cl&#34;&gt;&lt;span class=&#34;n&#34;&gt;model&lt;/span&gt; &lt;span class=&#34;o&#34;&gt;=&lt;/span&gt; &lt;span class=&#34;n&#34;&gt;AutoModel&lt;/span&gt;&lt;span class=&#34;p&#34;&gt;(&lt;/span&gt;&lt;span class=&#34;n&#34;&gt;model&lt;/span&gt;&lt;span class=&#34;o&#34;&gt;=&lt;/span&gt;&lt;span class=&#34;s2&#34;&gt;&amp;#34;ct-punc&amp;#34;&lt;/span&gt;&lt;span class=&#34;p&#34;&gt;)&lt;/span&gt;
&lt;/span&gt;&lt;/span&gt;&lt;span class=&#34;line&#34;&gt;&lt;span class=&#34;cl&#34;&gt;&lt;span class=&#34;n&#34;&gt;res&lt;/span&gt; &lt;span class=&#34;o&#34;&gt;=&lt;/span&gt; &lt;span class=&#34;n&#34;&gt;model&lt;/span&gt;&lt;span class=&#34;o&#34;&gt;.&lt;/span&gt;&lt;span class=&#34;n&#34;&gt;generate&lt;/span&gt;&lt;span class=&#34;p&#34;&gt;(&lt;/span&gt;&lt;span class=&#34;nb&#34;&gt;input&lt;/span&gt;&lt;span class=&#34;o&#34;&gt;=&lt;/span&gt;&lt;span class=&#34;s2&#34;&gt;&amp;#34;那今天的会就到这里吧 happy new year 明年见&amp;#34;&lt;/span&gt;&lt;span class=&#34;p&#34;&gt;)&lt;/span&gt;
&lt;/span&gt;&lt;/span&gt;&lt;span class=&#34;line&#34;&gt;&lt;span class=&#34;cl&#34;&gt;&lt;span class=&#34;nb&#34;&gt;print&lt;/span&gt;&lt;span class=&#34;p&#34;&gt;(&lt;/span&gt;&lt;span class=&#34;n&#34;&gt;res&lt;/span&gt;&lt;span class=&#34;p&#34;&gt;)&lt;/span&gt;
&lt;/span&gt;&lt;/span&gt;&lt;/code&gt;&lt;/pre&gt;&lt;/td&gt;&lt;/tr&gt;&lt;/table&gt;
&lt;/div&gt;
&lt;/div&gt;&lt;h3 id=&#34;timestamp-prediction&#34;&gt;Timestamp Prediction
&lt;/h3&gt;&lt;div class=&#34;highlight&#34;&gt;&lt;div class=&#34;chroma&#34;&gt;
&lt;table class=&#34;lntable&#34;&gt;&lt;tr&gt;&lt;td class=&#34;lntd&#34;&gt;
&lt;pre tabindex=&#34;0&#34; class=&#34;chroma&#34;&gt;&lt;code&gt;&lt;span class=&#34;lnt&#34;&gt;1
&lt;/span&gt;&lt;span class=&#34;lnt&#34;&gt;2
&lt;/span&gt;&lt;span class=&#34;lnt&#34;&gt;3
&lt;/span&gt;&lt;span class=&#34;lnt&#34;&gt;4
&lt;/span&gt;&lt;span class=&#34;lnt&#34;&gt;5
&lt;/span&gt;&lt;span class=&#34;lnt&#34;&gt;6
&lt;/span&gt;&lt;span class=&#34;lnt&#34;&gt;7
&lt;/span&gt;&lt;/code&gt;&lt;/pre&gt;&lt;/td&gt;
&lt;td class=&#34;lntd&#34;&gt;
&lt;pre tabindex=&#34;0&#34; class=&#34;chroma&#34;&gt;&lt;code class=&#34;language-python&#34; data-lang=&#34;python&#34;&gt;&lt;span class=&#34;line&#34;&gt;&lt;span class=&#34;cl&#34;&gt;&lt;span class=&#34;kn&#34;&gt;from&lt;/span&gt; &lt;span class=&#34;nn&#34;&gt;funasr&lt;/span&gt; &lt;span class=&#34;kn&#34;&gt;import&lt;/span&gt; &lt;span class=&#34;n&#34;&gt;AutoModel&lt;/span&gt;
&lt;/span&gt;&lt;/span&gt;&lt;span class=&#34;line&#34;&gt;&lt;span class=&#34;cl&#34;&gt;
&lt;/span&gt;&lt;/span&gt;&lt;span class=&#34;line&#34;&gt;&lt;span class=&#34;cl&#34;&gt;&lt;span class=&#34;n&#34;&gt;model&lt;/span&gt; &lt;span class=&#34;o&#34;&gt;=&lt;/span&gt; &lt;span class=&#34;n&#34;&gt;AutoModel&lt;/span&gt;&lt;span class=&#34;p&#34;&gt;(&lt;/span&gt;&lt;span class=&#34;n&#34;&gt;model&lt;/span&gt;&lt;span class=&#34;o&#34;&gt;=&lt;/span&gt;&lt;span class=&#34;s2&#34;&gt;&amp;#34;fa-zh&amp;#34;&lt;/span&gt;&lt;span class=&#34;p&#34;&gt;)&lt;/span&gt;
&lt;/span&gt;&lt;/span&gt;&lt;span class=&#34;line&#34;&gt;&lt;span class=&#34;cl&#34;&gt;&lt;span class=&#34;n&#34;&gt;wav_file&lt;/span&gt; &lt;span class=&#34;o&#34;&gt;=&lt;/span&gt; &lt;span class=&#34;sa&#34;&gt;f&lt;/span&gt;&lt;span class=&#34;s2&#34;&gt;&amp;#34;&lt;/span&gt;&lt;span class=&#34;si&#34;&gt;{&lt;/span&gt;&lt;span class=&#34;n&#34;&gt;model&lt;/span&gt;&lt;span class=&#34;o&#34;&gt;.&lt;/span&gt;&lt;span class=&#34;n&#34;&gt;model_path&lt;/span&gt;&lt;span class=&#34;si&#34;&gt;}&lt;/span&gt;&lt;span class=&#34;s2&#34;&gt;/example/asr_example.wav&amp;#34;&lt;/span&gt;
&lt;/span&gt;&lt;/span&gt;&lt;span class=&#34;line&#34;&gt;&lt;span class=&#34;cl&#34;&gt;&lt;span class=&#34;n&#34;&gt;text_file&lt;/span&gt; &lt;span class=&#34;o&#34;&gt;=&lt;/span&gt; &lt;span class=&#34;sa&#34;&gt;f&lt;/span&gt;&lt;span class=&#34;s2&#34;&gt;&amp;#34;&lt;/span&gt;&lt;span class=&#34;si&#34;&gt;{&lt;/span&gt;&lt;span class=&#34;n&#34;&gt;model&lt;/span&gt;&lt;span class=&#34;o&#34;&gt;.&lt;/span&gt;&lt;span class=&#34;n&#34;&gt;model_path&lt;/span&gt;&lt;span class=&#34;si&#34;&gt;}&lt;/span&gt;&lt;span class=&#34;s2&#34;&gt;/example/text.txt&amp;#34;&lt;/span&gt;
&lt;/span&gt;&lt;/span&gt;&lt;span class=&#34;line&#34;&gt;&lt;span class=&#34;cl&#34;&gt;&lt;span class=&#34;n&#34;&gt;res&lt;/span&gt; &lt;span class=&#34;o&#34;&gt;=&lt;/span&gt; &lt;span class=&#34;n&#34;&gt;model&lt;/span&gt;&lt;span class=&#34;o&#34;&gt;.&lt;/span&gt;&lt;span class=&#34;n&#34;&gt;generate&lt;/span&gt;&lt;span class=&#34;p&#34;&gt;(&lt;/span&gt;&lt;span class=&#34;nb&#34;&gt;input&lt;/span&gt;&lt;span class=&#34;o&#34;&gt;=&lt;/span&gt;&lt;span class=&#34;p&#34;&gt;(&lt;/span&gt;&lt;span class=&#34;n&#34;&gt;wav_file&lt;/span&gt;&lt;span class=&#34;p&#34;&gt;,&lt;/span&gt; &lt;span class=&#34;n&#34;&gt;text_file&lt;/span&gt;&lt;span class=&#34;p&#34;&gt;),&lt;/span&gt; &lt;span class=&#34;n&#34;&gt;data_type&lt;/span&gt;&lt;span class=&#34;o&#34;&gt;=&lt;/span&gt;&lt;span class=&#34;p&#34;&gt;(&lt;/span&gt;&lt;span class=&#34;s2&#34;&gt;&amp;#34;sound&amp;#34;&lt;/span&gt;&lt;span class=&#34;p&#34;&gt;,&lt;/span&gt; &lt;span class=&#34;s2&#34;&gt;&amp;#34;text&amp;#34;&lt;/span&gt;&lt;span class=&#34;p&#34;&gt;))&lt;/span&gt;
&lt;/span&gt;&lt;/span&gt;&lt;span class=&#34;line&#34;&gt;&lt;span class=&#34;cl&#34;&gt;&lt;span class=&#34;nb&#34;&gt;print&lt;/span&gt;&lt;span class=&#34;p&#34;&gt;(&lt;/span&gt;&lt;span class=&#34;n&#34;&gt;res&lt;/span&gt;&lt;span class=&#34;p&#34;&gt;)&lt;/span&gt;
&lt;/span&gt;&lt;/span&gt;&lt;/code&gt;&lt;/pre&gt;&lt;/td&gt;&lt;/tr&gt;&lt;/table&gt;
&lt;/div&gt;
&lt;/div&gt;&lt;h3 id=&#34;speech-emotion-recognition&#34;&gt;Speech Emotion Recognition
&lt;/h3&gt;&lt;div class=&#34;highlight&#34;&gt;&lt;div class=&#34;chroma&#34;&gt;
&lt;table class=&#34;lntable&#34;&gt;&lt;tr&gt;&lt;td class=&#34;lntd&#34;&gt;
&lt;pre tabindex=&#34;0&#34; class=&#34;chroma&#34;&gt;&lt;code&gt;&lt;span class=&#34;lnt&#34;&gt;1
&lt;/span&gt;&lt;span class=&#34;lnt&#34;&gt;2
&lt;/span&gt;&lt;span class=&#34;lnt&#34;&gt;3
&lt;/span&gt;&lt;span class=&#34;lnt&#34;&gt;4
&lt;/span&gt;&lt;span class=&#34;lnt&#34;&gt;5
&lt;/span&gt;&lt;span class=&#34;lnt&#34;&gt;6
&lt;/span&gt;&lt;span class=&#34;lnt&#34;&gt;7
&lt;/span&gt;&lt;span class=&#34;lnt&#34;&gt;8
&lt;/span&gt;&lt;/code&gt;&lt;/pre&gt;&lt;/td&gt;
&lt;td class=&#34;lntd&#34;&gt;
&lt;pre tabindex=&#34;0&#34; class=&#34;chroma&#34;&gt;&lt;code class=&#34;language-python&#34; data-lang=&#34;python&#34;&gt;&lt;span class=&#34;line&#34;&gt;&lt;span class=&#34;cl&#34;&gt;&lt;span class=&#34;kn&#34;&gt;from&lt;/span&gt; &lt;span class=&#34;nn&#34;&gt;funasr&lt;/span&gt; &lt;span class=&#34;kn&#34;&gt;import&lt;/span&gt; &lt;span class=&#34;n&#34;&gt;AutoModel&lt;/span&gt;
&lt;/span&gt;&lt;/span&gt;&lt;span class=&#34;line&#34;&gt;&lt;span class=&#34;cl&#34;&gt;
&lt;/span&gt;&lt;/span&gt;&lt;span class=&#34;line&#34;&gt;&lt;span class=&#34;cl&#34;&gt;&lt;span class=&#34;n&#34;&gt;model&lt;/span&gt; &lt;span class=&#34;o&#34;&gt;=&lt;/span&gt; &lt;span class=&#34;n&#34;&gt;AutoModel&lt;/span&gt;&lt;span class=&#34;p&#34;&gt;(&lt;/span&gt;&lt;span class=&#34;n&#34;&gt;model&lt;/span&gt;&lt;span class=&#34;o&#34;&gt;=&lt;/span&gt;&lt;span class=&#34;s2&#34;&gt;&amp;#34;emotion2vec_plus_large&amp;#34;&lt;/span&gt;&lt;span class=&#34;p&#34;&gt;)&lt;/span&gt;
&lt;/span&gt;&lt;/span&gt;&lt;span class=&#34;line&#34;&gt;&lt;span class=&#34;cl&#34;&gt;
&lt;/span&gt;&lt;/span&gt;&lt;span class=&#34;line&#34;&gt;&lt;span class=&#34;cl&#34;&gt;&lt;span class=&#34;n&#34;&gt;wav_file&lt;/span&gt; &lt;span class=&#34;o&#34;&gt;=&lt;/span&gt; &lt;span class=&#34;sa&#34;&gt;f&lt;/span&gt;&lt;span class=&#34;s2&#34;&gt;&amp;#34;&lt;/span&gt;&lt;span class=&#34;si&#34;&gt;{&lt;/span&gt;&lt;span class=&#34;n&#34;&gt;model&lt;/span&gt;&lt;span class=&#34;o&#34;&gt;.&lt;/span&gt;&lt;span class=&#34;n&#34;&gt;model_path&lt;/span&gt;&lt;span class=&#34;si&#34;&gt;}&lt;/span&gt;&lt;span class=&#34;s2&#34;&gt;/example/test.wav&amp;#34;&lt;/span&gt;
&lt;/span&gt;&lt;/span&gt;&lt;span class=&#34;line&#34;&gt;&lt;span class=&#34;cl&#34;&gt;
&lt;/span&gt;&lt;/span&gt;&lt;span class=&#34;line&#34;&gt;&lt;span class=&#34;cl&#34;&gt;&lt;span class=&#34;n&#34;&gt;res&lt;/span&gt; &lt;span class=&#34;o&#34;&gt;=&lt;/span&gt; &lt;span class=&#34;n&#34;&gt;model&lt;/span&gt;&lt;span class=&#34;o&#34;&gt;.&lt;/span&gt;&lt;span class=&#34;n&#34;&gt;generate&lt;/span&gt;&lt;span class=&#34;p&#34;&gt;(&lt;/span&gt;&lt;span class=&#34;n&#34;&gt;wav_file&lt;/span&gt;&lt;span class=&#34;p&#34;&gt;,&lt;/span&gt; &lt;span class=&#34;n&#34;&gt;output_dir&lt;/span&gt;&lt;span class=&#34;o&#34;&gt;=&lt;/span&gt;&lt;span class=&#34;s2&#34;&gt;&amp;#34;./outputs&amp;#34;&lt;/span&gt;&lt;span class=&#34;p&#34;&gt;,&lt;/span&gt; &lt;span class=&#34;n&#34;&gt;granularity&lt;/span&gt;&lt;span class=&#34;o&#34;&gt;=&lt;/span&gt;&lt;span class=&#34;s2&#34;&gt;&amp;#34;utterance&amp;#34;&lt;/span&gt;&lt;span class=&#34;p&#34;&gt;,&lt;/span&gt; &lt;span class=&#34;n&#34;&gt;extract_embedding&lt;/span&gt;&lt;span class=&#34;o&#34;&gt;=&lt;/span&gt;&lt;span class=&#34;kc&#34;&gt;False&lt;/span&gt;&lt;span class=&#34;p&#34;&gt;)&lt;/span&gt;
&lt;/span&gt;&lt;/span&gt;&lt;span class=&#34;line&#34;&gt;&lt;span class=&#34;cl&#34;&gt;&lt;span class=&#34;nb&#34;&gt;print&lt;/span&gt;&lt;span class=&#34;p&#34;&gt;(&lt;/span&gt;&lt;span class=&#34;n&#34;&gt;res&lt;/span&gt;&lt;span class=&#34;p&#34;&gt;)&lt;/span&gt;
&lt;/span&gt;&lt;/span&gt;&lt;/code&gt;&lt;/pre&gt;&lt;/td&gt;&lt;/tr&gt;&lt;/table&gt;
&lt;/div&gt;
&lt;/div&gt;&lt;p&gt;For more usage details, refer to the &lt;a class=&#34;link&#34; href=&#34;docs/tutorial/README_zh.md&#34; &gt;docs&lt;/a&gt;;
for more examples, see the &lt;a class=&#34;link&#34; href=&#34;https://github.com/alibaba-damo-academy/FunASR/tree/main/examples/industrial_data_pretraining&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;demo&lt;/a&gt;.&lt;/p&gt;
&lt;/details&gt;
&lt;h2 id=&#34;export-onnx&#34;&gt;Export ONNX
&lt;/h2&gt;&lt;h3 id=&#34;command-line-usage-1&#34;&gt;Command-line usage
&lt;/h3&gt;&lt;div class=&#34;highlight&#34;&gt;&lt;div class=&#34;chroma&#34;&gt;
&lt;table class=&#34;lntable&#34;&gt;&lt;tr&gt;&lt;td class=&#34;lntd&#34;&gt;
&lt;pre tabindex=&#34;0&#34; class=&#34;chroma&#34;&gt;&lt;code&gt;&lt;span class=&#34;lnt&#34;&gt;1
&lt;/span&gt;&lt;/code&gt;&lt;/pre&gt;&lt;/td&gt;
&lt;td class=&#34;lntd&#34;&gt;
&lt;pre tabindex=&#34;0&#34; class=&#34;chroma&#34;&gt;&lt;code class=&#34;language-shell&#34; data-lang=&#34;shell&#34;&gt;&lt;span class=&#34;line&#34;&gt;&lt;span class=&#34;cl&#34;&gt;funasr-export ++model&lt;span class=&#34;o&#34;&gt;=&lt;/span&gt;paraformer ++quantize&lt;span class=&#34;o&#34;&gt;=&lt;/span&gt;&lt;span class=&#34;nb&#34;&gt;false&lt;/span&gt; ++device&lt;span class=&#34;o&#34;&gt;=&lt;/span&gt;cpu
&lt;/span&gt;&lt;/span&gt;&lt;/code&gt;&lt;/pre&gt;&lt;/td&gt;&lt;/tr&gt;&lt;/table&gt;
&lt;/div&gt;
&lt;/div&gt;&lt;h3 id=&#34;python&#34;&gt;Python
&lt;/h3&gt;&lt;div class=&#34;highlight&#34;&gt;&lt;div class=&#34;chroma&#34;&gt;
&lt;table class=&#34;lntable&#34;&gt;&lt;tr&gt;&lt;td class=&#34;lntd&#34;&gt;
&lt;pre tabindex=&#34;0&#34; class=&#34;chroma&#34;&gt;&lt;code&gt;&lt;span class=&#34;lnt&#34;&gt;1
&lt;/span&gt;&lt;span class=&#34;lnt&#34;&gt;2
&lt;/span&gt;&lt;span class=&#34;lnt&#34;&gt;3
&lt;/span&gt;&lt;span class=&#34;lnt&#34;&gt;4
&lt;/span&gt;&lt;span class=&#34;lnt&#34;&gt;5
&lt;/span&gt;&lt;/code&gt;&lt;/pre&gt;&lt;/td&gt;
&lt;td class=&#34;lntd&#34;&gt;
&lt;pre tabindex=&#34;0&#34; class=&#34;chroma&#34;&gt;&lt;code class=&#34;language-python&#34; data-lang=&#34;python&#34;&gt;&lt;span class=&#34;line&#34;&gt;&lt;span class=&#34;cl&#34;&gt;&lt;span class=&#34;kn&#34;&gt;from&lt;/span&gt; &lt;span class=&#34;nn&#34;&gt;funasr&lt;/span&gt; &lt;span class=&#34;kn&#34;&gt;import&lt;/span&gt; &lt;span class=&#34;n&#34;&gt;AutoModel&lt;/span&gt;
&lt;/span&gt;&lt;/span&gt;&lt;span class=&#34;line&#34;&gt;&lt;span class=&#34;cl&#34;&gt;
&lt;/span&gt;&lt;/span&gt;&lt;span class=&#34;line&#34;&gt;&lt;span class=&#34;cl&#34;&gt;&lt;span class=&#34;n&#34;&gt;model&lt;/span&gt; &lt;span class=&#34;o&#34;&gt;=&lt;/span&gt; &lt;span class=&#34;n&#34;&gt;AutoModel&lt;/span&gt;&lt;span class=&#34;p&#34;&gt;(&lt;/span&gt;&lt;span class=&#34;n&#34;&gt;model&lt;/span&gt;&lt;span class=&#34;o&#34;&gt;=&lt;/span&gt;&lt;span class=&#34;s2&#34;&gt;&amp;#34;paraformer&amp;#34;&lt;/span&gt;&lt;span class=&#34;p&#34;&gt;,&lt;/span&gt; &lt;span class=&#34;n&#34;&gt;device&lt;/span&gt;&lt;span class=&#34;o&#34;&gt;=&lt;/span&gt;&lt;span class=&#34;s2&#34;&gt;&amp;#34;cpu&amp;#34;&lt;/span&gt;&lt;span class=&#34;p&#34;&gt;)&lt;/span&gt;
&lt;/span&gt;&lt;/span&gt;&lt;span class=&#34;line&#34;&gt;&lt;span class=&#34;cl&#34;&gt;
&lt;/span&gt;&lt;/span&gt;&lt;span class=&#34;line&#34;&gt;&lt;span class=&#34;cl&#34;&gt;&lt;span class=&#34;n&#34;&gt;res&lt;/span&gt; &lt;span class=&#34;o&#34;&gt;=&lt;/span&gt; &lt;span class=&#34;n&#34;&gt;model&lt;/span&gt;&lt;span class=&#34;o&#34;&gt;.&lt;/span&gt;&lt;span class=&#34;n&#34;&gt;export&lt;/span&gt;&lt;span class=&#34;p&#34;&gt;(&lt;/span&gt;&lt;span class=&#34;n&#34;&gt;quantize&lt;/span&gt;&lt;span class=&#34;o&#34;&gt;=&lt;/span&gt;&lt;span class=&#34;kc&#34;&gt;False&lt;/span&gt;&lt;span class=&#34;p&#34;&gt;)&lt;/span&gt;
&lt;/span&gt;&lt;/span&gt;&lt;/code&gt;&lt;/pre&gt;&lt;/td&gt;&lt;/tr&gt;&lt;/table&gt;
&lt;/div&gt;
&lt;/div&gt;&lt;h3 id=&#34;test-onnx&#34;&gt;Test ONNX
&lt;/h3&gt;&lt;div class=&#34;highlight&#34;&gt;&lt;div class=&#34;chroma&#34;&gt;
&lt;table class=&#34;lntable&#34;&gt;&lt;tr&gt;&lt;td class=&#34;lntd&#34;&gt;
&lt;pre tabindex=&#34;0&#34; class=&#34;chroma&#34;&gt;&lt;code&gt;&lt;span class=&#34;lnt&#34;&gt;1
&lt;/span&gt;&lt;span class=&#34;lnt&#34;&gt;2
&lt;/span&gt;&lt;span class=&#34;lnt&#34;&gt;3
&lt;/span&gt;&lt;span class=&#34;lnt&#34;&gt;4
&lt;/span&gt;&lt;span class=&#34;lnt&#34;&gt;5
&lt;/span&gt;&lt;span class=&#34;lnt&#34;&gt;6
&lt;/span&gt;&lt;span class=&#34;lnt&#34;&gt;7
&lt;/span&gt;&lt;span class=&#34;lnt&#34;&gt;8
&lt;/span&gt;&lt;span class=&#34;lnt&#34;&gt;9
&lt;/span&gt;&lt;/code&gt;&lt;/pre&gt;&lt;/td&gt;
&lt;td class=&#34;lntd&#34;&gt;
&lt;pre tabindex=&#34;0&#34; class=&#34;chroma&#34;&gt;&lt;code class=&#34;language-python&#34; data-lang=&#34;python&#34;&gt;&lt;span class=&#34;line&#34;&gt;&lt;span class=&#34;cl&#34;&gt;&lt;span class=&#34;c1&#34;&gt;# pip3 install -U funasr-onnx&lt;/span&gt;
&lt;/span&gt;&lt;/span&gt;&lt;span class=&#34;line&#34;&gt;&lt;span class=&#34;cl&#34;&gt;&lt;span class=&#34;kn&#34;&gt;from&lt;/span&gt; &lt;span class=&#34;nn&#34;&gt;funasr_onnx&lt;/span&gt; &lt;span class=&#34;kn&#34;&gt;import&lt;/span&gt; &lt;span class=&#34;n&#34;&gt;Paraformer&lt;/span&gt;
&lt;/span&gt;&lt;/span&gt;&lt;span class=&#34;line&#34;&gt;&lt;span class=&#34;cl&#34;&gt;&lt;span class=&#34;n&#34;&gt;model_dir&lt;/span&gt; &lt;span class=&#34;o&#34;&gt;=&lt;/span&gt; &lt;span class=&#34;s2&#34;&gt;&amp;#34;damo/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch&amp;#34;&lt;/span&gt;
&lt;/span&gt;&lt;/span&gt;&lt;span class=&#34;line&#34;&gt;&lt;span class=&#34;cl&#34;&gt;&lt;span class=&#34;n&#34;&gt;model&lt;/span&gt; &lt;span class=&#34;o&#34;&gt;=&lt;/span&gt; &lt;span class=&#34;n&#34;&gt;Paraformer&lt;/span&gt;&lt;span class=&#34;p&#34;&gt;(&lt;/span&gt;&lt;span class=&#34;n&#34;&gt;model_dir&lt;/span&gt;&lt;span class=&#34;p&#34;&gt;,&lt;/span&gt; &lt;span class=&#34;n&#34;&gt;batch_size&lt;/span&gt;&lt;span class=&#34;o&#34;&gt;=&lt;/span&gt;&lt;span class=&#34;mi&#34;&gt;1&lt;/span&gt;&lt;span class=&#34;p&#34;&gt;,&lt;/span&gt; &lt;span class=&#34;n&#34;&gt;quantize&lt;/span&gt;&lt;span class=&#34;o&#34;&gt;=&lt;/span&gt;&lt;span class=&#34;kc&#34;&gt;True&lt;/span&gt;&lt;span class=&#34;p&#34;&gt;)&lt;/span&gt;
&lt;/span&gt;&lt;/span&gt;&lt;span class=&#34;line&#34;&gt;&lt;span class=&#34;cl&#34;&gt;
&lt;/span&gt;&lt;/span&gt;&lt;span class=&#34;line&#34;&gt;&lt;span class=&#34;cl&#34;&gt;&lt;span class=&#34;n&#34;&gt;wav_path&lt;/span&gt; &lt;span class=&#34;o&#34;&gt;=&lt;/span&gt; &lt;span class=&#34;p&#34;&gt;[&lt;/span&gt;&lt;span class=&#34;s1&#34;&gt;&amp;#39;~/.cache/modelscope/hub/damo/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch/example/asr_example.wav&amp;#39;&lt;/span&gt;&lt;span class=&#34;p&#34;&gt;]&lt;/span&gt;
&lt;/span&gt;&lt;/span&gt;&lt;span class=&#34;line&#34;&gt;&lt;span class=&#34;cl&#34;&gt;
&lt;/span&gt;&lt;/span&gt;&lt;span class=&#34;line&#34;&gt;&lt;span class=&#34;cl&#34;&gt;&lt;span class=&#34;n&#34;&gt;result&lt;/span&gt; &lt;span class=&#34;o&#34;&gt;=&lt;/span&gt; &lt;span class=&#34;n&#34;&gt;model&lt;/span&gt;&lt;span class=&#34;p&#34;&gt;(&lt;/span&gt;&lt;span class=&#34;n&#34;&gt;wav_path&lt;/span&gt;&lt;span class=&#34;p&#34;&gt;)&lt;/span&gt;
&lt;/span&gt;&lt;/span&gt;&lt;span class=&#34;line&#34;&gt;&lt;span class=&#34;cl&#34;&gt;&lt;span class=&#34;nb&#34;&gt;print&lt;/span&gt;&lt;span class=&#34;p&#34;&gt;(&lt;/span&gt;&lt;span class=&#34;n&#34;&gt;result&lt;/span&gt;&lt;span class=&#34;p&#34;&gt;)&lt;/span&gt;
&lt;/span&gt;&lt;/span&gt;&lt;/code&gt;&lt;/pre&gt;&lt;/td&gt;&lt;/tr&gt;&lt;/table&gt;
&lt;/div&gt;
&lt;/div&gt;&lt;p&gt;For more examples, see the &lt;a class=&#34;link&#34; href=&#34;runtime/python/onnxruntime&#34; &gt;demo&lt;/a&gt;.&lt;/p&gt;
&lt;h2 id=&#34;deployment-service&#34;&gt;Deployment Service
&lt;/h2&gt;&lt;p&gt;FunASR supports deploying pre-trained or further fine-tuned models as services. The following types of service deployment are currently supported:&lt;/p&gt;
&lt;ul&gt;
&lt;li&gt;File transcription service, Mandarin, CPU version (available)&lt;/li&gt;
&lt;li&gt;Real-time transcription service, Mandarin, CPU version (available)&lt;/li&gt;
&lt;li&gt;File transcription service, English, CPU version (available)&lt;/li&gt;
&lt;li&gt;File transcription service, Mandarin, GPU version (in progress)&lt;/li&gt;
&lt;li&gt;and more&lt;/li&gt;
&lt;/ul&gt;
&lt;p&gt;For more detailed information, please refer to the &lt;a class=&#34;link&#34; href=&#34;runtime/readme.md&#34; &gt;service deployment documentation&lt;/a&gt;.&lt;/p&gt;
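&lt;p&gt;As a rough sketch of what a client for the real-time transcription service can look like, the snippet below streams raw PCM audio over a websocket. The URI, port, and JSON message fields are illustrative assumptions only, not the documented runtime protocol; consult the service deployment documentation linked above for the actual interface.&lt;/p&gt;
&lt;pre tabindex=&#34;0&#34;&gt;&lt;code class=&#34;language-python&#34; data-lang=&#34;python&#34;&gt;# Illustrative sketch only -- the endpoint and message fields are assumptions,
# not the documented FunASR runtime protocol.
import asyncio
import json

import websockets  # pip install websockets


async def stream_audio(pcm_bytes, uri=&amp;#34;ws://127.0.0.1:10095&amp;#34;, chunk_bytes=3200):
    async with websockets.connect(uri) as ws:
        # Announce the session (hypothetical handshake message).
        await ws.send(json.dumps({&amp;#34;mode&amp;#34;: &amp;#34;online&amp;#34;, &amp;#34;is_speaking&amp;#34;: True}))
        for i in range(0, len(pcm_bytes), chunk_bytes):
            await ws.send(pcm_bytes[i:i + chunk_bytes])      # one chunk of 16 kHz PCM
        await ws.send(json.dumps({&amp;#34;is_speaking&amp;#34;: False}))  # end of utterance
        print(await ws.recv())  # transcription result returned by the server


# Example usage:
# asyncio.run(stream_audio(open(&amp;#34;asr_example.pcm&amp;#34;, &amp;#34;rb&amp;#34;).read()))
&lt;/code&gt;&lt;/pre&gt;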
&lt;p&gt;&lt;a name=&#34;contact&#34;&gt;&lt;/a&gt;&lt;/p&gt;
&lt;h2 id=&#34;community-communication&#34;&gt;Community Communication
&lt;/h2&gt;&lt;p&gt;If you encounter problems in use, you can raise issues directly on the GitHub page.&lt;/p&gt;
&lt;p&gt;You can also scan the DingTalk group QR code below to join the community for communication and discussion.&lt;/p&gt;
&lt;table&gt;
  &lt;thead&gt;
      &lt;tr&gt;
          &lt;th style=&#34;text-align: center&#34;&gt;DingTalk group&lt;/th&gt;
      &lt;/tr&gt;
  &lt;/thead&gt;
  &lt;tbody&gt;
      &lt;tr&gt;
          &lt;td style=&#34;text-align: center&#34;&gt;&lt;div align=&#34;left&#34;&gt;&lt;img src=&#34;docs/images/dingding.png&#34; width=&#34;250&#34;/&gt;&lt;/div&gt;&lt;/td&gt;
      &lt;/tr&gt;
  &lt;/tbody&gt;
&lt;/table&gt;
&lt;h2 id=&#34;contributors&#34;&gt;Contributors
&lt;/h2&gt;&lt;table&gt;
  &lt;thead&gt;
      &lt;tr&gt;
          &lt;th style=&#34;text-align: center&#34;&gt;&lt;img src=&#34;docs/images/alibaba.png&#34; width=&#34;260&#34;/&gt;&lt;/th&gt;
          &lt;th style=&#34;text-align: center&#34;&gt;&lt;img src=&#34;docs/images/nwpu.png&#34; width=&#34;260&#34;/&gt;&lt;/th&gt;
          &lt;th style=&#34;text-align: center&#34;&gt;&lt;img src=&#34;docs/images/China_Telecom.png&#34; width=&#34;200&#34;/&gt;&lt;/th&gt;
          &lt;th style=&#34;text-align: center&#34;&gt;&lt;img src=&#34;docs/images/RapidAI.png&#34; width=&#34;200&#34;/&gt;&lt;/th&gt;
          &lt;th style=&#34;text-align: center&#34;&gt;&lt;img src=&#34;docs/images/aihealthx.png&#34; width=&#34;200&#34;/&gt;&lt;/th&gt;
          &lt;th style=&#34;text-align: center&#34;&gt;&lt;img src=&#34;docs/images/XVERSE.png&#34; width=&#34;250&#34;/&gt;&lt;/th&gt;
      &lt;/tr&gt;
  &lt;/thead&gt;
  &lt;tbody&gt;
  &lt;/tbody&gt;
&lt;/table&gt;
&lt;p&gt;The full list of contributors can be found in the &lt;a class=&#34;link&#34; href=&#34;./Acknowledge.md&#34; &gt;contributors list&lt;/a&gt;.&lt;/p&gt;
&lt;h2 id=&#34;license&#34;&gt;License
&lt;/h2&gt;&lt;p&gt;This project is licensed under &lt;a class=&#34;link&#34; href=&#34;https://opensource.org/licenses/MIT&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;The MIT License&lt;/a&gt;. FunASR also contains various third-party components and some code modified from other repos under other open source licenses.
The use of pretrained models is subject to the &lt;a class=&#34;link&#34; href=&#34;./MODEL_LICENSE&#34; &gt;model license&lt;/a&gt;.&lt;/p&gt;
&lt;h2 id=&#34;citations&#34;&gt;Citations
&lt;/h2&gt;&lt;div class=&#34;highlight&#34;&gt;&lt;div class=&#34;chroma&#34;&gt;
&lt;table class=&#34;lntable&#34;&gt;&lt;tr&gt;&lt;td class=&#34;lntd&#34;&gt;
&lt;pre tabindex=&#34;0&#34; class=&#34;chroma&#34;&gt;&lt;code&gt;&lt;span class=&#34;lnt&#34;&gt; 1
&lt;/span&gt;&lt;span class=&#34;lnt&#34;&gt; 2
&lt;/span&gt;&lt;span class=&#34;lnt&#34;&gt; 3
&lt;/span&gt;&lt;span class=&#34;lnt&#34;&gt; 4
&lt;/span&gt;&lt;span class=&#34;lnt&#34;&gt; 5
&lt;/span&gt;&lt;span class=&#34;lnt&#34;&gt; 6
&lt;/span&gt;&lt;span class=&#34;lnt&#34;&gt; 7
&lt;/span&gt;&lt;span class=&#34;lnt&#34;&gt; 8
&lt;/span&gt;&lt;span class=&#34;lnt&#34;&gt; 9
&lt;/span&gt;&lt;span class=&#34;lnt&#34;&gt;10
&lt;/span&gt;&lt;span class=&#34;lnt&#34;&gt;11
&lt;/span&gt;&lt;span class=&#34;lnt&#34;&gt;12
&lt;/span&gt;&lt;span class=&#34;lnt&#34;&gt;13
&lt;/span&gt;&lt;span class=&#34;lnt&#34;&gt;14
&lt;/span&gt;&lt;span class=&#34;lnt&#34;&gt;15
&lt;/span&gt;&lt;span class=&#34;lnt&#34;&gt;16
&lt;/span&gt;&lt;span class=&#34;lnt&#34;&gt;17
&lt;/span&gt;&lt;span class=&#34;lnt&#34;&gt;18
&lt;/span&gt;&lt;span class=&#34;lnt&#34;&gt;19
&lt;/span&gt;&lt;span class=&#34;lnt&#34;&gt;20
&lt;/span&gt;&lt;span class=&#34;lnt&#34;&gt;21
&lt;/span&gt;&lt;span class=&#34;lnt&#34;&gt;22
&lt;/span&gt;&lt;span class=&#34;lnt&#34;&gt;23
&lt;/span&gt;&lt;span class=&#34;lnt&#34;&gt;24
&lt;/span&gt;&lt;span class=&#34;lnt&#34;&gt;25
&lt;/span&gt;&lt;span class=&#34;lnt&#34;&gt;26
&lt;/span&gt;&lt;/code&gt;&lt;/pre&gt;&lt;/td&gt;
&lt;td class=&#34;lntd&#34;&gt;
&lt;pre tabindex=&#34;0&#34; class=&#34;chroma&#34;&gt;&lt;code class=&#34;language-bibtex&#34; data-lang=&#34;bibtex&#34;&gt;&lt;span class=&#34;line&#34;&gt;&lt;span class=&#34;cl&#34;&gt;&lt;span class=&#34;nc&#34;&gt;@inproceedings&lt;/span&gt;&lt;span class=&#34;p&#34;&gt;{&lt;/span&gt;&lt;span class=&#34;nl&#34;&gt;gao2023funasr&lt;/span&gt;&lt;span class=&#34;p&#34;&gt;,&lt;/span&gt;
&lt;/span&gt;&lt;/span&gt;&lt;span class=&#34;line&#34;&gt;&lt;span class=&#34;cl&#34;&gt;  &lt;span class=&#34;na&#34;&gt;author&lt;/span&gt;&lt;span class=&#34;p&#34;&gt;=&lt;/span&gt;&lt;span class=&#34;s&#34;&gt;{Zhifu Gao and Zerui Li and Jiaming Wang and Haoneng Luo and Xian Shi and Mengzhe Chen and Yabin Li and Lingyun Zuo and Zhihao Du and Zhangyu Xiao and Shiliang Zhang}&lt;/span&gt;&lt;span class=&#34;p&#34;&gt;,&lt;/span&gt;
&lt;/span&gt;&lt;/span&gt;&lt;span class=&#34;line&#34;&gt;&lt;span class=&#34;cl&#34;&gt;  &lt;span class=&#34;na&#34;&gt;title&lt;/span&gt;&lt;span class=&#34;p&#34;&gt;=&lt;/span&gt;&lt;span class=&#34;s&#34;&gt;{FunASR: A Fundamental End-to-End Speech Recognition Toolkit}&lt;/span&gt;&lt;span class=&#34;p&#34;&gt;,&lt;/span&gt;
&lt;/span&gt;&lt;/span&gt;&lt;span class=&#34;line&#34;&gt;&lt;span class=&#34;cl&#34;&gt;  &lt;span class=&#34;na&#34;&gt;year&lt;/span&gt;&lt;span class=&#34;p&#34;&gt;=&lt;/span&gt;&lt;span class=&#34;s&#34;&gt;{2023}&lt;/span&gt;&lt;span class=&#34;p&#34;&gt;,&lt;/span&gt;
&lt;/span&gt;&lt;/span&gt;&lt;span class=&#34;line&#34;&gt;&lt;span class=&#34;cl&#34;&gt;  &lt;span class=&#34;na&#34;&gt;booktitle&lt;/span&gt;&lt;span class=&#34;p&#34;&gt;=&lt;/span&gt;&lt;span class=&#34;s&#34;&gt;{INTERSPEECH}&lt;/span&gt;&lt;span class=&#34;p&#34;&gt;,&lt;/span&gt;
&lt;/span&gt;&lt;/span&gt;&lt;span class=&#34;line&#34;&gt;&lt;span class=&#34;cl&#34;&gt;&lt;span class=&#34;p&#34;&gt;}&lt;/span&gt;
&lt;/span&gt;&lt;/span&gt;&lt;span class=&#34;line&#34;&gt;&lt;span class=&#34;cl&#34;&gt;&lt;span class=&#34;nc&#34;&gt;@inproceedings&lt;/span&gt;&lt;span class=&#34;p&#34;&gt;{&lt;/span&gt;&lt;span class=&#34;nl&#34;&gt;An2023bat&lt;/span&gt;&lt;span class=&#34;p&#34;&gt;,&lt;/span&gt;
&lt;/span&gt;&lt;/span&gt;&lt;span class=&#34;line&#34;&gt;&lt;span class=&#34;cl&#34;&gt;  &lt;span class=&#34;na&#34;&gt;author&lt;/span&gt;&lt;span class=&#34;p&#34;&gt;=&lt;/span&gt;&lt;span class=&#34;s&#34;&gt;{Keyu An and Xian Shi and Shiliang Zhang}&lt;/span&gt;&lt;span class=&#34;p&#34;&gt;,&lt;/span&gt;
&lt;/span&gt;&lt;/span&gt;&lt;span class=&#34;line&#34;&gt;&lt;span class=&#34;cl&#34;&gt;  &lt;span class=&#34;na&#34;&gt;title&lt;/span&gt;&lt;span class=&#34;p&#34;&gt;=&lt;/span&gt;&lt;span class=&#34;s&#34;&gt;{BAT: Boundary aware transducer for memory-efficient and low-latency ASR}&lt;/span&gt;&lt;span class=&#34;p&#34;&gt;,&lt;/span&gt;
&lt;/span&gt;&lt;/span&gt;&lt;span class=&#34;line&#34;&gt;&lt;span class=&#34;cl&#34;&gt;  &lt;span class=&#34;na&#34;&gt;year&lt;/span&gt;&lt;span class=&#34;p&#34;&gt;=&lt;/span&gt;&lt;span class=&#34;s&#34;&gt;{2023}&lt;/span&gt;&lt;span class=&#34;p&#34;&gt;,&lt;/span&gt;
&lt;/span&gt;&lt;/span&gt;&lt;span class=&#34;line&#34;&gt;&lt;span class=&#34;cl&#34;&gt;  &lt;span class=&#34;na&#34;&gt;booktitle&lt;/span&gt;&lt;span class=&#34;p&#34;&gt;=&lt;/span&gt;&lt;span class=&#34;s&#34;&gt;{INTERSPEECH}&lt;/span&gt;&lt;span class=&#34;p&#34;&gt;,&lt;/span&gt;
&lt;/span&gt;&lt;/span&gt;&lt;span class=&#34;line&#34;&gt;&lt;span class=&#34;cl&#34;&gt;&lt;span class=&#34;p&#34;&gt;}&lt;/span&gt;
&lt;/span&gt;&lt;/span&gt;&lt;span class=&#34;line&#34;&gt;&lt;span class=&#34;cl&#34;&gt;&lt;span class=&#34;nc&#34;&gt;@inproceedings&lt;/span&gt;&lt;span class=&#34;p&#34;&gt;{&lt;/span&gt;&lt;span class=&#34;nl&#34;&gt;gao22b_interspeech&lt;/span&gt;&lt;span class=&#34;p&#34;&gt;,&lt;/span&gt;
&lt;/span&gt;&lt;/span&gt;&lt;span class=&#34;line&#34;&gt;&lt;span class=&#34;cl&#34;&gt;  &lt;span class=&#34;na&#34;&gt;author&lt;/span&gt;&lt;span class=&#34;p&#34;&gt;=&lt;/span&gt;&lt;span class=&#34;s&#34;&gt;{Zhifu Gao and ShiLiang Zhang and Ian McLoughlin and Zhijie Yan}&lt;/span&gt;&lt;span class=&#34;p&#34;&gt;,&lt;/span&gt;
&lt;/span&gt;&lt;/span&gt;&lt;span class=&#34;line&#34;&gt;&lt;span class=&#34;cl&#34;&gt;  &lt;span class=&#34;na&#34;&gt;title&lt;/span&gt;&lt;span class=&#34;p&#34;&gt;=&lt;/span&gt;&lt;span class=&#34;s&#34;&gt;{Paraformer: Fast and Accurate Parallel Transformer for Non-autoregressive End-to-End Speech Recognition}&lt;/span&gt;&lt;span class=&#34;p&#34;&gt;,&lt;/span&gt;
&lt;/span&gt;&lt;/span&gt;&lt;span class=&#34;line&#34;&gt;&lt;span class=&#34;cl&#34;&gt;  &lt;span class=&#34;na&#34;&gt;year&lt;/span&gt;&lt;span class=&#34;p&#34;&gt;=&lt;/span&gt;&lt;span class=&#34;m&#34;&gt;2022&lt;/span&gt;&lt;span class=&#34;p&#34;&gt;,&lt;/span&gt;
&lt;/span&gt;&lt;/span&gt;&lt;span class=&#34;line&#34;&gt;&lt;span class=&#34;cl&#34;&gt;  &lt;span class=&#34;na&#34;&gt;booktitle&lt;/span&gt;&lt;span class=&#34;p&#34;&gt;=&lt;/span&gt;&lt;span class=&#34;s&#34;&gt;{Proc. Interspeech 2022}&lt;/span&gt;&lt;span class=&#34;p&#34;&gt;,&lt;/span&gt;
&lt;/span&gt;&lt;/span&gt;&lt;span class=&#34;line&#34;&gt;&lt;span class=&#34;cl&#34;&gt;  &lt;span class=&#34;na&#34;&gt;pages&lt;/span&gt;&lt;span class=&#34;p&#34;&gt;=&lt;/span&gt;&lt;span class=&#34;s&#34;&gt;{2063--2067}&lt;/span&gt;&lt;span class=&#34;p&#34;&gt;,&lt;/span&gt;
&lt;/span&gt;&lt;/span&gt;&lt;span class=&#34;line&#34;&gt;&lt;span class=&#34;cl&#34;&gt;  &lt;span class=&#34;na&#34;&gt;doi&lt;/span&gt;&lt;span class=&#34;p&#34;&gt;=&lt;/span&gt;&lt;span class=&#34;s&#34;&gt;{10.21437/Interspeech.2022-9996}&lt;/span&gt;
&lt;/span&gt;&lt;/span&gt;&lt;span class=&#34;line&#34;&gt;&lt;span class=&#34;cl&#34;&gt;&lt;span class=&#34;p&#34;&gt;}&lt;/span&gt;
&lt;/span&gt;&lt;/span&gt;&lt;span class=&#34;line&#34;&gt;&lt;span class=&#34;cl&#34;&gt;&lt;span class=&#34;nc&#34;&gt;@inproceedings&lt;/span&gt;&lt;span class=&#34;p&#34;&gt;{&lt;/span&gt;&lt;span class=&#34;nl&#34;&gt;shi2023seaco&lt;/span&gt;&lt;span class=&#34;p&#34;&gt;,&lt;/span&gt;
&lt;/span&gt;&lt;/span&gt;&lt;span class=&#34;line&#34;&gt;&lt;span class=&#34;cl&#34;&gt;  &lt;span class=&#34;na&#34;&gt;author&lt;/span&gt;&lt;span class=&#34;p&#34;&gt;=&lt;/span&gt;&lt;span class=&#34;s&#34;&gt;{Xian Shi and Yexin Yang and Zerui Li and Yanni Chen and Zhifu Gao and Shiliang Zhang}&lt;/span&gt;&lt;span class=&#34;p&#34;&gt;,&lt;/span&gt;
&lt;/span&gt;&lt;/span&gt;&lt;span class=&#34;line&#34;&gt;&lt;span class=&#34;cl&#34;&gt;  &lt;span class=&#34;na&#34;&gt;title&lt;/span&gt;&lt;span class=&#34;p&#34;&gt;=&lt;/span&gt;&lt;span class=&#34;s&#34;&gt;{SeACo-Paraformer: A Non-Autoregressive ASR System with Flexible and Effective Hotword Customization Ability}&lt;/span&gt;&lt;span class=&#34;p&#34;&gt;,&lt;/span&gt;
&lt;/span&gt;&lt;/span&gt;&lt;span class=&#34;line&#34;&gt;&lt;span class=&#34;cl&#34;&gt;  &lt;span class=&#34;na&#34;&gt;year&lt;/span&gt;&lt;span class=&#34;p&#34;&gt;=&lt;/span&gt;&lt;span class=&#34;s&#34;&gt;{2023}&lt;/span&gt;&lt;span class=&#34;p&#34;&gt;,&lt;/span&gt;
&lt;/span&gt;&lt;/span&gt;&lt;span class=&#34;line&#34;&gt;&lt;span class=&#34;cl&#34;&gt;  &lt;span class=&#34;na&#34;&gt;booktitle&lt;/span&gt;&lt;span class=&#34;p&#34;&gt;=&lt;/span&gt;&lt;span class=&#34;s&#34;&gt;{ICASSP2024}&lt;/span&gt;
&lt;/span&gt;&lt;/span&gt;&lt;span class=&#34;line&#34;&gt;&lt;span class=&#34;cl&#34;&gt;&lt;span class=&#34;p&#34;&gt;}&lt;/span&gt;
&lt;/span&gt;&lt;/span&gt;&lt;/code&gt;&lt;/pre&gt;&lt;/td&gt;&lt;/tr&gt;&lt;/table&gt;
&lt;/div&gt;
&lt;/div&gt;</description>
        </item>
        
    </channel>
</rss>
