<?xml version="1.0" encoding="utf-8" standalone="yes"?>
<rss version="2.0" xmlns:atom="http://www.w3.org/2005/Atom">
    <channel>
        <title>Nvidia on Producthunt daily</title>
        <link>https://producthunt.programnotes.cn/en/tags/nvidia/</link>
        <description>Recent content in Nvidia on Producthunt daily</description>
        <generator>Hugo -- gohugo.io</generator>
        <language>en</language>
        <lastBuildDate>Tue, 23 Sep 2025 15:28:47 +0800</lastBuildDate><atom:link href="https://producthunt.programnotes.cn/en/tags/nvidia/index.xml" rel="self" type="application/rss+xml" /><item>
        <title>Sunshine</title>
        <link>https://producthunt.programnotes.cn/en/p/sunshine/</link>
        <pubDate>Tue, 23 Sep 2025 15:28:47 +0800</pubDate>
        
        <guid>https://producthunt.programnotes.cn/en/p/sunshine/</guid>
        <description>&lt;img src="https://images.unsplash.com/photo-1590147074903-b9ad6ba9eb5a?ixid=M3w0NjAwMjJ8MHwxfHJhbmRvbXx8fHx8fHx8fDE3NTg2MTI0NzN8&amp;ixlib=rb-4.1.0" alt="Featured image of post Sunshine" /&gt;&lt;h1 id=&#34;lizardbytesunshine&#34;&gt;&lt;a class=&#34;link&#34; href=&#34;https://github.com/LizardByte/Sunshine&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;LizardByte/Sunshine&lt;/a&gt;
&lt;/h1&gt;&lt;div align=&#34;center&#34;&gt;
  &lt;img src=&#34;sunshine.png&#34; /&gt;
  &lt;h1 align=&#34;center&#34;&gt;Sunshine&lt;/h1&gt;
  &lt;h4 align=&#34;center&#34;&gt;Self-hosted game stream host for Moonlight.&lt;/h4&gt;
&lt;/div&gt;
&lt;div align=&#34;center&#34;&gt;
  &lt;a href=&#34;https://github.com/LizardByte/Sunshine&#34;&gt;&lt;img src=&#34;https://img.shields.io/github/stars/lizardbyte/sunshine.svg?logo=github&amp;style=for-the-badge&#34; alt=&#34;GitHub stars&#34;&gt;&lt;/a&gt;
  &lt;a href=&#34;https://github.com/LizardByte/Sunshine/releases/latest&#34;&gt;&lt;img src=&#34;https://img.shields.io/github/downloads/lizardbyte/sunshine/total.svg?style=for-the-badge&amp;logo=github&#34; alt=&#34;GitHub Releases&#34;&gt;&lt;/a&gt;
  &lt;a href=&#34;https://hub.docker.com/r/lizardbyte/sunshine&#34;&gt;&lt;img src=&#34;https://img.shields.io/docker/pulls/lizardbyte/sunshine.svg?style=for-the-badge&amp;logo=docker&#34; alt=&#34;Docker&#34;&gt;&lt;/a&gt;
  &lt;a href=&#34;https://github.com/LizardByte/Sunshine/pkgs/container/sunshine&#34;&gt;&lt;img src=&#34;https://img.shields.io/badge/dynamic/json?url=https%3A%2F%2Fipitio.github.io%2Fbackage%2FLizardByte%2FSunshine%2Fsunshine.json&amp;query=%24.downloads&amp;label=ghcr%20pulls&amp;style=for-the-badge&amp;logo=github&#34; alt=&#34;GHCR&#34;&gt;&lt;/a&gt;
  &lt;a href=&#34;https://flathub.org/apps/dev.lizardbyte.app.Sunshine&#34;&gt;&lt;img src=&#34;https://img.shields.io/flathub/downloads/dev.lizardbyte.app.Sunshine?style=for-the-badge&amp;logo=flathub&#34; alt=&#34;Flathub installs&#34;&gt;&lt;/a&gt;
  &lt;a href=&#34;https://flathub.org/apps/dev.lizardbyte.app.Sunshine&#34;&gt;&lt;img src=&#34;https://img.shields.io/flathub/v/dev.lizardbyte.app.Sunshine?style=for-the-badge&amp;logo=flathub&#34; alt=&#34;Flathub Version&#34;&gt;&lt;/a&gt;
  &lt;a href=&#34;https://github.com/microsoft/winget-pkgs/tree/master/manifests/l/LizardByte/Sunshine&#34;&gt;&lt;img src=&#34;https://img.shields.io/winget/v/LizardByte.Sunshine?style=for-the-badge&amp;logo=data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAACAAAAAgCAYAAABzenr0AAAAAXNSR0IArs4c6QAAAARnQU1BAACxjwv8YQUAAAAJcEhZcwAADsMAAA7DAcdvqGQAAAHuSURBVFhH7ZfNTtRQGIYZiMDwN/IrCAqIhMSNKxcmymVwG+5dcDVsWHgDrtxwCYQVl+BChzDEwSnPY+eQ0sxoOz1mQuBNnpyvTdvz9jun5/SrjfxnJUkyQbMEz2ELduF1l0YUA3QyTrMAa2AnPtyOXsELeAYNyKtV2EC3k3lYgTOwg09ghy/BTp7CKBRV844BOpmmMV2+ySb4BmInG7AKY7AHH+EYqqhZo9PPBG/BVDlOizAD/XQFmnoPXzxRQX8M/CCYS48L6RIc4ygGHK9WGg9HZSZMUNRPVwNJGg5Hg2Qgqh4N3FsDsb6EmgYm07iwwvUxstdxJTwgmILf4CfZ6bb5OHANX8GN5x20IVxnG8ge94pt2xpwU3GnCwayF4Q2G2vgFLzHndFzQdk4q77nNfCdwL28qNyMtmEf3A1/QV5FjDiPWo5jrwf8TWZChTlgJvL4F9QL50/A43qVidTvLcuoM2wDQ1+IkgefgUpLcYwMVBqCKNJA2b0gKNocOIITOIef8C/F/CdMbh/GklynsSawKLHS8d9/B1x2LUqsfFyy3TMsWj5A1cLkotDbYO4JjWWZlZEGv8EbOIR1CAVN2eG8W5oNKgxaeC6DmTJjZs7ixUxpznLPLT+v4sXpoMLcLI3mzFSonDXIEI/M3QCIO4YuimBJ/gAAAABJRU5ErkJggg==&#34; alt=&#34;Winget Version&#34;&gt;&lt;/a&gt;
  &lt;a href=&#34;https://gurubase.io/g/sunshine&#34;&gt;&lt;img src=&#34;https://img.shields.io/badge/Gurubase-Ask%20Guru-ef1a1b?style=for-the-badge&amp;logo=data:image/jpeg;base64,/9j/2wCEAAgGBgcGBQgHBwcJCQgKDBQNDAsLDBkSEw8UHRofHh0aHBwgJC4nICIsIxwcKDcpLDAxNDQ0Hyc5PTgyPC4zNDIBCQkJDAsMGA0NGDIhHCEyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMv/AABEIABgAGAMBIgACEQEDEQH/xAGiAAABBQEBAQEBAQAAAAAAAAAAAQIDBAUGBwgJCgsQAAIBAwMCBAMFBQQEAAABfQECAwAEEQUSITFBBhNRYQcicRQygZGhCCNCscEVUtHwJDNicoIJChYXGBkaJSYnKCkqNDU2Nzg5OkNERUZHSElKU1RVVldYWVpjZGVmZ2hpanN0dXZ3eHl6g4SFhoeIiYqSk5SVlpeYmZqio6Slpqeoqaqys7S1tre4ubrCw8TFxsfIycrS09TV1tfY2drh4uPk5ebn6Onq8fLz9PX29/j5+gEAAwEBAQEBAQEBAQAAAAAAAAECAwQFBgcICQoLEQACAQIEBAMEBwUEBAABAncAAQIDEQQFITEGEkFRB2FxEyIygQgUQpGhscEJIzNS8BVictEKFiQ04SXxFxgZGiYnKCkqNTY3ODk6Q0RFRkdISUpTVFVWV1hZWmNkZWZnaGlqc3R1dnd4eXqCg4SFhoeIiYqSk5SVlpeYmZqio6Slpqeoqaqys7S1tre4ubrCw8TFxsfIycrS09TV1tfY2dri4+Tl5ufo6ery8/T19vf4+fr/2gAMAwEAAhEDEQA/AOLqSO3mlilljido4QGkYDIQEgAn05IH41seFo7aS+uRKlrJci2Y2cd2QImlyOGyQPu7sA8ZxXapAlvpThbPRkv7nTQWhDoIZZRc/XaSAOmcZGOnFfP06XMr3P17F5iqE+Tl1uuvf9Lde55dRW74pit4r61EcdtFdG2U3kVqQY0lyeBgkD5duQOASawqykuV2O6jV9rTU0rXLNjf3Om3QubSXy5QCudoYEEYIIOQR7GnahqV3qk6zXk3mOqhFAUKqqOyqAAByeAKqUUXdrFezhz89lfv1+8KKKKRZ//Z&#34; alt=&#34;Gurubase&#34;&gt;&lt;/a&gt;
  &lt;a href=&#34;https://github.com/LizardByte/Sunshine/actions/workflows/ci.yml?query=branch%3Amaster&#34;&gt;&lt;img src=&#34;https://img.shields.io/github/actions/workflow/status/lizardbyte/sunshine/ci.yml.svg?branch=master&amp;label=CI%20build&amp;logo=github&amp;style=for-the-badge&#34; alt=&#34;GitHub Workflow Status (CI)&#34;&gt;&lt;/a&gt;
  &lt;a href=&#34;https://github.com/LizardByte/Sunshine/actions/workflows/localize.yml?query=branch%3Amaster&#34;&gt;&lt;img src=&#34;https://img.shields.io/github/actions/workflow/status/lizardbyte/sunshine/localize.yml.svg?branch=master&amp;label=localize%20build&amp;logo=github&amp;style=for-the-badge&#34; alt=&#34;GitHub Workflow Status (localize)&#34;&gt;&lt;/a&gt;
  &lt;a href=&#34;https://docs.lizardbyte.dev/projects/sunshine&#34;&gt;&lt;img src=&#34;https://img.shields.io/readthedocs/sunshinestream.svg?label=Docs&amp;style=for-the-badge&amp;logo=readthedocs&#34; alt=&#34;Read the Docs&#34;&gt;&lt;/a&gt;
  &lt;a href=&#34;https://codecov.io/gh/LizardByte/Sunshine&#34;&gt;&lt;img src=&#34;https://img.shields.io/codecov/c/gh/LizardByte/Sunshine?token=SMGXQ5NVMJ&amp;style=for-the-badge&amp;logo=codecov&amp;label=codecov&#34; alt=&#34;Codecov&#34;&gt;&lt;/a&gt;
&lt;/div&gt;
&lt;h2 id=&#34;ℹ-about&#34;&gt;ℹ️ About
&lt;/h2&gt;&lt;p&gt;Sunshine is a self-hosted game stream host for Moonlight. It offers a low-latency
cloud gaming server with hardware encoding on AMD, Intel, and Nvidia GPUs; software encoding is
also available. You can connect to Sunshine from any Moonlight client on a variety of devices.
A web UI is provided for configuration and client pairing from your favorite web browser; pair
from the local server or any mobile device.&lt;/p&gt;
&lt;p&gt;LizardByte has the full documentation hosted on &lt;a class=&#34;link&#34; href=&#34;https://docs.lizardbyte.dev/projects/sunshine&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;Read the Docs&lt;/a&gt;.&lt;/p&gt;
&lt;ul&gt;
&lt;li&gt;&lt;a class=&#34;link&#34; href=&#34;https://docs.lizardbyte.dev/projects/sunshine/latest/&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;Stable&lt;/a&gt;&lt;/li&gt;
&lt;li&gt;&lt;a class=&#34;link&#34; href=&#34;https://docs.lizardbyte.dev/projects/sunshine/master/&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;Beta&lt;/a&gt;&lt;/li&gt;
&lt;/ul&gt;
&lt;h2 id=&#34;-system-requirements&#34;&gt;🖥️ System Requirements
&lt;/h2&gt;&lt;blockquote&gt;
&lt;p&gt;[!WARNING]
These tables are a work in progress. Do not purchase hardware based on this information.&lt;/p&gt;
&lt;/blockquote&gt;
&lt;table&gt;
    &lt;caption id=&#34;minimum_requirements&#34;&gt;Minimum Requirements&lt;/caption&gt;
    &lt;tr&gt;
        &lt;th&gt;Component&lt;/th&gt;
        &lt;th&gt;Requirement&lt;/th&gt;
    &lt;/tr&gt;
    &lt;tr&gt;
        &lt;td rowspan=&#34;3&#34;&gt;GPU&lt;/td&gt;
        &lt;td&gt;AMD: VCE 1.0 or higher, see: &lt;a href=&#34;https://github.com/obsproject/obs-amd-encoder/wiki/Hardware-Support&#34;&gt;obs-amd hardware support&lt;/a&gt;&lt;/td&gt;
    &lt;/tr&gt;
    &lt;tr&gt;
        &lt;td&gt;
            Intel:&lt;br&gt;
            &amp;nbsp;&amp;nbsp;Linux: VAAPI-compatible, see: &lt;a href=&#34;https://www.intel.com/content/www/us/en/developer/articles/technical/linuxmedia-vaapi.html&#34;&gt;VAAPI hardware support&lt;/a&gt;&lt;br&gt;
            &amp;nbsp;&amp;nbsp;Windows: Skylake or newer with QuickSync encoding support
        &lt;/td&gt;
    &lt;/tr&gt;
    &lt;tr&gt;
        &lt;td&gt;Nvidia: NVENC enabled cards, see: &lt;a href=&#34;https://developer.nvidia.com/video-encode-and-decode-gpu-support-matrix-new&#34;&gt;nvenc support matrix&lt;/a&gt;&lt;/td&gt;
    &lt;/tr&gt;
    &lt;tr&gt;
        &lt;td rowspan=&#34;2&#34;&gt;CPU&lt;/td&gt;
        &lt;td&gt;AMD: Ryzen 3 or higher&lt;/td&gt;
    &lt;/tr&gt;
    &lt;tr&gt;
        &lt;td&gt;Intel: Core i3 or higher&lt;/td&gt;
    &lt;/tr&gt;
    &lt;tr&gt;
        &lt;td&gt;RAM&lt;/td&gt;
        &lt;td&gt;4GB or more&lt;/td&gt;
    &lt;/tr&gt;
    &lt;tr&gt;
        &lt;td rowspan=&#34;5&#34;&gt;OS&lt;/td&gt;
        &lt;td&gt;Windows: 10+ (Windows Server does not support virtual gamepads)&lt;/td&gt;
    &lt;/tr&gt;
    &lt;tr&gt;
        &lt;td&gt;macOS: 14+&lt;/td&gt;
    &lt;/tr&gt;
    &lt;tr&gt;
        &lt;td&gt;Linux/Debian: 13+ (trixie)&lt;/td&gt;
    &lt;/tr&gt;
    &lt;tr&gt;
        &lt;td&gt;Linux/Fedora: 41+&lt;/td&gt;
    &lt;/tr&gt;
    &lt;tr&gt;
        &lt;td&gt;Linux/Ubuntu: 22.04+ (jammy)&lt;/td&gt;
    &lt;/tr&gt;
    &lt;tr&gt;
        &lt;td rowspan=&#34;2&#34;&gt;Network&lt;/td&gt;
        &lt;td&gt;Host: 5GHz, 802.11ac&lt;/td&gt;
    &lt;/tr&gt;
    &lt;tr&gt;
        &lt;td&gt;Client: 5GHz, 802.11ac&lt;/td&gt;
    &lt;/tr&gt;
&lt;/table&gt;
&lt;table&gt;
    &lt;caption id=&#34;4k_suggestions&#34;&gt;4K Suggestions&lt;/caption&gt;
    &lt;tr&gt;
        &lt;th&gt;Component&lt;/th&gt;
        &lt;th&gt;Requirement&lt;/th&gt;
    &lt;/tr&gt;
    &lt;tr&gt;
        &lt;td rowspan=&#34;3&#34;&gt;GPU&lt;/td&gt;
        &lt;td&gt;AMD: Video Coding Engine 3.1 or higher&lt;/td&gt;
    &lt;/tr&gt;
    &lt;tr&gt;
        &lt;td&gt;
            Intel:&lt;br&gt;
            &amp;nbsp;&amp;nbsp;Linux: HD Graphics 510 or higher&lt;br&gt;
            &amp;nbsp;&amp;nbsp;Windows: Skylake or newer with QuickSync encoding support
        &lt;/td&gt;
    &lt;/tr&gt;
    &lt;tr&gt;
        &lt;td&gt;
            Nvidia:&lt;br&gt;
            &amp;nbsp;&amp;nbsp;Linux: GeForce RTX 2000 series or higher&lt;br&gt;
            &amp;nbsp;&amp;nbsp;Windows: GeForce GTX 1080 or higher
        &lt;/td&gt;
    &lt;/tr&gt;
    &lt;tr&gt;
        &lt;td rowspan=&#34;2&#34;&gt;CPU&lt;/td&gt;
        &lt;td&gt;AMD: Ryzen 5 or higher&lt;/td&gt;
    &lt;/tr&gt;
    &lt;tr&gt;
        &lt;td&gt;Intel: Core i5 or higher&lt;/td&gt;
    &lt;/tr&gt;
    &lt;tr&gt;
        &lt;td rowspan=&#34;2&#34;&gt;Network&lt;/td&gt;
        &lt;td&gt;Host: CAT5e ethernet or better&lt;/td&gt;
    &lt;/tr&gt;
    &lt;tr&gt;
        &lt;td&gt;Client: CAT5e ethernet or better&lt;/td&gt;
    &lt;/tr&gt;
&lt;/table&gt;
&lt;table&gt;
    &lt;caption id=&#34;hdr_suggestions&#34;&gt;HDR Suggestions&lt;/caption&gt;
    &lt;tr&gt;
        &lt;th&gt;Component&lt;/th&gt;
        &lt;th&gt;Requirement&lt;/th&gt;
    &lt;/tr&gt;
    &lt;tr&gt;
        &lt;td rowspan=&#34;3&#34;&gt;GPU&lt;/td&gt;
        &lt;td&gt;AMD: Video Coding Engine 3.4 or higher&lt;/td&gt;
    &lt;/tr&gt;
    &lt;tr&gt;
        &lt;td&gt;Intel: HD Graphics 730 or higher&lt;/td&gt;
    &lt;/tr&gt;
    &lt;tr&gt;
        &lt;td&gt;Nvidia: Pascal-based GPU (GTX 10-series) or higher&lt;/td&gt;
    &lt;/tr&gt;
    &lt;tr&gt;
        &lt;td rowspan=&#34;2&#34;&gt;CPU&lt;/td&gt;
        &lt;td&gt;AMD: Ryzen 5 or higher&lt;/td&gt;
    &lt;/tr&gt;
    &lt;tr&gt;
        &lt;td&gt;Intel: Core i5 or higher&lt;/td&gt;
    &lt;/tr&gt;
    &lt;tr&gt;
        &lt;td rowspan=&#34;2&#34;&gt;Network&lt;/td&gt;
        &lt;td&gt;Host: CAT5e ethernet or better&lt;/td&gt;
    &lt;/tr&gt;
    &lt;tr&gt;
        &lt;td&gt;Client: CAT5e ethernet or better&lt;/td&gt;
    &lt;/tr&gt;
&lt;/table&gt;
&lt;h2 id=&#34;-support&#34;&gt;❓ Support
&lt;/h2&gt;&lt;p&gt;Our support methods are listed in our &lt;a class=&#34;link&#34; href=&#34;https://docs.lizardbyte.dev/latest/about/support.html&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;LizardByte Docs&lt;/a&gt;.&lt;/p&gt;
&lt;h2 id=&#34;-sponsors-and-supporters&#34;&gt;💲 Sponsors and Supporters
&lt;/h2&gt;&lt;p align=&#34;center&#34;&gt;
  &lt;a href=&#34;https://app.lizardbyte.dev&#34; aria-label=&#34;Sponsor LizardByte&#34;&gt;
    &lt;img src=&#39;https://raw.githubusercontent.com/LizardByte/contributors/refs/heads/dist/sponsors.svg&#39;/&gt;
  &lt;/a&gt;
&lt;/p&gt;
&lt;h2 id=&#34;-contributors&#34;&gt;👥 Contributors
&lt;/h2&gt;&lt;p&gt;Thank you to all the contributors who have helped make Sunshine better!&lt;/p&gt;
&lt;h3 id=&#34;github&#34;&gt;GitHub
&lt;/h3&gt;&lt;p align=&#34;center&#34;&gt;
  &lt;a href=&#34;https://github.com/LizardByte/Sunshine&#34; aria-label=&#34;GitHub&#34;&gt;
    &lt;img src=&#39;https://raw.githubusercontent.com/LizardByte/contributors/refs/heads/dist/github.Sunshine.svg&#39;/&gt;
  &lt;/a&gt;
&lt;/p&gt;
&lt;h3 id=&#34;crowdin&#34;&gt;Crowdin
&lt;/h3&gt;&lt;p align=&#34;center&#34;&gt;
  &lt;a href=&#34;https://translate.lizardbyte.dev&#34; aria-label=&#34;CrowdIn&#34;&gt;
    &lt;img src=&#39;https://raw.githubusercontent.com/LizardByte/contributors/refs/heads/dist/crowdin.606145.svg&#39;/&gt;
  &lt;/a&gt;
&lt;/p&gt;
</description>
        </item>
        <item>
        <title>Product Hunt Daily | 2025-08-14</title>
        <link>https://producthunt.programnotes.cn/en/p/product-hunt-daily-2025-08-14/</link>
        <pubDate>Thu, 14 Aug 2025 07:30:02 +0000</pubDate>
        
        <guid>https://producthunt.programnotes.cn/en/p/product-hunt-daily-2025-08-14/</guid>
        <description>&lt;img src="https://ph-files.imgix.net/9d60964c-1fbe-4f61-ac66-d6d90deb3fe6.png?auto=format&amp;fit=crop&amp;frame=1&amp;h=512&amp;w=1024" alt="Featured image of post Product Hunt Daily | 2025-08-14" /&gt;&lt;h2 id=&#34;1-autumn&#34;&gt;1. Autumn
&lt;/h2&gt;&lt;p&gt;&lt;strong&gt;Tagline&lt;/strong&gt;: Stripe made easy for AI startups&lt;br&gt;
&lt;strong&gt;Description&lt;/strong&gt;: Autumn helps AI startups price, meter, and control usage with just 3 API calls. Built on Stripe, it manages subscriptions, usage, and access in one place. No webhooks or backend logic needed. Ideal for early stage teams building LLM and image apps.&lt;br&gt;
&lt;strong&gt;Website&lt;/strong&gt;: &lt;a class=&#34;link&#34; href=&#34;https://www.producthunt.com/r/ML5KNSZXPQXYTN?utm_campaign=producthunt-api&amp;amp;utm_medium=api-v2&amp;amp;utm_source=Application%3A&amp;#43;weekly&amp;#43;%28ID%3A&amp;#43;148189%29&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;open&lt;/a&gt;&lt;br&gt;
&lt;strong&gt;Product Hunt&lt;/strong&gt;: &lt;a class=&#34;link&#34; href=&#34;https://www.producthunt.com/products/autumn-3?utm_campaign=producthunt-api&amp;amp;utm_medium=api-v2&amp;amp;utm_source=Application%3A&amp;#43;weekly&amp;#43;%28ID%3A&amp;#43;148189%29&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;View on Product Hunt&lt;/a&gt;&lt;br&gt;
&lt;img src=&#34;https://ph-files.imgix.net/9d60964c-1fbe-4f61-ac66-d6d90deb3fe6.png?auto=format&amp;amp;fit=crop&amp;amp;frame=1&amp;amp;h=512&amp;amp;w=1024&#34; loading=&#34;lazy&#34; alt=&#34;Autumn&#34;&gt;&lt;br&gt;
&lt;strong&gt;Keyword&lt;/strong&gt;: AI pricing, AI metering, AI subscriptions, Stripe integration, AI billing, usage-based pricing, LLM billing, image generation billing, SaaS metering, subscription management, no-code billing&lt;br&gt;
&lt;strong&gt;VotesCount&lt;/strong&gt;: 🔺541&lt;br&gt;
&lt;strong&gt;Featured&lt;/strong&gt;: Yes&lt;br&gt;
&lt;strong&gt;CreatedAt&lt;/strong&gt;: 2025-08-13 07:01 AM (UTC)&lt;/p&gt;
&lt;hr&gt;
&lt;h2 id=&#34;2-mcp-use&#34;&gt;2. mcp-use
&lt;/h2&gt;&lt;p&gt;&lt;strong&gt;Tagline&lt;/strong&gt;: Open source SDK and infra for MCP servers &amp;amp; agents&lt;br&gt;
&lt;strong&gt;Description&lt;/strong&gt;: mcp-use is the open-source devtools and cloud infrastructure to help dev teams quickly build and deploy custom AI agents with MCP servers. Our SDK has over 5,000 GitHub stars, 100k downloads, and is trusted by engineers at NASA, Cisco, NVIDIA, etc&amp;hellip;&lt;br&gt;
&lt;strong&gt;Website&lt;/strong&gt;: &lt;a class=&#34;link&#34; href=&#34;https://www.producthunt.com/r/JP73GWHPK3OPND?utm_campaign=producthunt-api&amp;amp;utm_medium=api-v2&amp;amp;utm_source=Application%3A&amp;#43;weekly&amp;#43;%28ID%3A&amp;#43;148189%29&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;open&lt;/a&gt;&lt;br&gt;
&lt;strong&gt;Product Hunt&lt;/strong&gt;: &lt;a class=&#34;link&#34; href=&#34;https://www.producthunt.com/products/mcp-use?utm_campaign=producthunt-api&amp;amp;utm_medium=api-v2&amp;amp;utm_source=Application%3A&amp;#43;weekly&amp;#43;%28ID%3A&amp;#43;148189%29&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;View on Product Hunt&lt;/a&gt;&lt;br&gt;
&lt;img src=&#34;https://ph-files.imgix.net/7dce49d1-3a3a-4b4b-b949-5badbeb3dc3f.png?auto=format&amp;amp;fit=crop&amp;amp;frame=1&amp;amp;h=512&amp;amp;w=1024&#34; loading=&#34;lazy&#34; alt=&#34;mcp-use&#34;&gt;&lt;br&gt;
&lt;strong&gt;Keyword&lt;/strong&gt;: mcp, open source, SDK, AI agents, devtools, cloud infrastructure, MCP server, custom AI, GitHub, NASA, Cisco, NVIDIA&lt;br&gt;
&lt;strong&gt;VotesCount&lt;/strong&gt;: 🔺430&lt;br&gt;
&lt;strong&gt;Featured&lt;/strong&gt;: Yes&lt;br&gt;
&lt;strong&gt;CreatedAt&lt;/strong&gt;: 2025-08-13 07:01 AM (UTC)&lt;/p&gt;
&lt;hr&gt;
&lt;h2 id=&#34;3-bio-calls-by-cross-paths&#34;&gt;3. Bio Calls by Cross Paths
&lt;/h2&gt;&lt;p&gt;&lt;strong&gt;Tagline&lt;/strong&gt;: Monetize all your social media in 60 seconds&lt;br&gt;
&lt;strong&gt;Description&lt;/strong&gt;: Link-in-bio on anabolic steroids — Hop on 1:1 calls with your social media followers. Display links. Join the pack and solve problems cooperatively. Earn actively and passively. ❤️&lt;br&gt;
&lt;strong&gt;Website&lt;/strong&gt;: &lt;a class=&#34;link&#34; href=&#34;https://www.producthunt.com/r/TUPPQ4Z5F3UQG4?utm_campaign=producthunt-api&amp;amp;utm_medium=api-v2&amp;amp;utm_source=Application%3A&amp;#43;weekly&amp;#43;%28ID%3A&amp;#43;148189%29&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;open&lt;/a&gt;&lt;br&gt;
&lt;strong&gt;Product Hunt&lt;/strong&gt;: &lt;a class=&#34;link&#34; href=&#34;https://www.producthunt.com/products/bio-calls-by-cross-paths?utm_campaign=producthunt-api&amp;amp;utm_medium=api-v2&amp;amp;utm_source=Application%3A&amp;#43;weekly&amp;#43;%28ID%3A&amp;#43;148189%29&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;View on Product Hunt&lt;/a&gt;&lt;br&gt;
&lt;img src=&#34;https://ph-files.imgix.net/67d236b3-dfb6-4dca-8c19-ef974c90579d.jpeg?auto=format&amp;amp;fit=crop&amp;amp;frame=1&amp;amp;h=512&amp;amp;w=1024&#34; loading=&#34;lazy&#34; alt=&#34;Bio Calls by Cross Paths&#34;&gt;&lt;br&gt;
&lt;strong&gt;Keyword&lt;/strong&gt;: Link in bio, social media monetization, 1:1 calls, passive income, active income, social media marketing, Bio Calls, Cross Paths&lt;br&gt;
&lt;strong&gt;VotesCount&lt;/strong&gt;: 🔺423&lt;br&gt;
&lt;strong&gt;Featured&lt;/strong&gt;: Yes&lt;br&gt;
&lt;strong&gt;CreatedAt&lt;/strong&gt;: 2025-08-13 07:01 AM (UTC)&lt;/p&gt;
&lt;hr&gt;
&lt;h2 id=&#34;4-kandid&#34;&gt;4. Kandid
&lt;/h2&gt;&lt;p&gt;&lt;strong&gt;Tagline&lt;/strong&gt;: Consultative AI salesperson for ecommerce&lt;br&gt;
&lt;strong&gt;Description&lt;/strong&gt;: Kandid is an AI-powered sales assistant for consumer brands that boosts conversions by guiding shoppers, recommending products, answering queries, and upselling like a trained store rep - all in real time, 24x7, across your website.&lt;br&gt;
&lt;strong&gt;Website&lt;/strong&gt;: &lt;a class=&#34;link&#34; href=&#34;https://www.producthunt.com/r/6EXX7JB7Q3ATQ6?utm_campaign=producthunt-api&amp;amp;utm_medium=api-v2&amp;amp;utm_source=Application%3A&amp;#43;weekly&amp;#43;%28ID%3A&amp;#43;148189%29&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;open&lt;/a&gt;&lt;br&gt;
&lt;strong&gt;Product Hunt&lt;/strong&gt;: &lt;a class=&#34;link&#34; href=&#34;https://www.producthunt.com/products/kandid-2?utm_campaign=producthunt-api&amp;amp;utm_medium=api-v2&amp;amp;utm_source=Application%3A&amp;#43;weekly&amp;#43;%28ID%3A&amp;#43;148189%29&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;View on Product Hunt&lt;/a&gt;&lt;br&gt;
&lt;img src=&#34;https://ph-files.imgix.net/a0ee1d43-924f-4322-91d9-8a4e056f0814.png?auto=format&amp;amp;fit=crop&amp;amp;frame=1&amp;amp;h=512&amp;amp;w=1024&#34; loading=&#34;lazy&#34; alt=&#34;Kandid&#34;&gt;&lt;br&gt;
&lt;strong&gt;Keyword&lt;/strong&gt;: AI Sales Assistant, Ecommerce AI, AI Salesperson, Conversational AI, Sales Automation, AI Chatbot, Ecommerce Chatbot, Upselling AI, Product Recommendation AI, Website Chat, 24/7 Sales, Conversion Optimization&lt;br&gt;
&lt;strong&gt;VotesCount&lt;/strong&gt;: 🔺276&lt;br&gt;
&lt;strong&gt;Featured&lt;/strong&gt;: Yes&lt;br&gt;
&lt;strong&gt;CreatedAt&lt;/strong&gt;: 2025-08-13 07:01 AM (UTC)&lt;/p&gt;
&lt;hr&gt;
&lt;h2 id=&#34;5-fellow-api&#34;&gt;5. Fellow API
&lt;/h2&gt;&lt;p&gt;&lt;strong&gt;Tagline&lt;/strong&gt;: Build custom workflows from meeting transcripts and AI notes&lt;br&gt;
&lt;strong&gt;Description&lt;/strong&gt;: Whether you need to archive compliance records, trigger workflows when key terms appear, feed dashboards with fresh insights, or pass transcripts to an LLM for tailored recaps and action plans, Fellow’s API gives you everything you need to make it happen.&lt;br&gt;
&lt;strong&gt;Website&lt;/strong&gt;: &lt;a class=&#34;link&#34; href=&#34;https://www.producthunt.com/r/A6FVNK3VGLPI5S?utm_campaign=producthunt-api&amp;amp;utm_medium=api-v2&amp;amp;utm_source=Application%3A&amp;#43;weekly&amp;#43;%28ID%3A&amp;#43;148189%29&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;open&lt;/a&gt;&lt;br&gt;
&lt;strong&gt;Product Hunt&lt;/strong&gt;: &lt;a class=&#34;link&#34; href=&#34;https://www.producthunt.com/products/fellow-app?utm_campaign=producthunt-api&amp;amp;utm_medium=api-v2&amp;amp;utm_source=Application%3A&amp;#43;weekly&amp;#43;%28ID%3A&amp;#43;148189%29&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;View on Product Hunt&lt;/a&gt;&lt;br&gt;
&lt;img src=&#34;https://ph-files.imgix.net/edfbbee5-6bf5-4d6a-8d0b-ea3d80017801.png?auto=format&amp;amp;fit=crop&amp;amp;frame=1&amp;amp;h=512&amp;amp;w=1024&#34; loading=&#34;lazy&#34; alt=&#34;Fellow API&#34;&gt;&lt;br&gt;
&lt;strong&gt;Keyword&lt;/strong&gt;: API, Meeting Transcripts, AI Notes, Workflow Automation, Compliance, LLM Integration, Dashboards, Custom Workflows, Data Integration&lt;br&gt;
&lt;strong&gt;VotesCount&lt;/strong&gt;: 🔺248&lt;br&gt;
&lt;strong&gt;Featured&lt;/strong&gt;: Yes&lt;br&gt;
&lt;strong&gt;CreatedAt&lt;/strong&gt;: 2025-08-13 07:01 AM (UTC)&lt;/p&gt;
&lt;hr&gt;
&lt;h2 id=&#34;6-compozy&#34;&gt;6. Compozy
&lt;/h2&gt;&lt;p&gt;&lt;strong&gt;Tagline&lt;/strong&gt;: Next-level Agentic Orchestration Platform&lt;br&gt;
&lt;strong&gt;Description&lt;/strong&gt;: Create, deploy, and manage robust multi-agent systems with Compozy—unifying agents, tasks, tools, and signals into scalable YAML workflows. Powered by Go and Temporal for performance and reliability, it optimizes costs and gives enterprises full control.&lt;br&gt;
&lt;strong&gt;Website&lt;/strong&gt;: &lt;a class=&#34;link&#34; href=&#34;https://www.producthunt.com/r/TAJQAJYGDCH7OP?utm_campaign=producthunt-api&amp;amp;utm_medium=api-v2&amp;amp;utm_source=Application%3A&amp;#43;weekly&amp;#43;%28ID%3A&amp;#43;148189%29&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;open&lt;/a&gt;&lt;br&gt;
&lt;strong&gt;Product Hunt&lt;/strong&gt;: &lt;a class=&#34;link&#34; href=&#34;https://www.producthunt.com/products/compozy?utm_campaign=producthunt-api&amp;amp;utm_medium=api-v2&amp;amp;utm_source=Application%3A&amp;#43;weekly&amp;#43;%28ID%3A&amp;#43;148189%29&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;View on Product Hunt&lt;/a&gt;&lt;br&gt;
&lt;img src=&#34;https://ph-files.imgix.net/583eb0d0-c7e6-42a9-a02e-b2abdf593987.png?auto=format&amp;amp;fit=crop&amp;amp;frame=1&amp;amp;h=512&amp;amp;w=1024&#34; loading=&#34;lazy&#34; alt=&#34;Compozy&#34;&gt;&lt;br&gt;
&lt;strong&gt;Keyword&lt;/strong&gt;: Compozy, Agentic Orchestration, Multi-agent System, YAML Workflow, Go, Temporal, Workflow Automation, Enterprise Automation, Scalable Workflow, Agent Management&lt;br&gt;
&lt;strong&gt;VotesCount&lt;/strong&gt;: 🔺196&lt;br&gt;
&lt;strong&gt;Featured&lt;/strong&gt;: Yes&lt;br&gt;
&lt;strong&gt;CreatedAt&lt;/strong&gt;: 2025-08-13 07:01 AM (UTC)&lt;/p&gt;
&lt;hr&gt;
&lt;h2 id=&#34;7-reeroll&#34;&gt;7. Reeroll
&lt;/h2&gt;&lt;p&gt;&lt;strong&gt;Tagline&lt;/strong&gt;: The AI Video Editor&lt;br&gt;
&lt;strong&gt;Description&lt;/strong&gt;: Reeroll is an AI video editor for creating short, engaging videos in minutes. Choose a template, add your assets and links, and Reeroll will customize it for you. No complex editors, just a simple chat that turns ideas into ready-to-publish videos.&lt;br&gt;
&lt;strong&gt;Website&lt;/strong&gt;: &lt;a class=&#34;link&#34; href=&#34;https://www.producthunt.com/r/SW4XM4BZM7OHHL?utm_campaign=producthunt-api&amp;amp;utm_medium=api-v2&amp;amp;utm_source=Application%3A&amp;#43;weekly&amp;#43;%28ID%3A&amp;#43;148189%29&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;open&lt;/a&gt;&lt;br&gt;
&lt;strong&gt;Product Hunt&lt;/strong&gt;: &lt;a class=&#34;link&#34; href=&#34;https://www.producthunt.com/products/reeroll?utm_campaign=producthunt-api&amp;amp;utm_medium=api-v2&amp;amp;utm_source=Application%3A&amp;#43;weekly&amp;#43;%28ID%3A&amp;#43;148189%29&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;View on Product Hunt&lt;/a&gt;&lt;br&gt;
&lt;img src=&#34;https://ph-files.imgix.net/b6e70211-4b19-464c-ac0c-9fa917019f4a.png?auto=format&amp;amp;fit=crop&amp;amp;frame=1&amp;amp;h=512&amp;amp;w=1024&#34; loading=&#34;lazy&#34; alt=&#34;Reeroll&#34;&gt;&lt;br&gt;
&lt;strong&gt;Keyword&lt;/strong&gt;: AI video editor, AI video maker, short video maker, video editing tool, video creation tool, automated video editor, easy video editor, quick video editor, video templates, social media video maker&lt;br&gt;
&lt;strong&gt;VotesCount&lt;/strong&gt;: 🔺190&lt;br&gt;
&lt;strong&gt;Featured&lt;/strong&gt;: Yes&lt;br&gt;
&lt;strong&gt;CreatedAt&lt;/strong&gt;: 2025-08-13 07:01 AM (UTC)&lt;/p&gt;
&lt;hr&gt;
&lt;h2 id=&#34;8-downmark&#34;&gt;8. DownMark
&lt;/h2&gt;&lt;p&gt;&lt;strong&gt;Tagline&lt;/strong&gt;: Turn web content into clean Markdown with one click&lt;br&gt;
&lt;strong&gt;Description&lt;/strong&gt;: DownMark extracts web articles and converts them to clean Markdown files. This Safari extension intelligently filters ads and clutter, preserving only essential content with proper formatting for knowledge workers.&lt;br&gt;
&lt;strong&gt;Website&lt;/strong&gt;: &lt;a class=&#34;link&#34; href=&#34;https://www.producthunt.com/r/I2Y3VINSARXAYZ?utm_campaign=producthunt-api&amp;amp;utm_medium=api-v2&amp;amp;utm_source=Application%3A&amp;#43;weekly&amp;#43;%28ID%3A&amp;#43;148189%29&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;open&lt;/a&gt;&lt;br&gt;
&lt;strong&gt;Product Hunt&lt;/strong&gt;: &lt;a class=&#34;link&#34; href=&#34;https://www.producthunt.com/products/downmark?utm_campaign=producthunt-api&amp;amp;utm_medium=api-v2&amp;amp;utm_source=Application%3A&amp;#43;weekly&amp;#43;%28ID%3A&amp;#43;148189%29&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;View on Product Hunt&lt;/a&gt;&lt;br&gt;
&lt;img src=&#34;https://ph-files.imgix.net/f11e3ef2-fbda-4676-9cf6-169454489c14.png?auto=format&amp;amp;fit=crop&amp;amp;frame=1&amp;amp;h=512&amp;amp;w=1024&#34; loading=&#34;lazy&#34; alt=&#34;DownMark&#34;&gt;&lt;br&gt;
&lt;strong&gt;Keyword&lt;/strong&gt;: Markdown, Safari extension, web article converter, content cleaner, knowledge worker tool, one-click Markdown, DownMark, article to Markdown&lt;br&gt;
&lt;strong&gt;VotesCount&lt;/strong&gt;: 🔺172&lt;br&gt;
&lt;strong&gt;Featured&lt;/strong&gt;: Yes&lt;br&gt;
&lt;strong&gt;CreatedAt&lt;/strong&gt;: 2025-08-13 07:01 AM (UTC)&lt;/p&gt;
&lt;hr&gt;
&lt;h2 id=&#34;9-whispering&#34;&gt;9. Whispering
&lt;/h2&gt;&lt;p&gt;&lt;strong&gt;Tagline&lt;/strong&gt;: Open-source, local-first dictation you can trust&lt;br&gt;
&lt;strong&gt;Description&lt;/strong&gt;: Whispering is an open-source, local-first transcription app. Use local and cloud models, chain custom transforms, and most importantly, keep your audio local on-device. Fast, ergonomic, and MIT-licensed. Let’s make closed-source apps obsolete. 🚀&lt;br&gt;
&lt;strong&gt;Website&lt;/strong&gt;: &lt;a class=&#34;link&#34; href=&#34;https://www.producthunt.com/r/4JA3PV5VOJAXLE?utm_campaign=producthunt-api&amp;amp;utm_medium=api-v2&amp;amp;utm_source=Application%3A&amp;#43;weekly&amp;#43;%28ID%3A&amp;#43;148189%29&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;open&lt;/a&gt;&lt;br&gt;
&lt;strong&gt;Product Hunt&lt;/strong&gt;: &lt;a class=&#34;link&#34; href=&#34;https://www.producthunt.com/products/whispering?utm_campaign=producthunt-api&amp;amp;utm_medium=api-v2&amp;amp;utm_source=Application%3A&amp;#43;weekly&amp;#43;%28ID%3A&amp;#43;148189%29&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;View on Product Hunt&lt;/a&gt;&lt;br&gt;
&lt;img src=&#34;https://ph-files.imgix.net/fecdba24-c2bf-4e3c-ab89-ca19db8c63bc.png?auto=format&amp;amp;fit=crop&amp;amp;frame=1&amp;amp;h=512&amp;amp;w=1024&#34; loading=&#34;lazy&#34; alt=&#34;Whispering&#34;&gt;&lt;br&gt;
&lt;strong&gt;Keyword&lt;/strong&gt;: open-source, dictation, transcription, local-first, privacy, speech-to-text, offline, MIT-licensed, voice recognition, audio transcription&lt;br&gt;
&lt;strong&gt;VotesCount&lt;/strong&gt;: 🔺157&lt;br&gt;
&lt;strong&gt;Featured&lt;/strong&gt;: Yes&lt;br&gt;
&lt;strong&gt;CreatedAt&lt;/strong&gt;: 2025-08-13 07:01 AM (UTC)&lt;/p&gt;
&lt;hr&gt;
&lt;h2 id=&#34;10-inworld-runtime&#34;&gt;10. Inworld Runtime
&lt;/h2&gt;&lt;p&gt;&lt;strong&gt;Tagline&lt;/strong&gt;: The AI runtime for top consumer applications&lt;br&gt;
&lt;strong&gt;Description&lt;/strong&gt;: The first AI-native backend engineered to power massive-scale consumer applications. Easily scale from prototype to millions. Automated MLOps frees you from maintenance. Deploy no-code experiments instantly. Battle-tested through work with NVIDIA, Google, Xbox&lt;br&gt;
&lt;strong&gt;Website&lt;/strong&gt;: &lt;a class=&#34;link&#34; href=&#34;https://www.producthunt.com/r/FB2RLE4N2TGE3V?utm_campaign=producthunt-api&amp;amp;utm_medium=api-v2&amp;amp;utm_source=Application%3A&amp;#43;weekly&amp;#43;%28ID%3A&amp;#43;148189%29&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;open&lt;/a&gt;&lt;br&gt;
&lt;strong&gt;Product Hunt&lt;/strong&gt;: &lt;a class=&#34;link&#34; href=&#34;https://www.producthunt.com/products/inworld-ai?utm_campaign=producthunt-api&amp;amp;utm_medium=api-v2&amp;amp;utm_source=Application%3A&amp;#43;weekly&amp;#43;%28ID%3A&amp;#43;148189%29&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;View on Product Hunt&lt;/a&gt;&lt;br&gt;
&lt;img src=&#34;https://ph-files.imgix.net/9878066a-a7e2-42e5-b080-95b00a368a9c.png?auto=format&amp;amp;fit=crop&amp;amp;frame=1&amp;amp;h=512&amp;amp;w=1024&#34; loading=&#34;lazy&#34; alt=&#34;Inworld Runtime&#34;&gt;&lt;br&gt;
&lt;strong&gt;Keyword&lt;/strong&gt;: AI Runtime, Consumer Applications, MLOps, AI Backend, Scalable AI, No-Code Deployment, AI Infrastructure, NVIDIA, Google, Xbox&lt;br&gt;
&lt;strong&gt;VotesCount&lt;/strong&gt;: 🔺151&lt;br&gt;
&lt;strong&gt;Featured&lt;/strong&gt;: Yes&lt;br&gt;
&lt;strong&gt;CreatedAt&lt;/strong&gt;: 2025-08-13 07:01 AM (UTC)&lt;/p&gt;
&lt;hr&gt;
</description>
        </item>
        <item>
        <title>NeMo</title>
        <link>https://producthunt.programnotes.cn/en/p/nemo/</link>
        <pubDate>Sat, 10 May 2025 15:25:32 +0800</pubDate>
        
        <guid>https://producthunt.programnotes.cn/en/p/nemo/</guid>
        <description>&lt;img src="https://images.unsplash.com/photo-1729952832073-bf7d3d6150cd?ixid=M3w0NjAwMjJ8MHwxfHJhbmRvbXx8fHx8fHx8fDE3NDY4NjE4OTR8&amp;ixlib=rb-4.1.0" alt="Featured image of post NeMo" /&gt;&lt;h1 id=&#34;nvidianemo&#34;&gt;&lt;a class=&#34;link&#34; href=&#34;https://github.com/NVIDIA/NeMo&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;NVIDIA/NeMo&lt;/a&gt;
&lt;/h1&gt;&lt;p&gt;&lt;a class=&#34;link&#34; href=&#34;http://www.repostatus.org/#active&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;&lt;img src=&#34;http://www.repostatus.org/badges/latest/active.svg&#34; loading=&#34;lazy&#34; alt=&#34;Project Status: Active – The project has reached a stable, usable state and is being actively developed.&#34;&gt;&lt;/a&gt;
&lt;a class=&#34;link&#34; href=&#34;https://docs.nvidia.com/deeplearning/nemo/user-guide/docs/en/main/&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;&lt;img src=&#34;https://readthedocs.com/projects/nvidia-nemo/badge/?version=main&#34; loading=&#34;lazy&#34; alt=&#34;Documentation&#34;&gt;&lt;/a&gt;
&lt;a class=&#34;link&#34; href=&#34;https://github.com/nvidia/nemo/actions/workflows/codeql.yml&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;&lt;img src=&#34;https://github.com/nvidia/nemo/actions/workflows/codeql.yml/badge.svg?branch=main&amp;amp;event=push&#34; loading=&#34;lazy&#34; alt=&#34;CodeQL&#34;&gt;&lt;/a&gt;
&lt;a class=&#34;link&#34; href=&#34;https://github.com/NVIDIA/NeMo/blob/master/LICENSE&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;&lt;img src=&#34;https://img.shields.io/badge/License-Apache%202.0-brightgreen.svg&#34; loading=&#34;lazy&#34; alt=&#34;NeMo core license and license for collections in this repo&#34;&gt;&lt;/a&gt;
&lt;a class=&#34;link&#34; href=&#34;https://badge.fury.io/py/nemo-toolkit&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;&lt;img src=&#34;https://badge.fury.io/py/nemo-toolkit.svg&#34; loading=&#34;lazy&#34; alt=&#34;Release version&#34;&gt;&lt;/a&gt;
&lt;a class=&#34;link&#34; href=&#34;https://badge.fury.io/py/nemo-toolkit&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;&lt;img src=&#34;https://img.shields.io/pypi/pyversions/nemo-toolkit.svg&#34; loading=&#34;lazy&#34; alt=&#34;Python version&#34;&gt;&lt;/a&gt;
&lt;a class=&#34;link&#34; href=&#34;https://pepy.tech/project/nemo-toolkit&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;&lt;img src=&#34;https://static.pepy.tech/personalized-badge/nemo-toolkit?period=total&amp;amp;units=international_system&amp;amp;left_color=grey&amp;amp;right_color=brightgreen&amp;amp;left_text=downloads&#34; loading=&#34;lazy&#34; alt=&#34;PyPi total downloads&#34;&gt;&lt;/a&gt;
&lt;a class=&#34;link&#34; href=&#34;https://github.com/psf/black&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;&lt;img src=&#34;https://img.shields.io/badge/code%20style-black-000000.svg&#34; loading=&#34;lazy&#34; alt=&#34;Code style: black&#34;&gt;&lt;/a&gt;&lt;/p&gt;
&lt;h1 id=&#34;nvidia-nemo-framework&#34;&gt;&lt;strong&gt;NVIDIA NeMo Framework&lt;/strong&gt;
&lt;/h1&gt;&lt;h2 id=&#34;latest-news&#34;&gt;Latest News
&lt;/h2&gt;&lt;!-- markdownlint-disable --&gt;
&lt;details open&gt;
  &lt;summary&gt;&lt;b&gt;Pretrain and fine-tune 🤗 Hugging Face models via AutoModel&lt;/b&gt;&lt;/summary&gt;
      NeMo Framework&#39;s latest feature, AutoModel, enables broad support for 🤗 Hugging Face models, with 25.02 focusing on &lt;a href=https://huggingface.co/transformers/v3.5.1/model_doc/auto.html#automodelforcausallm&gt;AutoModelForCausalLM&lt;/a&gt; in the &lt;a href=https://huggingface.co/models?pipeline_tag=text-generation&amp;sort=trending&gt;text generation category&lt;/a&gt;. Future releases will add support for more model families, such as vision language models.
&lt;/details&gt;
&lt;details open&gt;
  &lt;summary&gt;&lt;b&gt;Training on Blackwell using NeMo&lt;/b&gt;&lt;/summary&gt;
      NeMo Framework has added Blackwell support, with 25.02 focusing on functional parity for B200. More optimizations to come in the upcoming releases.
&lt;/details&gt;
&lt;details open&gt;
  &lt;summary&gt;&lt;b&gt;NeMo Framework 2.0&lt;/b&gt;&lt;/summary&gt;
      We&#39;ve released NeMo 2.0, an update on the NeMo Framework which prioritizes modularity and ease-of-use. Please refer to the &lt;a href=https://docs.nvidia.com/nemo-framework/user-guide/latest/nemo-2.0/index.html&gt;NeMo Framework User Guide&lt;/a&gt; to get started.
&lt;/details&gt;
&lt;details open&gt;
  &lt;summary&gt;&lt;b&gt;New Cosmos World Foundation Models Support&lt;/b&gt;&lt;/summary&gt;
    &lt;details&gt; 
      &lt;summary&gt; &lt;a href=&#34;https://developer.nvidia.com/blog/advancing-physical-ai-with-nvidia-cosmos-world-foundation-model-platform&#34;&gt;Advancing Physical AI with NVIDIA Cosmos World Foundation Model Platform &lt;/a&gt; (2025-01-09) 
      &lt;/summary&gt; 
        The end-to-end NVIDIA Cosmos platform accelerates world model development for physical AI systems. Built on CUDA, Cosmos combines state-of-the-art world foundation models, video tokenizers, and AI-accelerated data processing pipelines. Developers can accelerate world model development by fine-tuning Cosmos world foundation models or building new ones from the ground up. These models create realistic synthetic videos of environments and interactions, providing a scalable foundation for training complex systems, from simulating humanoid robots performing advanced actions to developing end-to-end autonomous driving models. 
        &lt;br&gt;&lt;br&gt;
    &lt;/details&gt;
    &lt;details&gt;
      &lt;summary&gt;
        &lt;a href=&#34;https://developer.nvidia.com/blog/accelerate-custom-video-foundation-model-pipelines-with-new-nvidia-nemo-framework-capabilities/&#34;&gt;
          Accelerate Custom Video Foundation Model Pipelines with New NVIDIA NeMo Framework Capabilities
        &lt;/a&gt; (2025-01-07)
      &lt;/summary&gt;
        The NeMo Framework now supports training and customizing the &lt;a href=&#34;https://github.com/NVIDIA/Cosmos&#34;&gt;NVIDIA Cosmos&lt;/a&gt; collection of world foundation models. Cosmos leverages advanced text-to-world generation techniques to create fluid, coherent video content from natural language prompts.
        &lt;br&gt;&lt;br&gt;
        You can also now accelerate your video processing step using the &lt;a href=&#34;https://developer.nvidia.com/nemo-curator-video-processing-early-access&#34;&gt;NeMo Curator&lt;/a&gt; library, which provides optimized video processing and captioning features that can deliver up to 89x faster video processing when compared to an unoptimized CPU pipeline.
      &lt;br&gt;&lt;br&gt;
    &lt;/details&gt;
&lt;/details&gt;
&lt;details open&gt;
  &lt;summary&gt;&lt;b&gt;Large Language Models and Multimodal Models&lt;/b&gt;&lt;/summary&gt;
    &lt;details&gt;
      &lt;summary&gt;
        &lt;a href=&#34;https://developer.nvidia.com/blog/state-of-the-art-multimodal-generative-ai-model-development-with-nvidia-nemo/&#34;&gt;
          State-of-the-Art Multimodal Generative AI Model Development with NVIDIA NeMo
        &lt;/a&gt; (2024-11-06)
      &lt;/summary&gt;
        NVIDIA recently announced significant enhancements to the NeMo platform, focusing on multimodal generative AI models. The update includes NeMo Curator and the Cosmos tokenizer, which streamline the data curation process and enhance the quality of visual data. These tools are designed to handle large-scale data efficiently, making it easier to develop high-quality AI models for various applications, including robotics and autonomous driving. The Cosmos tokenizers, in particular, efficiently map visual data into compact, semantic tokens, which is crucial for training large-scale generative models. The tokenizer is available now on the &lt;a href=https://github.com/NVIDIA/cosmos-tokenizer&gt;NVIDIA/cosmos-tokenizer&lt;/a&gt; GitHub repo and on &lt;a href=https://huggingface.co/nvidia/Cosmos-Tokenizer-CV8x8x8&gt;Hugging Face&lt;/a&gt;.
      &lt;br&gt;&lt;br&gt;
    &lt;/details&gt;
    &lt;details&gt;
      &lt;summary&gt;
        &lt;a href=&#34;https://docs.nvidia.com/nemo-framework/user-guide/latest/llms/llama/index.html#new-llama-3-1-support&#34;&gt;
        New Llama 3.1 Support
        &lt;/a&gt; (2024-07-23)
      &lt;/summary&gt;
        The NeMo Framework now supports training and customizing the Llama 3.1 collection of LLMs from Meta.
      &lt;br&gt;&lt;br&gt;
    &lt;/details&gt;
    &lt;details&gt;
      &lt;summary&gt;
        &lt;a href=&#34;https://aws.amazon.com/blogs/machine-learning/accelerate-your-generative-ai-distributed-training-workloads-with-the-nvidia-nemo-framework-on-amazon-eks/&#34;&gt;
          Accelerate your Generative AI Distributed Training Workloads with the NVIDIA NeMo Framework on Amazon EKS
        &lt;/a&gt; (2024-07-16)
      &lt;/summary&gt;
     NVIDIA NeMo Framework now runs distributed training workloads on an Amazon Elastic Kubernetes Service (Amazon EKS) cluster. For step-by-step instructions on creating an EKS cluster and running distributed training workloads with NeMo, see the GitHub repository &lt;a href=&#34;https://github.com/aws-samples/awsome-distributed-training/tree/main/3.test_cases/2.nemo-launcher/EKS/&#34;&gt;here&lt;/a&gt;.
      &lt;br&gt;&lt;br&gt;
    &lt;/details&gt;
    &lt;details&gt;
      &lt;summary&gt;
        &lt;a href=&#34;https://developer.nvidia.com/blog/nvidia-nemo-accelerates-llm-innovation-with-hybrid-state-space-model-support/&#34;&gt;
          NVIDIA NeMo Accelerates LLM Innovation with Hybrid State Space Model Support
        &lt;/a&gt; (2024-06-17)
      &lt;/summary&gt;
     NVIDIA NeMo and Megatron Core now support pre-training and fine-tuning of state space models (SSMs). NeMo also supports training models based on the Griffin architecture as described by Google DeepMind. 
      &lt;br&gt;&lt;br&gt;
    &lt;/details&gt;
      &lt;details&gt;
      &lt;summary&gt;
        &lt;a href=&#34;https://huggingface.co/models?sort=trending&amp;search=nvidia%2Fnemotron-4-340B&#34;&gt;
          NVIDIA releases 340B base, instruct, and reward models pretrained on a total of 9T tokens.
        &lt;/a&gt; (2024-06-18)
      &lt;/summary&gt;
      See documentation and tutorials for SFT, PEFT, and PTQ with 
      &lt;a href=&#34;https://docs.nvidia.com/nemo-framework/user-guide/latest/llms/nemotron/index.html&#34;&gt;
        Nemotron 340B 
      &lt;/a&gt;
      in the NeMo Framework User Guide.
      &lt;br&gt;&lt;br&gt;
    &lt;/details&gt;
    &lt;details&gt;
      &lt;summary&gt;
        &lt;a href=&#34;https://developer.nvidia.com/blog/nvidia-sets-new-generative-ai-performance-and-scale-records-in-mlperf-training-v4-0/&#34;&gt;
          NVIDIA sets new generative AI performance and scale records in MLPerf Training v4.0
        &lt;/a&gt; (2024-06-12)
      &lt;/summary&gt;
      Using NVIDIA NeMo Framework and NVIDIA Hopper GPUs, NVIDIA was able to scale to 11,616 H100 GPUs and achieve near-linear performance scaling on LLM pretraining.
      NVIDIA also achieved the highest LLM fine-tuning performance and raised the bar for text-to-image training.
      &lt;br&gt;&lt;br&gt;
    &lt;/details&gt;
    &lt;details&gt;
        &lt;summary&gt;
          &lt;a href=&#34;https://cloud.google.com/blog/products/compute/gke-and-nvidia-nemo-framework-to-train-generative-ai-models&#34;&gt;
            Accelerate your generative AI journey with NVIDIA NeMo Framework on GKE
          &lt;/a&gt; (2024-03-16)
        &lt;/summary&gt;
        An end-to-end walkthrough to train generative AI models on the Google Kubernetes Engine (GKE) using the NVIDIA NeMo Framework is available at https://github.com/GoogleCloudPlatform/nvidia-nemo-on-gke. 
        The walkthrough includes detailed instructions on how to set up a Google Cloud Project and pre-train a GPT model using the NeMo Framework.
        &lt;br&gt;&lt;br&gt;
      &lt;/details&gt;
&lt;/details&gt;
&lt;details open&gt;
  &lt;summary&gt;&lt;b&gt;Speech Recognition&lt;/b&gt;&lt;/summary&gt;
  &lt;details&gt;
      &lt;summary&gt;
        &lt;a href=&#34;https://developer.nvidia.com/blog/accelerating-leaderboard-topping-asr-models-10x-with-nvidia-nemo/&#34;&gt;
          Accelerating Leaderboard-Topping ASR Models 10x with NVIDIA NeMo
        &lt;/a&gt; (2024-09-24)
      &lt;/summary&gt;
      The NVIDIA NeMo team released a number of inference optimizations for CTC, RNN-T, and TDT models that resulted in up to a 10x inference speed-up.
      These models now exceed an inverse real-time factor (RTFx) of 2,000, with some reaching an RTFx of 6,000.
      &lt;br&gt;&lt;br&gt;
    &lt;/details&gt;
    &lt;details&gt;
      &lt;summary&gt;
        &lt;a href=&#34;https://developer.nvidia.com/blog/new-standard-for-speech-recognition-and-translation-from-the-nvidia-nemo-canary-model/&#34;&gt;
          New Standard for Speech Recognition and Translation from the NVIDIA NeMo Canary Model
        &lt;/a&gt; (2024-04-18)
      &lt;/summary&gt;
      The NeMo team just released Canary, a multilingual model that transcribes speech in English, Spanish, German, and French with punctuation and capitalization. 
      Canary also provides bi-directional translation between English and the three other supported languages.
      &lt;br&gt;&lt;br&gt;
    &lt;/details&gt;
    &lt;details&gt;
      &lt;summary&gt;
        &lt;a href=&#34;https://developer.nvidia.com/blog/pushing-the-boundaries-of-speech-recognition-with-nemo-parakeet-asr-models/&#34;&gt;
          Pushing the Boundaries of Speech Recognition with NVIDIA NeMo Parakeet ASR Models
        &lt;/a&gt; (2024-04-18)
      &lt;/summary&gt;
      NVIDIA NeMo, an end-to-end platform for the development of multimodal generative AI models at scale anywhere—on any cloud and on-premises—released the Parakeet family of automatic speech recognition (ASR) models. 
      These state-of-the-art ASR models, developed in collaboration with Suno.ai, transcribe spoken English with exceptional accuracy.
      &lt;br&gt;&lt;br&gt;
    &lt;/details&gt;
  &lt;details&gt;
    &lt;summary&gt;
      &lt;a href=&#34;https://developer.nvidia.com/blog/turbocharge-asr-accuracy-and-speed-with-nvidia-nemo-parakeet-tdt/&#34;&gt;
        Turbocharge ASR Accuracy and Speed with NVIDIA NeMo Parakeet-TDT
      &lt;/a&gt; (2024-04-18)
    &lt;/summary&gt;
    NVIDIA NeMo, an end-to-end platform for developing multimodal generative AI models at scale anywhere—on any cloud and on-premises—recently released Parakeet-TDT. 
    This new addition to the NeMo ASR Parakeet model family boasts better accuracy and 64% greater speed than the previous best model, Parakeet-RNNT-1.1B.
    &lt;br&gt;&lt;br&gt;
  &lt;/details&gt;
&lt;/details&gt;
&lt;!-- markdownlint-enable --&gt;
&lt;h2 id=&#34;introduction&#34;&gt;Introduction
&lt;/h2&gt;&lt;p&gt;NVIDIA NeMo Framework is a scalable and cloud-native generative AI
framework built for researchers and PyTorch developers working on Large
Language Models (LLMs), Multimodal Models (MMs), Automatic Speech
Recognition (ASR), Text to Speech (TTS), and Computer Vision (CV)
domains. It is designed to help you efficiently create, customize, and
deploy new generative AI models by leveraging existing code and
pre-trained model checkpoints.&lt;/p&gt;
&lt;p&gt;For technical documentation, please see the &lt;a class=&#34;link&#34; href=&#34;https://docs.nvidia.com/nemo-framework/user-guide/latest/playbooks/index.html&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;NeMo Framework User
Guide&lt;/a&gt;.&lt;/p&gt;
&lt;h2 id=&#34;whats-new-in-nemo-20&#34;&gt;What&amp;rsquo;s New in NeMo 2.0
&lt;/h2&gt;&lt;p&gt;NVIDIA NeMo 2.0 introduces several significant improvements over its predecessor, NeMo 1.0, enhancing flexibility, performance, and scalability.&lt;/p&gt;
&lt;ul&gt;
&lt;li&gt;
&lt;p&gt;&lt;strong&gt;Python-Based Configuration&lt;/strong&gt; - NeMo 2.0 transitions from YAML files to a Python-based configuration, providing more flexibility and control. This shift makes it easier to extend and customize configurations programmatically.&lt;/p&gt;
&lt;/li&gt;
&lt;li&gt;
&lt;p&gt;&lt;strong&gt;Modular Abstractions&lt;/strong&gt; - By adopting PyTorch Lightning’s modular abstractions, NeMo 2.0 simplifies adaptation and experimentation. This modular approach allows developers to more easily modify and experiment with different components of their models.&lt;/p&gt;
&lt;/li&gt;
&lt;li&gt;
&lt;p&gt;&lt;strong&gt;Scalability&lt;/strong&gt; - NeMo 2.0 seamlessly scales large-scale experiments across thousands of GPUs using &lt;a class=&#34;link&#34; href=&#34;https://github.com/NVIDIA/NeMo-Run&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;NeMo-Run&lt;/a&gt;, a powerful tool designed to streamline the configuration, execution, and management of machine learning experiments across computing environments.&lt;/p&gt;
&lt;/li&gt;
&lt;/ul&gt;
&lt;p&gt;Overall, these enhancements make NeMo 2.0 a powerful, scalable, and user-friendly framework for AI model development.&lt;/p&gt;
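&lt;p&gt;To make the Python-based configuration concrete, here is a minimal sketch of launching a pretraining recipe with NeMo-Run, assuming NeMo 2.0 and the &lt;code&gt;nemo_run&lt;/code&gt; package are installed; the recipe and argument names follow the Quickstart but should be verified for your NeMo version.&lt;/p&gt;
&lt;pre&gt;&lt;code&gt;# Hedged sketch: launching a NeMo 2.0 pretraining recipe with NeMo-Run.
# Recipe and argument names are illustrative; check the NeMo 2.0 Quickstart.
import nemo_run as run
from nemo.collections import llm

def main():
    # Recipes are plain Python config objects (no YAML), so fields can
    # be inspected and overridden programmatically.
    recipe = llm.llama3_8b.pretrain_recipe(
        name=&#34;llama3_8b_pretrain&#34;,  # experiment name (hypothetical)
        num_nodes=1,
        num_gpus_per_node=8,
    )
    recipe.trainer.max_steps = 100  # override a field in plain Python

    # Run locally; swap the executor to target a Slurm cluster.
    run.run(recipe, executor=run.LocalExecutor())

if __name__ == &#34;__main__&#34;:
    main()
&lt;/code&gt;&lt;/pre&gt;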
&lt;blockquote&gt;
&lt;p&gt;[!IMPORTANT]&lt;br&gt;
NeMo 2.0 is currently supported by the LLM (large language model) and VLM (vision language model) collections.&lt;/p&gt;
&lt;/blockquote&gt;
&lt;h3 id=&#34;get-started-with-nemo-20&#34;&gt;Get Started with NeMo 2.0
&lt;/h3&gt;&lt;ul&gt;
&lt;li&gt;Refer to the &lt;a class=&#34;link&#34; href=&#34;https://docs.nvidia.com/nemo-framework/user-guide/latest/nemo-2.0/quickstart.html&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;Quickstart&lt;/a&gt; for examples of using NeMo-Run to launch NeMo 2.0 experiments locally and on a Slurm cluster.&lt;/li&gt;
&lt;li&gt;For more information about NeMo 2.0, see the &lt;a class=&#34;link&#34; href=&#34;https://docs.nvidia.com/nemo-framework/user-guide/latest/nemo-2.0/index.html&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;NeMo Framework User Guide&lt;/a&gt;.&lt;/li&gt;
&lt;li&gt;&lt;a class=&#34;link&#34; href=&#34;https://github.com/NVIDIA/NeMo/blob/main/nemo/collections/llm/recipes&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;NeMo 2.0 Recipes&lt;/a&gt; contains additional examples of launching large-scale runs using NeMo 2.0 and NeMo-Run.&lt;/li&gt;
&lt;li&gt;For an in-depth exploration of the main features of NeMo 2.0, see the &lt;a class=&#34;link&#34; href=&#34;https://docs.nvidia.com/nemo-framework/user-guide/latest/nemo-2.0/features/index.html#feature-guide&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;Feature Guide&lt;/a&gt;.&lt;/li&gt;
&lt;li&gt;To transition from NeMo 1.0 to 2.0, see the &lt;a class=&#34;link&#34; href=&#34;https://docs.nvidia.com/nemo-framework/user-guide/latest/nemo-2.0/migration/index.html#migration-guide&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;Migration Guide&lt;/a&gt; for step-by-step instructions.&lt;/li&gt;
&lt;/ul&gt;
&lt;h3 id=&#34;get-started-with-cosmos&#34;&gt;Get Started with Cosmos
&lt;/h3&gt;&lt;p&gt;NeMo Curator and NeMo Framework support video curation and post-training of the Cosmos World Foundation Models, which are open and available on &lt;a class=&#34;link&#34; href=&#34;https://catalog.ngc.nvidia.com/orgs/nvidia/teams/cosmos/collections/cosmos&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;NGC&lt;/a&gt; and &lt;a class=&#34;link&#34; href=&#34;https://huggingface.co/collections/nvidia/cosmos-6751e884dc10e013a0a0d8e6&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;Hugging Face&lt;/a&gt;. For more information on video datasets, refer to &lt;a class=&#34;link&#34; href=&#34;https://developer.nvidia.com/nemo-curator&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;NeMo Curator&lt;/a&gt;. To post-train World Foundation Models using the NeMo Framework for your custom physical AI tasks, see the &lt;a class=&#34;link&#34; href=&#34;https://github.com/NVIDIA/Cosmos/blob/main/cosmos1/models/diffusion/nemo/post_training/README.md&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;Cosmos Diffusion models&lt;/a&gt; and the &lt;a class=&#34;link&#34; href=&#34;https://github.com/NVIDIA/Cosmos/blob/main/cosmos1/models/autoregressive/nemo/post_training/README.md&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;Cosmos Autoregressive models&lt;/a&gt;.&lt;/p&gt;
&lt;h2 id=&#34;llms-and-mms-training-alignment-and-customization&#34;&gt;LLMs and MMs Training, Alignment, and Customization
&lt;/h2&gt;&lt;p&gt;All NeMo models are trained with
&lt;a class=&#34;link&#34; href=&#34;https://github.com/Lightning-AI/lightning&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;Lightning&lt;/a&gt;. Training is
automatically scalable to thousands of GPUs. You can check the performance benchmarks using the
latest NeMo Framework container &lt;a class=&#34;link&#34; href=&#34;https://docs.nvidia.com/nemo-framework/user-guide/latest/performance/performance_summary.html&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;here&lt;/a&gt;.&lt;/p&gt;
&lt;p&gt;When applicable, NeMo models leverage cutting-edge distributed training
techniques, incorporating &lt;a class=&#34;link&#34; href=&#34;https://docs.nvidia.com/nemo-framework/user-guide/latest/modeloverview.html&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;parallelism
strategies&lt;/a&gt;
to enable efficient training of very large models. These techniques
include Tensor Parallelism (TP), Pipeline Parallelism (PP), Fully
Sharded Data Parallelism (FSDP), Mixture-of-Experts (MoE), and Mixed
Precision Training with BFloat16 and FP8, as well as others.&lt;/p&gt;
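&lt;p&gt;As an illustration of how these parallelism strategies are exposed, the sketch below configures tensor and pipeline parallelism plus BF16 mixed precision through NeMo&#39;s Megatron strategy; the class and argument names follow the NeMo 2.0 documentation but should be verified against the User Guide for your version.&lt;/p&gt;
&lt;pre&gt;&lt;code&gt;# Hedged sketch: configuring model parallelism in NeMo 2.0.
# Class and argument names follow the documented Megatron strategy;
# verify against the NeMo Framework User Guide for your version.
from nemo import lightning as nl

strategy = nl.MegatronStrategy(
    tensor_model_parallel_size=2,    # split each layer across 2 GPUs (TP)
    pipeline_model_parallel_size=2,  # split layers into 2 pipeline stages (PP)
)
trainer = nl.Trainer(
    devices=4,   # TP=2 x PP=2 consumes 4 GPUs per data-parallel replica
    num_nodes=1,
    strategy=strategy,
    plugins=nl.MegatronMixedPrecision(precision=&#34;bf16-mixed&#34;),
)
&lt;/code&gt;&lt;/pre&gt;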
&lt;p&gt;NeMo Transformer-based LLMs and MMs utilize &lt;a class=&#34;link&#34; href=&#34;https://github.com/NVIDIA/TransformerEngine&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;NVIDIA Transformer
Engine&lt;/a&gt; for FP8 training on
NVIDIA Hopper GPUs, while leveraging &lt;a class=&#34;link&#34; href=&#34;https://github.com/NVIDIA/Megatron-LM/tree/main/megatron/core&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;NVIDIA Megatron
Core&lt;/a&gt; for
scaling Transformer model training.&lt;/p&gt;
&lt;p&gt;NeMo LLMs can be aligned with state-of-the-art methods such as SteerLM,
Direct Preference Optimization (DPO), and Reinforcement Learning from
Human Feedback (RLHF). See &lt;a class=&#34;link&#34; href=&#34;https://github.com/NVIDIA/NeMo-Aligner&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;NVIDIA NeMo
Aligner&lt;/a&gt; for more information.&lt;/p&gt;
&lt;p&gt;In addition to supervised fine-tuning (SFT), NeMo also supports the
latest parameter efficient fine-tuning (PEFT) techniques such as LoRA,
P-Tuning, Adapters, and IA3. Refer to the &lt;a class=&#34;link&#34; href=&#34;https://docs.nvidia.com/nemo-framework/user-guide/latest/sft_peft/index.html&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;NeMo Framework User
Guide&lt;/a&gt;
for the full list of supported models and techniques.&lt;/p&gt;
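&lt;p&gt;As a refresher on what a technique like LoRA does under the hood (a
conceptual PyTorch sketch, not NeMo&amp;#39;s implementation): the frozen
pretrained weight is augmented with a trainable low-rank update, so only a
small fraction of parameters is trained:&lt;/p&gt;
&lt;div class=&#34;highlight&#34;&gt;&lt;pre tabindex=&#34;0&#34; class=&#34;chroma&#34;&gt;&lt;code class=&#34;language-python&#34; data-lang=&#34;python&#34;&gt;# Conceptual LoRA layer in plain PyTorch (not NeMo&amp;#39;s implementation):
# y = base(x) + scale * B(A(x)), with the base frozen and A, B trainable.
import torch
import torch.nn as nn

class LoRALinear(nn.Module):
    def __init__(self, in_features, out_features, rank=8, alpha=16):
        super().__init__()
        self.base = nn.Linear(in_features, out_features)
        for p in self.base.parameters():
            p.requires_grad_(False)         # freeze the pretrained weights
        self.lora_a = nn.Linear(in_features, rank, bias=False)
        self.lora_b = nn.Linear(rank, out_features, bias=False)
        nn.init.zeros_(self.lora_b.weight)  # start as a no-op update
        self.scale = alpha / rank

    def forward(self, x):
        return self.base(x) + self.scale * self.lora_b(self.lora_a(x))

layer = LoRALinear(1024, 1024)
y = layer(torch.randn(2, 1024))  # only lora_a / lora_b receive gradients
&lt;/code&gt;&lt;/pre&gt;&lt;/div&gt;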
&lt;h2 id=&#34;llms-and-mms-deployment-and-optimization&#34;&gt;LLMs and MMs Deployment and Optimization
&lt;/h2&gt;&lt;p&gt;NeMo LLMs and MMs can be deployed and optimized with &lt;a class=&#34;link&#34; href=&#34;https://developer.nvidia.com/nemo-microservices-early-access&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;NVIDIA NeMo
Microservices&lt;/a&gt;.&lt;/p&gt;
&lt;h2 id=&#34;speech-ai&#34;&gt;Speech AI
&lt;/h2&gt;&lt;p&gt;NeMo ASR and TTS models can be optimized for inference and deployed for
production use cases with &lt;a class=&#34;link&#34; href=&#34;https://developer.nvidia.com/riva&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;NVIDIA Riva&lt;/a&gt;.&lt;/p&gt;
&lt;h2 id=&#34;nemo-framework-launcher&#34;&gt;NeMo Framework Launcher
&lt;/h2&gt;&lt;blockquote&gt;
&lt;p&gt;[!IMPORTANT]&lt;br&gt;
NeMo Framework Launcher is compatible with NeMo version 1.0 only. &lt;a class=&#34;link&#34; href=&#34;https://github.com/NVIDIA/NeMo-Run&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;NeMo-Run&lt;/a&gt; is recommended for launching experiments using NeMo 2.0.&lt;/p&gt;
&lt;/blockquote&gt;
&lt;p&gt;&lt;a class=&#34;link&#34; href=&#34;https://github.com/NVIDIA/NeMo-Megatron-Launcher&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;NeMo Framework
Launcher&lt;/a&gt; is a
cloud-native tool that streamlines the NeMo Framework experience. It is
used for launching end-to-end NeMo Framework training jobs on CSPs and
Slurm clusters.&lt;/p&gt;
&lt;p&gt;The NeMo Framework Launcher includes extensive recipes, scripts,
utilities, and documentation for training NeMo LLMs. It also includes
the NeMo Framework &lt;a class=&#34;link&#34; href=&#34;https://github.com/NVIDIA/NeMo-Megatron-Launcher#53-using-autoconfigurator-to-find-the-optimal-configuration&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;Autoconfigurator&lt;/a&gt;,
which is designed to find the optimal model parallel configuration for
training on a specific cluster.&lt;/p&gt;
&lt;p&gt;To get started quickly with the NeMo Framework Launcher, please see the
&lt;a class=&#34;link&#34; href=&#34;https://docs.nvidia.com/nemo-framework/user-guide/latest/playbooks/index.html&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;NeMo Framework
Playbooks&lt;/a&gt;.
The NeMo Framework Launcher does not currently support ASR and TTS
training, but it will soon.&lt;/p&gt;
&lt;h2 id=&#34;get-started-with-nemo-framework&#34;&gt;Get Started with NeMo Framework
&lt;/h2&gt;&lt;p&gt;Getting started with NeMo Framework is easy. State-of-the-art pretrained
NeMo models are freely available on &lt;a class=&#34;link&#34; href=&#34;https://huggingface.co/models?library=nemo&amp;amp;sort=downloads&amp;amp;search=nvidia&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;Hugging Face
Hub&lt;/a&gt;
and &lt;a class=&#34;link&#34; href=&#34;https://catalog.ngc.nvidia.com/models?query=nemo&amp;amp;orderBy=weightPopularDESC&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;NVIDIA
NGC&lt;/a&gt;.
These models can be used to generate text or images, transcribe audio,
and synthesize speech in just a few lines of code.&lt;/p&gt;
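&lt;p&gt;For example, transcribing audio with a pretrained ASR checkpoint takes
only a few lines (a minimal sketch; the model name and audio path below are
illustrative):&lt;/p&gt;
&lt;div class=&#34;highlight&#34;&gt;&lt;pre tabindex=&#34;0&#34; class=&#34;chroma&#34;&gt;&lt;code class=&#34;language-python&#34; data-lang=&#34;python&#34;&gt;# Minimal ASR inference sketch; model name and audio file are examples.
import nemo.collections.asr as nemo_asr

asr_model = nemo_asr.models.ASRModel.from_pretrained(
    model_name=&amp;#34;stt_en_conformer_ctc_small&amp;#34;  # downloaded from NGC
)
print(asr_model.transcribe([&amp;#34;sample.wav&amp;#34;]))  # 16 kHz mono WAV file
&lt;/code&gt;&lt;/pre&gt;&lt;/div&gt;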
&lt;p&gt;We have extensive
&lt;a class=&#34;link&#34; href=&#34;https://docs.nvidia.com/deeplearning/nemo/user-guide/docs/en/stable/starthere/tutorials.html&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;tutorials&lt;/a&gt;
that can be run on &lt;a class=&#34;link&#34; href=&#34;https://colab.research.google.com&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;Google Colab&lt;/a&gt; or
with our &lt;a class=&#34;link&#34; href=&#34;https://catalog.ngc.nvidia.com/orgs/nvidia/containers/nemo&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;NGC NeMo Framework
Container&lt;/a&gt;.
We also have
&lt;a class=&#34;link&#34; href=&#34;https://docs.nvidia.com/nemo-framework/user-guide/latest/playbooks/index.html&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;playbooks&lt;/a&gt;
for users who want to train NeMo models with the NeMo Framework
Launcher.&lt;/p&gt;
&lt;p&gt;For advanced users who want to train NeMo models from scratch or
fine-tune existing NeMo models, we have a full suite of &lt;a class=&#34;link&#34; href=&#34;https://github.com/NVIDIA/NeMo/tree/main/examples&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;example
scripts&lt;/a&gt; that support
multi-GPU/multi-node training.&lt;/p&gt;
&lt;h2 id=&#34;key-features&#34;&gt;Key Features
&lt;/h2&gt;&lt;ul&gt;
&lt;li&gt;&lt;a class=&#34;link&#34; href=&#34;nemo/collections/nlp/README.md&#34; &gt;Large Language Models&lt;/a&gt;&lt;/li&gt;
&lt;li&gt;&lt;a class=&#34;link&#34; href=&#34;nemo/collections/multimodal/README.md&#34; &gt;Multimodal&lt;/a&gt;&lt;/li&gt;
&lt;li&gt;&lt;a class=&#34;link&#34; href=&#34;nemo/collections/asr/README.md&#34; &gt;Automatic Speech Recognition&lt;/a&gt;&lt;/li&gt;
&lt;li&gt;&lt;a class=&#34;link&#34; href=&#34;nemo/collections/tts/README.md&#34; &gt;Text to Speech&lt;/a&gt;&lt;/li&gt;
&lt;li&gt;&lt;a class=&#34;link&#34; href=&#34;nemo/collections/vision/README.md&#34; &gt;Computer Vision&lt;/a&gt;&lt;/li&gt;
&lt;/ul&gt;
&lt;h2 id=&#34;requirements&#34;&gt;Requirements
&lt;/h2&gt;&lt;ul&gt;
&lt;li&gt;Python 3.10 or above&lt;/li&gt;
&lt;li&gt;PyTorch 2.5 or above&lt;/li&gt;
&lt;li&gt;NVIDIA GPU (if you intend to do model training)&lt;/li&gt;
&lt;/ul&gt;
&lt;h2 id=&#34;developer-documentation&#34;&gt;Developer Documentation
&lt;/h2&gt;&lt;table&gt;
  &lt;thead&gt;
      &lt;tr&gt;
          &lt;th&gt;Version&lt;/th&gt;
          &lt;th&gt;Status&lt;/th&gt;
          &lt;th&gt;Description&lt;/th&gt;
      &lt;/tr&gt;
  &lt;/thead&gt;
  &lt;tbody&gt;
      &lt;tr&gt;
          &lt;td&gt;Latest&lt;/td&gt;
          &lt;td&gt;&lt;a class=&#34;link&#34; href=&#34;https://docs.nvidia.com/deeplearning/nemo/user-guide/docs/en/main/&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;&lt;img src=&#34;https://readthedocs.com/projects/nvidia-nemo/badge/?version=main&#34; loading=&#34;lazy&#34; alt=&#34;Documentation Status&#34;&gt;&lt;/a&gt;&lt;/td&gt;
          &lt;td&gt;&lt;a class=&#34;link&#34; href=&#34;https://docs.nvidia.com/deeplearning/nemo/user-guide/docs/en/main/&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;Documentation of the latest (i.e. main) branch.&lt;/a&gt;&lt;/td&gt;
      &lt;/tr&gt;
      &lt;tr&gt;
          &lt;td&gt;Stable&lt;/td&gt;
          &lt;td&gt;&lt;a class=&#34;link&#34; href=&#34;https://docs.nvidia.com/deeplearning/nemo/user-guide/docs/en/stable/&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;&lt;img src=&#34;https://readthedocs.com/projects/nvidia-nemo/badge/?version=stable&#34; loading=&#34;lazy&#34; alt=&#34;Documentation Status&#34;&gt;&lt;/a&gt;&lt;/td&gt;
          &lt;td&gt;&lt;a class=&#34;link&#34; href=&#34;https://docs.nvidia.com/deeplearning/nemo/user-guide/docs/en/stable/&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;Documentation of the stable (i.e. most recent release) branch.&lt;/a&gt;&lt;/td&gt;
      &lt;/tr&gt;
  &lt;/tbody&gt;
&lt;/table&gt;
&lt;h2 id=&#34;install-nemo-framework&#34;&gt;Install NeMo Framework
&lt;/h2&gt;&lt;p&gt;The NeMo Framework can be installed in a variety of ways, depending on
your needs and domain. One of the following installation methods is likely
to suit your use case best.&lt;/p&gt;
&lt;ul&gt;
&lt;li&gt;&lt;a class=&#34;link&#34; href=&#34;#conda--pip&#34; &gt;Conda / Pip&lt;/a&gt;: Install NeMo-Framework with native Pip into a virtual environment.
&lt;ul&gt;
&lt;li&gt;Used to explore NeMo on any supported platform.&lt;/li&gt;
&lt;li&gt;This is the recommended method for ASR and TTS domains.&lt;/li&gt;
&lt;li&gt;Limited feature-completeness for other domains.&lt;/li&gt;
&lt;/ul&gt;
&lt;/li&gt;
&lt;li&gt;&lt;a class=&#34;link&#34; href=&#34;#ngc-pytorch-container&#34; &gt;NGC PyTorch container&lt;/a&gt;: Install NeMo-Framework from source with feature-completeness into a highly optimized container.
&lt;ul&gt;
&lt;li&gt;For users that want to install from source in a highly optimized container.&lt;/li&gt;
&lt;/ul&gt;
&lt;/li&gt;
&lt;li&gt;&lt;a class=&#34;link&#34; href=&#34;#ngc-nemo-container&#34; &gt;NGC NeMo container&lt;/a&gt;: Ready-to-go NeMo-Framework solution.
&lt;ul&gt;
&lt;li&gt;For users who seek the highest performance.&lt;/li&gt;
&lt;li&gt;Contains all dependencies installed and tested for performance and convergence.&lt;/li&gt;
&lt;/ul&gt;
&lt;/li&gt;
&lt;/ul&gt;
&lt;h3 id=&#34;support-matrix&#34;&gt;Support matrix
&lt;/h3&gt;&lt;p&gt;NeMo-Framework provides tiers of support based on OS / platform and mode of installation. Please refer to the following overview of support levels:&lt;/p&gt;
&lt;ul&gt;
&lt;li&gt;Fully supported: Max performance and feature-completeness.&lt;/li&gt;
&lt;li&gt;Limited support: Used to explore NeMo.&lt;/li&gt;
&lt;li&gt;No support yet: In development.&lt;/li&gt;
&lt;li&gt;Deprecated: Support has reached end of life.&lt;/li&gt;
&lt;/ul&gt;
&lt;p&gt;Please refer to the following table for current support levels:&lt;/p&gt;
&lt;table&gt;
  &lt;thead&gt;
      &lt;tr&gt;
          &lt;th&gt;OS / Platform&lt;/th&gt;
          &lt;th&gt;Install from PyPI&lt;/th&gt;
          &lt;th&gt;Source into NGC container&lt;/th&gt;
      &lt;/tr&gt;
  &lt;/thead&gt;
  &lt;tbody&gt;
      &lt;tr&gt;
          &lt;td&gt;&lt;code&gt;linux&lt;/code&gt; - &lt;code&gt;amd64/x86_64&lt;/code&gt;&lt;/td&gt;
          &lt;td&gt;Limited support&lt;/td&gt;
          &lt;td&gt;Full support&lt;/td&gt;
      &lt;/tr&gt;
      &lt;tr&gt;
          &lt;td&gt;&lt;code&gt;linux&lt;/code&gt; - &lt;code&gt;arm64&lt;/code&gt;&lt;/td&gt;
          &lt;td&gt;Limited support&lt;/td&gt;
          &lt;td&gt;Limited support&lt;/td&gt;
      &lt;/tr&gt;
      &lt;tr&gt;
          &lt;td&gt;&lt;code&gt;darwin&lt;/code&gt; - &lt;code&gt;amd64/x86_64&lt;/code&gt;&lt;/td&gt;
          &lt;td&gt;Deprecated&lt;/td&gt;
          &lt;td&gt;Deprecated&lt;/td&gt;
      &lt;/tr&gt;
      &lt;tr&gt;
          &lt;td&gt;&lt;code&gt;darwin&lt;/code&gt; - &lt;code&gt;arm64&lt;/code&gt;&lt;/td&gt;
          &lt;td&gt;Limited support&lt;/td&gt;
          &lt;td&gt;Limited support&lt;/td&gt;
      &lt;/tr&gt;
      &lt;tr&gt;
          &lt;td&gt;&lt;code&gt;windows&lt;/code&gt; - &lt;code&gt;amd64/x86_64&lt;/code&gt;&lt;/td&gt;
          &lt;td&gt;No support yet&lt;/td&gt;
          &lt;td&gt;No support yet&lt;/td&gt;
      &lt;/tr&gt;
      &lt;tr&gt;
          &lt;td&gt;&lt;code&gt;windows&lt;/code&gt; - &lt;code&gt;arm64&lt;/code&gt;&lt;/td&gt;
          &lt;td&gt;No support yet&lt;/td&gt;
          &lt;td&gt;No support yet&lt;/td&gt;
      &lt;/tr&gt;
  &lt;/tbody&gt;
&lt;/table&gt;
&lt;h3 id=&#34;conda--pip&#34;&gt;Conda / Pip
&lt;/h3&gt;&lt;p&gt;Install NeMo in a fresh Conda environment:&lt;/p&gt;
&lt;div class=&#34;highlight&#34;&gt;&lt;div class=&#34;chroma&#34;&gt;
&lt;table class=&#34;lntable&#34;&gt;&lt;tr&gt;&lt;td class=&#34;lntd&#34;&gt;
&lt;pre tabindex=&#34;0&#34; class=&#34;chroma&#34;&gt;&lt;code&gt;&lt;span class=&#34;lnt&#34;&gt;1
&lt;/span&gt;&lt;span class=&#34;lnt&#34;&gt;2
&lt;/span&gt;&lt;/code&gt;&lt;/pre&gt;&lt;/td&gt;
&lt;td class=&#34;lntd&#34;&gt;
&lt;pre tabindex=&#34;0&#34; class=&#34;chroma&#34;&gt;&lt;code class=&#34;language-bash&#34; data-lang=&#34;bash&#34;&gt;&lt;span class=&#34;line&#34;&gt;&lt;span class=&#34;cl&#34;&gt;conda create --name nemo &lt;span class=&#34;nv&#34;&gt;python&lt;/span&gt;&lt;span class=&#34;o&#34;&gt;==&lt;/span&gt;3.10.12
&lt;/span&gt;&lt;/span&gt;&lt;span class=&#34;line&#34;&gt;&lt;span class=&#34;cl&#34;&gt;conda activate nemo
&lt;/span&gt;&lt;/span&gt;&lt;/code&gt;&lt;/pre&gt;&lt;/td&gt;&lt;/tr&gt;&lt;/table&gt;
&lt;/div&gt;
&lt;/div&gt;&lt;h4 id=&#34;pick-the-right-version&#34;&gt;Pick the right version
&lt;/h4&gt;&lt;p&gt;NeMo-Framework publishes pre-built wheels with each release.
To install nemo_toolkit from such a wheel, use the following installation method:&lt;/p&gt;
&lt;div class=&#34;highlight&#34;&gt;&lt;div class=&#34;chroma&#34;&gt;
&lt;table class=&#34;lntable&#34;&gt;&lt;tr&gt;&lt;td class=&#34;lntd&#34;&gt;
&lt;pre tabindex=&#34;0&#34; class=&#34;chroma&#34;&gt;&lt;code&gt;&lt;span class=&#34;lnt&#34;&gt;1
&lt;/span&gt;&lt;/code&gt;&lt;/pre&gt;&lt;/td&gt;
&lt;td class=&#34;lntd&#34;&gt;
&lt;pre tabindex=&#34;0&#34; class=&#34;chroma&#34;&gt;&lt;code class=&#34;language-bash&#34; data-lang=&#34;bash&#34;&gt;&lt;span class=&#34;line&#34;&gt;&lt;span class=&#34;cl&#34;&gt;pip install &lt;span class=&#34;s2&#34;&gt;&amp;#34;nemo_toolkit[all]&amp;#34;&lt;/span&gt;
&lt;/span&gt;&lt;/span&gt;&lt;/code&gt;&lt;/pre&gt;&lt;/td&gt;&lt;/tr&gt;&lt;/table&gt;
&lt;/div&gt;
&lt;/div&gt;&lt;p&gt;If a more specific version is desired, we recommend a Pip-VCS install. From &lt;a class=&#34;link&#34; href=&#34;https://github.com/NVIDIA/NeMo&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;&gt;NVIDIA/NeMo&lt;/a&gt;, fetch the commit, branch, or tag that you would like to install.&lt;br&gt;
To install nemo_toolkit from this Git reference &lt;code&gt;$REF&lt;/code&gt;, use the following installation method:&lt;/p&gt;
&lt;div class=&#34;highlight&#34;&gt;&lt;div class=&#34;chroma&#34;&gt;
&lt;table class=&#34;lntable&#34;&gt;&lt;tr&gt;&lt;td class=&#34;lntd&#34;&gt;
&lt;pre tabindex=&#34;0&#34; class=&#34;chroma&#34;&gt;&lt;code&gt;&lt;span class=&#34;lnt&#34;&gt;1
&lt;/span&gt;&lt;span class=&#34;lnt&#34;&gt;2
&lt;/span&gt;&lt;span class=&#34;lnt&#34;&gt;3
&lt;/span&gt;&lt;span class=&#34;lnt&#34;&gt;4
&lt;/span&gt;&lt;/code&gt;&lt;/pre&gt;&lt;/td&gt;
&lt;td class=&#34;lntd&#34;&gt;
&lt;pre tabindex=&#34;0&#34; class=&#34;chroma&#34;&gt;&lt;code class=&#34;language-bash&#34; data-lang=&#34;bash&#34;&gt;&lt;span class=&#34;line&#34;&gt;&lt;span class=&#34;cl&#34;&gt;git clone https://github.com/NVIDIA/NeMo
&lt;/span&gt;&lt;/span&gt;&lt;span class=&#34;line&#34;&gt;&lt;span class=&#34;cl&#34;&gt;&lt;span class=&#34;nb&#34;&gt;cd&lt;/span&gt; NeMo
&lt;/span&gt;&lt;/span&gt;&lt;span class=&#34;line&#34;&gt;&lt;span class=&#34;cl&#34;&gt;git checkout &lt;span class=&#34;si&#34;&gt;${&lt;/span&gt;&lt;span class=&#34;nv&#34;&gt;REF&lt;/span&gt;&lt;span class=&#34;k&#34;&gt;:-&lt;/span&gt;&lt;span class=&#34;s1&#34;&gt;&amp;#39;main&amp;#39;&lt;/span&gt;&lt;span class=&#34;si&#34;&gt;}&lt;/span&gt;
&lt;/span&gt;&lt;/span&gt;&lt;span class=&#34;line&#34;&gt;&lt;span class=&#34;cl&#34;&gt;pip install &lt;span class=&#34;s1&#34;&gt;&amp;#39;.[all]&amp;#39;&lt;/span&gt;
&lt;/span&gt;&lt;/span&gt;&lt;/code&gt;&lt;/pre&gt;&lt;/td&gt;&lt;/tr&gt;&lt;/table&gt;
&lt;/div&gt;
&lt;/div&gt;&lt;h4 id=&#34;install-a-specific-domain&#34;&gt;Install a specific Domain
&lt;/h4&gt;&lt;p&gt;To install a specific domain of NeMo, you must first install the
nemo_toolkit package using the instructions listed above. Then, run the
following domain-specific commands:&lt;/p&gt;
&lt;div class=&#34;highlight&#34;&gt;&lt;div class=&#34;chroma&#34;&gt;
&lt;table class=&#34;lntable&#34;&gt;&lt;tr&gt;&lt;td class=&#34;lntd&#34;&gt;
&lt;pre tabindex=&#34;0&#34; class=&#34;chroma&#34;&gt;&lt;code&gt;&lt;span class=&#34;lnt&#34;&gt;1
&lt;/span&gt;&lt;span class=&#34;lnt&#34;&gt;2
&lt;/span&gt;&lt;span class=&#34;lnt&#34;&gt;3
&lt;/span&gt;&lt;span class=&#34;lnt&#34;&gt;4
&lt;/span&gt;&lt;span class=&#34;lnt&#34;&gt;5
&lt;/span&gt;&lt;span class=&#34;lnt&#34;&gt;6
&lt;/span&gt;&lt;/code&gt;&lt;/pre&gt;&lt;/td&gt;
&lt;td class=&#34;lntd&#34;&gt;
&lt;pre tabindex=&#34;0&#34; class=&#34;chroma&#34;&gt;&lt;code class=&#34;language-bash&#34; data-lang=&#34;bash&#34;&gt;&lt;span class=&#34;line&#34;&gt;&lt;span class=&#34;cl&#34;&gt;pip install nemo_toolkit&lt;span class=&#34;o&#34;&gt;[&lt;/span&gt;&lt;span class=&#34;s1&#34;&gt;&amp;#39;all&amp;#39;&lt;/span&gt;&lt;span class=&#34;o&#34;&gt;]&lt;/span&gt; &lt;span class=&#34;c1&#34;&gt;# or pip install &amp;#34;nemo_toolkit[&amp;#39;all&amp;#39;]@git+https://github.com/NVIDIA/NeMo@${REF:-&amp;#39;main&amp;#39;}&amp;#34;&lt;/span&gt;
&lt;/span&gt;&lt;/span&gt;&lt;span class=&#34;line&#34;&gt;&lt;span class=&#34;cl&#34;&gt;pip install nemo_toolkit&lt;span class=&#34;o&#34;&gt;[&lt;/span&gt;&lt;span class=&#34;s1&#34;&gt;&amp;#39;asr&amp;#39;&lt;/span&gt;&lt;span class=&#34;o&#34;&gt;]&lt;/span&gt; &lt;span class=&#34;c1&#34;&gt;# or pip install &amp;#34;nemo_toolkit[&amp;#39;asr&amp;#39;]@git+https://github.com/NVIDIA/NeMo@${REF:-&amp;#39;main&amp;#39;}&amp;#34;&lt;/span&gt;
&lt;/span&gt;&lt;/span&gt;&lt;span class=&#34;line&#34;&gt;&lt;span class=&#34;cl&#34;&gt;pip install nemo_toolkit&lt;span class=&#34;o&#34;&gt;[&lt;/span&gt;&lt;span class=&#34;s1&#34;&gt;&amp;#39;nlp&amp;#39;&lt;/span&gt;&lt;span class=&#34;o&#34;&gt;]&lt;/span&gt; &lt;span class=&#34;c1&#34;&gt;# or pip install &amp;#34;nemo_toolkit[&amp;#39;nlp&amp;#39;]@git+https://github.com/NVIDIA/NeMo@${REF:-&amp;#39;main&amp;#39;}&amp;#34;&lt;/span&gt;
&lt;/span&gt;&lt;/span&gt;&lt;span class=&#34;line&#34;&gt;&lt;span class=&#34;cl&#34;&gt;pip install nemo_toolkit&lt;span class=&#34;o&#34;&gt;[&lt;/span&gt;&lt;span class=&#34;s1&#34;&gt;&amp;#39;tts&amp;#39;&lt;/span&gt;&lt;span class=&#34;o&#34;&gt;]&lt;/span&gt; &lt;span class=&#34;c1&#34;&gt;# or pip install &amp;#34;nemo_toolkit[&amp;#39;tts&amp;#39;]@git+https://github.com/NVIDIA/NeMo@${REF:-&amp;#39;main&amp;#39;}&amp;#34;&lt;/span&gt;
&lt;/span&gt;&lt;/span&gt;&lt;span class=&#34;line&#34;&gt;&lt;span class=&#34;cl&#34;&gt;pip install nemo_toolkit&lt;span class=&#34;o&#34;&gt;[&lt;/span&gt;&lt;span class=&#34;s1&#34;&gt;&amp;#39;vision&amp;#39;&lt;/span&gt;&lt;span class=&#34;o&#34;&gt;]&lt;/span&gt; &lt;span class=&#34;c1&#34;&gt;# or pip install &amp;#34;nemo_toolkit[&amp;#39;vision&amp;#39;]@git+https://github.com/NVIDIA/NeMo@${REF:-&amp;#39;main&amp;#39;}&amp;#34;&lt;/span&gt;
&lt;/span&gt;&lt;/span&gt;&lt;span class=&#34;line&#34;&gt;&lt;span class=&#34;cl&#34;&gt;pip install nemo_toolkit&lt;span class=&#34;o&#34;&gt;[&lt;/span&gt;&lt;span class=&#34;s1&#34;&gt;&amp;#39;multimodal&amp;#39;&lt;/span&gt;&lt;span class=&#34;o&#34;&gt;]&lt;/span&gt; &lt;span class=&#34;c1&#34;&gt;# or pip install &amp;#34;nemo_toolkit[&amp;#39;multimodal&amp;#39;]@git+https://github.com/NVIDIA/NeMo@${REF:-&amp;#39;main&amp;#39;}&amp;#34;&lt;/span&gt;
&lt;/span&gt;&lt;/span&gt;&lt;/code&gt;&lt;/pre&gt;&lt;/td&gt;&lt;/tr&gt;&lt;/table&gt;
&lt;/div&gt;
&lt;/div&gt;&lt;h3 id=&#34;ngc-pytorch-container&#34;&gt;NGC PyTorch container
&lt;/h3&gt;&lt;p&gt;&lt;strong&gt;NOTE: The following steps are supported beginning with 24.04 (NeMo-Toolkit 2.3.0)&lt;/strong&gt;&lt;/p&gt;
&lt;p&gt;We recommend that you start with a base NVIDIA PyTorch container:
nvcr.io/nvidia/pytorch:25.01-py3.&lt;/p&gt;
&lt;p&gt;If starting with a base NVIDIA PyTorch container, you must first launch
the container:&lt;/p&gt;
&lt;div class=&#34;highlight&#34;&gt;&lt;div class=&#34;chroma&#34;&gt;
&lt;table class=&#34;lntable&#34;&gt;&lt;tr&gt;&lt;td class=&#34;lntd&#34;&gt;
&lt;pre tabindex=&#34;0&#34; class=&#34;chroma&#34;&gt;&lt;code&gt;&lt;span class=&#34;lnt&#34;&gt;1
&lt;/span&gt;&lt;span class=&#34;lnt&#34;&gt;2
&lt;/span&gt;&lt;span class=&#34;lnt&#34;&gt;3
&lt;/span&gt;&lt;span class=&#34;lnt&#34;&gt;4
&lt;/span&gt;&lt;span class=&#34;lnt&#34;&gt;5
&lt;/span&gt;&lt;span class=&#34;lnt&#34;&gt;6
&lt;/span&gt;&lt;span class=&#34;lnt&#34;&gt;7
&lt;/span&gt;&lt;span class=&#34;lnt&#34;&gt;8
&lt;/span&gt;&lt;/code&gt;&lt;/pre&gt;&lt;/td&gt;
&lt;td class=&#34;lntd&#34;&gt;
&lt;pre tabindex=&#34;0&#34; class=&#34;chroma&#34;&gt;&lt;code class=&#34;language-bash&#34; data-lang=&#34;bash&#34;&gt;&lt;span class=&#34;line&#34;&gt;&lt;span class=&#34;cl&#34;&gt;docker run &lt;span class=&#34;se&#34;&gt;\
&lt;/span&gt;&lt;/span&gt;&lt;/span&gt;&lt;span class=&#34;line&#34;&gt;&lt;span class=&#34;cl&#34;&gt;  --gpus all &lt;span class=&#34;se&#34;&gt;\
&lt;/span&gt;&lt;/span&gt;&lt;/span&gt;&lt;span class=&#34;line&#34;&gt;&lt;span class=&#34;cl&#34;&gt;  -it &lt;span class=&#34;se&#34;&gt;\
&lt;/span&gt;&lt;/span&gt;&lt;/span&gt;&lt;span class=&#34;line&#34;&gt;&lt;span class=&#34;cl&#34;&gt;  --rm &lt;span class=&#34;se&#34;&gt;\
&lt;/span&gt;&lt;/span&gt;&lt;/span&gt;&lt;span class=&#34;line&#34;&gt;&lt;span class=&#34;cl&#34;&gt;  --shm-size&lt;span class=&#34;o&#34;&gt;=&lt;/span&gt;16g &lt;span class=&#34;se&#34;&gt;\
&lt;/span&gt;&lt;/span&gt;&lt;/span&gt;&lt;span class=&#34;line&#34;&gt;&lt;span class=&#34;cl&#34;&gt;  --ulimit &lt;span class=&#34;nv&#34;&gt;memlock&lt;/span&gt;&lt;span class=&#34;o&#34;&gt;=&lt;/span&gt;-1 &lt;span class=&#34;se&#34;&gt;\
&lt;/span&gt;&lt;/span&gt;&lt;/span&gt;&lt;span class=&#34;line&#34;&gt;&lt;span class=&#34;cl&#34;&gt;  --ulimit &lt;span class=&#34;nv&#34;&gt;stack&lt;/span&gt;&lt;span class=&#34;o&#34;&gt;=&lt;/span&gt;&lt;span class=&#34;m&#34;&gt;67108864&lt;/span&gt; &lt;span class=&#34;se&#34;&gt;\
&lt;/span&gt;&lt;/span&gt;&lt;/span&gt;&lt;span class=&#34;line&#34;&gt;&lt;span class=&#34;cl&#34;&gt;  nvcr.io/nvidia/pytorch:&lt;span class=&#34;si&#34;&gt;${&lt;/span&gt;&lt;span class=&#34;nv&#34;&gt;NV_PYTORCH_TAG&lt;/span&gt;&lt;span class=&#34;k&#34;&gt;:-&lt;/span&gt;&lt;span class=&#34;s1&#34;&gt;&amp;#39;25.01-py3&amp;#39;&lt;/span&gt;&lt;span class=&#34;si&#34;&gt;}&lt;/span&gt;
&lt;/span&gt;&lt;/span&gt;&lt;/code&gt;&lt;/pre&gt;&lt;/td&gt;&lt;/tr&gt;&lt;/table&gt;
&lt;/div&gt;
&lt;/div&gt;&lt;p&gt;From &lt;a class=&#34;link&#34; href=&#34;https://github.com/NVIDIA/NeMo&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;&gt;NVIDIA/NeMo&lt;/a&gt;, fetch the commit, branch, or tag that you want to install.&lt;br&gt;
To install nemo_toolkit including all of its dependencies from this Git reference &lt;code&gt;$REF&lt;/code&gt;, use the following installation method:&lt;/p&gt;
&lt;div class=&#34;highlight&#34;&gt;&lt;div class=&#34;chroma&#34;&gt;
&lt;table class=&#34;lntable&#34;&gt;&lt;tr&gt;&lt;td class=&#34;lntd&#34;&gt;
&lt;pre tabindex=&#34;0&#34; class=&#34;chroma&#34;&gt;&lt;code&gt;&lt;span class=&#34;lnt&#34;&gt;1
&lt;/span&gt;&lt;span class=&#34;lnt&#34;&gt;2
&lt;/span&gt;&lt;span class=&#34;lnt&#34;&gt;3
&lt;/span&gt;&lt;span class=&#34;lnt&#34;&gt;4
&lt;/span&gt;&lt;span class=&#34;lnt&#34;&gt;5
&lt;/span&gt;&lt;/code&gt;&lt;/pre&gt;&lt;/td&gt;
&lt;td class=&#34;lntd&#34;&gt;
&lt;pre tabindex=&#34;0&#34; class=&#34;chroma&#34;&gt;&lt;code class=&#34;language-bash&#34; data-lang=&#34;bash&#34;&gt;&lt;span class=&#34;line&#34;&gt;&lt;span class=&#34;cl&#34;&gt;&lt;span class=&#34;nb&#34;&gt;cd&lt;/span&gt; /opt
&lt;/span&gt;&lt;/span&gt;&lt;span class=&#34;line&#34;&gt;&lt;span class=&#34;cl&#34;&gt;git clone https://github.com/NVIDIA/NeMo
&lt;/span&gt;&lt;/span&gt;&lt;span class=&#34;line&#34;&gt;&lt;span class=&#34;cl&#34;&gt;&lt;span class=&#34;nb&#34;&gt;cd&lt;/span&gt; NeMo
&lt;/span&gt;&lt;/span&gt;&lt;span class=&#34;line&#34;&gt;&lt;span class=&#34;cl&#34;&gt;git checkout &lt;span class=&#34;si&#34;&gt;${&lt;/span&gt;&lt;span class=&#34;nv&#34;&gt;REF&lt;/span&gt;&lt;span class=&#34;k&#34;&gt;:-&lt;/span&gt;&lt;span class=&#34;s1&#34;&gt;&amp;#39;main&amp;#39;&lt;/span&gt;&lt;span class=&#34;si&#34;&gt;}&lt;/span&gt;
&lt;/span&gt;&lt;/span&gt;&lt;span class=&#34;line&#34;&gt;&lt;span class=&#34;cl&#34;&gt;bash reinstall.sh --library all
&lt;/span&gt;&lt;/span&gt;&lt;/code&gt;&lt;/pre&gt;&lt;/td&gt;&lt;/tr&gt;&lt;/table&gt;
&lt;/div&gt;
&lt;/div&gt;&lt;h2 id=&#34;ngc-nemo-container&#34;&gt;NGC NeMo container
&lt;/h2&gt;&lt;p&gt;NeMo containers are released alongside NeMo version updates.
NeMo Framework now supports LLMs, MMs, ASR, and TTS in a single
consolidated Docker container. You can find additional information about
released containers on the &lt;a class=&#34;link&#34; href=&#34;https://github.com/NVIDIA/NeMo/releases&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;NeMo releases
page&lt;/a&gt;.&lt;/p&gt;
&lt;p&gt;To use a pre-built container, run the following command:&lt;/p&gt;
&lt;div class=&#34;highlight&#34;&gt;&lt;div class=&#34;chroma&#34;&gt;
&lt;table class=&#34;lntable&#34;&gt;&lt;tr&gt;&lt;td class=&#34;lntd&#34;&gt;
&lt;pre tabindex=&#34;0&#34; class=&#34;chroma&#34;&gt;&lt;code&gt;&lt;span class=&#34;lnt&#34;&gt;1
&lt;/span&gt;&lt;span class=&#34;lnt&#34;&gt;2
&lt;/span&gt;&lt;span class=&#34;lnt&#34;&gt;3
&lt;/span&gt;&lt;span class=&#34;lnt&#34;&gt;4
&lt;/span&gt;&lt;span class=&#34;lnt&#34;&gt;5
&lt;/span&gt;&lt;span class=&#34;lnt&#34;&gt;6
&lt;/span&gt;&lt;span class=&#34;lnt&#34;&gt;7
&lt;/span&gt;&lt;span class=&#34;lnt&#34;&gt;8
&lt;/span&gt;&lt;/code&gt;&lt;/pre&gt;&lt;/td&gt;
&lt;td class=&#34;lntd&#34;&gt;
&lt;pre tabindex=&#34;0&#34; class=&#34;chroma&#34;&gt;&lt;code class=&#34;language-bash&#34; data-lang=&#34;bash&#34;&gt;&lt;span class=&#34;line&#34;&gt;&lt;span class=&#34;cl&#34;&gt;docker run &lt;span class=&#34;se&#34;&gt;\
&lt;/span&gt;&lt;/span&gt;&lt;/span&gt;&lt;span class=&#34;line&#34;&gt;&lt;span class=&#34;cl&#34;&gt;  --gpus all &lt;span class=&#34;se&#34;&gt;\
&lt;/span&gt;&lt;/span&gt;&lt;/span&gt;&lt;span class=&#34;line&#34;&gt;&lt;span class=&#34;cl&#34;&gt;  -it &lt;span class=&#34;se&#34;&gt;\
&lt;/span&gt;&lt;/span&gt;&lt;/span&gt;&lt;span class=&#34;line&#34;&gt;&lt;span class=&#34;cl&#34;&gt;  --rm &lt;span class=&#34;se&#34;&gt;\
&lt;/span&gt;&lt;/span&gt;&lt;/span&gt;&lt;span class=&#34;line&#34;&gt;&lt;span class=&#34;cl&#34;&gt;  --shm-size&lt;span class=&#34;o&#34;&gt;=&lt;/span&gt;16g &lt;span class=&#34;se&#34;&gt;\
&lt;/span&gt;&lt;/span&gt;&lt;/span&gt;&lt;span class=&#34;line&#34;&gt;&lt;span class=&#34;cl&#34;&gt;  --ulimit &lt;span class=&#34;nv&#34;&gt;memlock&lt;/span&gt;&lt;span class=&#34;o&#34;&gt;=&lt;/span&gt;-1 &lt;span class=&#34;se&#34;&gt;\
&lt;/span&gt;&lt;/span&gt;&lt;/span&gt;&lt;span class=&#34;line&#34;&gt;&lt;span class=&#34;cl&#34;&gt;  --ulimit &lt;span class=&#34;nv&#34;&gt;stack&lt;/span&gt;&lt;span class=&#34;o&#34;&gt;=&lt;/span&gt;&lt;span class=&#34;m&#34;&gt;67108864&lt;/span&gt; &lt;span class=&#34;se&#34;&gt;\
&lt;/span&gt;&lt;/span&gt;&lt;/span&gt;&lt;span class=&#34;line&#34;&gt;&lt;span class=&#34;cl&#34;&gt;  nvcr.io/nvidia/nemo:&lt;span class=&#34;si&#34;&gt;${&lt;/span&gt;&lt;span class=&#34;nv&#34;&gt;NV_NEMO_TAG&lt;/span&gt;&lt;span class=&#34;k&#34;&gt;:-&lt;/span&gt;&lt;span class=&#34;s1&#34;&gt;&amp;#39;25.02&amp;#39;&lt;/span&gt;&lt;span class=&#34;si&#34;&gt;}&lt;/span&gt;
&lt;/span&gt;&lt;/span&gt;&lt;/code&gt;&lt;/pre&gt;&lt;/td&gt;&lt;/tr&gt;&lt;/table&gt;
&lt;/div&gt;
&lt;/div&gt;&lt;h2 id=&#34;future-work&#34;&gt;Future Work
&lt;/h2&gt;&lt;p&gt;The NeMo Framework Launcher does not currently support ASR and TTS
training, but it will soon.&lt;/p&gt;
&lt;h2 id=&#34;discussions-board&#34;&gt;Discussions Board
&lt;/h2&gt;&lt;p&gt;FAQ can be found on the NeMo &lt;a class=&#34;link&#34; href=&#34;https://github.com/NVIDIA/NeMo/discussions&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;Discussions
board&lt;/a&gt;. You are welcome to
ask questions or start discussions on the board.&lt;/p&gt;
&lt;h2 id=&#34;contribute-to-nemo&#34;&gt;Contribute to NeMo
&lt;/h2&gt;&lt;p&gt;We welcome community contributions! Please refer to
&lt;a class=&#34;link&#34; href=&#34;https://github.com/NVIDIA/NeMo/blob/stable/CONTRIBUTING.md&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;CONTRIBUTING.md&lt;/a&gt;
for the process.&lt;/p&gt;
&lt;h2 id=&#34;publications&#34;&gt;Publications
&lt;/h2&gt;&lt;p&gt;We provide an ever-growing list of
&lt;a class=&#34;link&#34; href=&#34;https://nvidia.github.io/NeMo/publications/&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;publications&lt;/a&gt; that utilize
the NeMo Framework.&lt;/p&gt;
&lt;p&gt;To contribute an article to the collection, please submit a pull request
to the &lt;code&gt;gh-pages-src&lt;/code&gt; branch of this repository. For detailed
information, please consult the README located at the &lt;a class=&#34;link&#34; href=&#34;https://github.com/NVIDIA/NeMo/tree/gh-pages-src#readme&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;gh-pages-src
branch&lt;/a&gt;.&lt;/p&gt;
&lt;h2 id=&#34;blogs&#34;&gt;Blogs
&lt;/h2&gt;&lt;!-- markdownlint-disable --&gt;
&lt;details open&gt;
  &lt;summary&gt;&lt;b&gt;Large Language Models and Multimodal Models&lt;/b&gt;&lt;/summary&gt;
    &lt;details&gt;
      &lt;summary&gt;
        &lt;a href=&#34;https://blogs.nvidia.com/blog/bria-builds-responsible-generative-ai-using-nemo-picasso/&#34;&gt;
          Bria Builds Responsible Generative AI for Enterprises Using NVIDIA NeMo, Picasso
        &lt;/a&gt; (2024/03/06)
      &lt;/summary&gt;
      Bria, a Tel Aviv startup at the forefront of visual generative AI for enterprises, now leverages the NVIDIA NeMo Framework.
      The Bria.ai platform uses reference implementations from the NeMo Multimodal collection, trained on NVIDIA Tensor Core GPUs, to enable high-throughput and low-latency image generation. 
      Bria has also adopted NVIDIA Picasso, a foundry for visual generative AI models, to run inference.
      &lt;br&gt;&lt;br&gt;
    &lt;/details&gt;
    &lt;details&gt;
      &lt;summary&gt;
        &lt;a href=&#34;https://developer.nvidia.com/blog/new-nvidia-nemo-framework-features-and-nvidia-h200-supercharge-llm-training-performance-and-versatility/&#34;&gt;
          New NVIDIA NeMo Framework Features and NVIDIA H200
        &lt;/a&gt; (2023/12/06)
      &lt;/summary&gt;
      NVIDIA NeMo Framework now includes several optimizations and enhancements, 
      including: 
      1) Fully Sharded Data Parallelism (FSDP) to improve the efficiency of training large-scale AI models, 
      2) Mixture of Experts (MoE)-based LLM architectures with expert parallelism for efficient LLM training at scale, 
      3) Reinforcement Learning from Human Feedback (RLHF) with TensorRT-LLM for inference stage acceleration, and 
      4) up to 4.2x speedups for Llama 2 pre-training on NVIDIA H200 Tensor Core GPUs.
      &lt;br&gt;&lt;br&gt;
      &lt;a href=&#34;https://developer.nvidia.com/blog/new-nvidia-nemo-framework-features-and-nvidia-h200-supercharge-llm-training-performance-and-versatility&#34;&gt;
      &lt;img src=&#34;https://github.com/sbhavani/TransformerEngine/blob/main/docs/examples/H200-NeMo-performance.png&#34; alt=&#34;H200-NeMo-performance&#34; style=&#34;width: 600px;&#34;&gt;&lt;/a&gt;
      &lt;br&gt;&lt;br&gt;
    &lt;/details&gt;
    &lt;details&gt;
      &lt;summary&gt;
        &lt;a href=&#34;https://blogs.nvidia.com/blog/nemo-amazon-titan/&#34;&gt;
          NVIDIA now powers training for Amazon Titan Foundation models
        &lt;/a&gt; (2023/11/28)
      &lt;/summary&gt;
      NVIDIA NeMo Framework now empowers the Amazon Titan foundation models (FM) with efficient training of large language models (LLMs). 
      The Titan FMs form the basis of Amazon’s generative AI service, Amazon Bedrock. 
      The NeMo Framework provides a versatile framework for building, customizing, and running LLMs.
      &lt;br&gt;&lt;br&gt;
    &lt;/details&gt;
&lt;/details&gt;
&lt;!-- markdownlint-enable --&gt;
&lt;h2 id=&#34;licenses&#34;&gt;Licenses
&lt;/h2&gt;&lt;ul&gt;
&lt;li&gt;&lt;a class=&#34;link&#34; href=&#34;https://github.com/NVIDIA/NeMo?tab=Apache-2.0-1-ov-file#readme&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;NeMo GitHub Apache 2.0
license&lt;/a&gt;&lt;/li&gt;
&lt;li&gt;NeMo is licensed under the &lt;a class=&#34;link&#34; href=&#34;https://www.nvidia.com/en-us/data-center/products/nvidia-ai-enterprise/eula/&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;NVIDIA AI PRODUCT
AGREEMENT&lt;/a&gt;.
By pulling and using the container, you accept the terms and
conditions of this license.&lt;/li&gt;
&lt;/ul&gt;
</description>
        </item>
        <item>
        <title>PhysX</title>
        <link>https://producthunt.programnotes.cn/en/p/physx/</link>
        <pubDate>Wed, 09 Apr 2025 15:29:43 +0800</pubDate>
        
        <guid>https://producthunt.programnotes.cn/en/p/physx/</guid>
        <description>&lt;img src="https://images.unsplash.com/photo-1614282635334-600d44ad88b1?ixid=M3w0NjAwMjJ8MHwxfHJhbmRvbXx8fHx8fHx8fDE3NDQxODM2NTd8&amp;ixlib=rb-4.0.3" alt="Featured image of post PhysX" /&gt;&lt;h1 id=&#34;nvidia-omniversephysx&#34;&gt;&lt;a class=&#34;link&#34; href=&#34;https://github.com/NVIDIA-Omniverse/PhysX&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;NVIDIA-Omniverse/PhysX&lt;/a&gt;
&lt;/h1&gt;&lt;h1 id=&#34;nvidia-physx&#34;&gt;NVIDIA PhysX
&lt;/h1&gt;&lt;p&gt;Copyright (c) 2008-2025 NVIDIA Corporation. All rights reserved.&lt;/p&gt;
&lt;p&gt;Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:&lt;/p&gt;
&lt;ul&gt;
&lt;li&gt;Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.&lt;/li&gt;
&lt;li&gt;Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.&lt;/li&gt;
&lt;li&gt;Neither the name of NVIDIA CORPORATION nor the names of its
contributors may be used to endorse or promote products derived
from this software without specific prior written permission.&lt;/li&gt;
&lt;/ul&gt;
&lt;p&gt;THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS &amp;ldquo;AS IS&amp;rdquo; AND ANY
EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT OWNER OR
CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.&lt;/p&gt;
&lt;h2 id=&#34;introduction&#34;&gt;Introduction
&lt;/h2&gt;&lt;p&gt;Welcome to the NVIDIA PhysX source code repository.&lt;/p&gt;
&lt;p&gt;This repository contains source releases of the PhysX, Flow, and Blast SDKs used in NVIDIA Omniverse.&lt;/p&gt;
&lt;h2 id=&#34;documentation&#34;&gt;Documentation
&lt;/h2&gt;&lt;p&gt;The user guide and API documentation are available on &lt;a class=&#34;link&#34; href=&#34;https://nvidia-omniverse.github.io/PhysX&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;GitHub Pages&lt;/a&gt;. Please create an &lt;a class=&#34;link&#34; href=&#34;https://github.com/NVIDIA-Omniverse/PhysX/issues/&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;Issue&lt;/a&gt; if you find a documentation issue.&lt;/p&gt;
&lt;h2 id=&#34;instructions&#34;&gt;Instructions
&lt;/h2&gt;&lt;p&gt;Please see instructions specific to each of the libraries in the respective subfolder.&lt;/p&gt;
&lt;h2 id=&#34;community-maintained-build-configuration-fork&#34;&gt;Community-Maintained Build Configuration Fork
&lt;/h2&gt;&lt;p&gt;Please see &lt;a class=&#34;link&#34; href=&#34;https://github.com/o3de/PhysX&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;the O3DE Fork&lt;/a&gt; for community-maintained additional build configurations.&lt;/p&gt;
&lt;h2 id=&#34;support&#34;&gt;Support
&lt;/h2&gt;&lt;ul&gt;
&lt;li&gt;Please use GitHub &lt;a class=&#34;link&#34; href=&#34;https://github.com/NVIDIA-Omniverse/PhysX/discussions/&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;Discussions&lt;/a&gt; for questions and comments.&lt;/li&gt;
&lt;li&gt;GitHub &lt;a class=&#34;link&#34; href=&#34;https://github.com/NVIDIA-Omniverse/PhysX/issues&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;Issues&lt;/a&gt; should only be used for bug reports or documentation issues.&lt;/li&gt;
&lt;li&gt;You can also ask questions in the NVIDIA Omniverse #physics &lt;a class=&#34;link&#34; href=&#34;https://discord.com/invite/XWQNJDNuaC&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;Discord Channel&lt;/a&gt;.&lt;/li&gt;
&lt;/ul&gt;
</description>
        </item>
        
    </channel>
</rss>
