<?xml version="1.0" encoding="utf-8" standalone="yes"?>
<rss version="2.0" xmlns:atom="http://www.w3.org/2005/Atom">
    <channel>
        <title>GPU on Producthunt daily</title>
        <link>https://producthunt.programnotes.cn/en/tags/gpu/</link>
        <description>Recent content in GPU on Producthunt daily</description>
        <generator>Hugo -- gohugo.io</generator>
        <language>en</language>
        <lastBuildDate>Tue, 23 Sep 2025 15:28:47 +0800</lastBuildDate><atom:link href="https://producthunt.programnotes.cn/en/tags/gpu/index.xml" rel="self" type="application/rss+xml" /><item>
        <title>Sunshine</title>
        <link>https://producthunt.programnotes.cn/en/p/sunshine/</link>
        <pubDate>Tue, 23 Sep 2025 15:28:47 +0800</pubDate>
        
        <guid>https://producthunt.programnotes.cn/en/p/sunshine/</guid>
        <description>&lt;img src="https://images.unsplash.com/photo-1590147074903-b9ad6ba9eb5a?ixid=M3w0NjAwMjJ8MHwxfHJhbmRvbXx8fHx8fHx8fDE3NTg2MTI0NzN8&amp;ixlib=rb-4.1.0" alt="Featured image of post Sunshine" /&gt;&lt;h1 id=&#34;lizardbytesunshine&#34;&gt;&lt;a class=&#34;link&#34; href=&#34;https://github.com/LizardByte/Sunshine&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;LizardByte/Sunshine&lt;/a&gt;
&lt;/h1&gt;&lt;div align=&#34;center&#34;&gt;
  &lt;img src=&#34;sunshine.png&#34; /&gt;
  &lt;h1 align=&#34;center&#34;&gt;Sunshine&lt;/h1&gt;
  &lt;h4 align=&#34;center&#34;&gt;Self-hosted game stream host for Moonlight.&lt;/h4&gt;
&lt;/div&gt;
&lt;div align=&#34;center&#34;&gt;
  &lt;a href=&#34;https://github.com/LizardByte/Sunshine&#34;&gt;&lt;img src=&#34;https://img.shields.io/github/stars/lizardbyte/sunshine.svg?logo=github&amp;style=for-the-badge&#34; alt=&#34;GitHub stars&#34;&gt;&lt;/a&gt;
  &lt;a href=&#34;https://github.com/LizardByte/Sunshine/releases/latest&#34;&gt;&lt;img src=&#34;https://img.shields.io/github/downloads/lizardbyte/sunshine/total.svg?style=for-the-badge&amp;logo=github&#34; alt=&#34;GitHub Releases&#34;&gt;&lt;/a&gt;
  &lt;a href=&#34;https://hub.docker.com/r/lizardbyte/sunshine&#34;&gt;&lt;img src=&#34;https://img.shields.io/docker/pulls/lizardbyte/sunshine.svg?style=for-the-badge&amp;logo=docker&#34; alt=&#34;Docker&#34;&gt;&lt;/a&gt;
  &lt;a href=&#34;https://github.com/LizardByte/Sunshine/pkgs/container/sunshine&#34;&gt;&lt;img src=&#34;https://img.shields.io/badge/dynamic/json?url=https%3A%2F%2Fipitio.github.io%2Fbackage%2FLizardByte%2FSunshine%2Fsunshine.json&amp;query=%24.downloads&amp;label=ghcr%20pulls&amp;style=for-the-badge&amp;logo=github&#34; alt=&#34;GHCR&#34;&gt;&lt;/a&gt;
  &lt;a href=&#34;https://flathub.org/apps/dev.lizardbyte.app.Sunshine&#34;&gt;&lt;img src=&#34;https://img.shields.io/flathub/downloads/dev.lizardbyte.app.Sunshine?style=for-the-badge&amp;logo=flathub&#34; alt=&#34;Flathub installs&#34;&gt;&lt;/a&gt;
  &lt;a href=&#34;https://flathub.org/apps/dev.lizardbyte.app.Sunshine&#34;&gt;&lt;img src=&#34;https://img.shields.io/flathub/v/dev.lizardbyte.app.Sunshine?style=for-the-badge&amp;logo=flathub&#34; alt=&#34;Flathub Version&#34;&gt;&lt;/a&gt;
  &lt;a href=&#34;https://github.com/microsoft/winget-pkgs/tree/master/manifests/l/LizardByte/Sunshine&#34;&gt;&lt;img src=&#34;https://img.shields.io/winget/v/LizardByte.Sunshine?style=for-the-badge&amp;logo=data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAACAAAAAgCAYAAABzenr0AAAAAXNSR0IArs4c6QAAAARnQU1BAACxjwv8YQUAAAAJcEhZcwAADsMAAA7DAcdvqGQAAAHuSURBVFhH7ZfNTtRQGIYZiMDwN/IrCAqIhMSNKxcmymVwG+5dcDVsWHgDrtxwCYQVl+BChzDEwSnPY+eQ0sxoOz1mQuBNnpyvTdvz9jun5/SrjfxnJUkyQbMEz2ELduF1l0YUA3QyTrMAa2AnPtyOXsELeAYNyKtV2EC3k3lYgTOwg09ghy/BTp7CKBRV844BOpmmMV2+ySb4BmInG7AKY7AHH+EYqqhZo9PPBG/BVDlOizAD/XQFmnoPXzxRQX8M/CCYS48L6RIc4ygGHK9WGg9HZSZMUNRPVwNJGg5Hg2Qgqh4N3FsDsb6EmgYm07iwwvUxstdxJTwgmILf4CfZ6bb5OHANX8GN5x20IVxnG8ge94pt2xpwU3GnCwayF4Q2G2vgFLzHndFzQdk4q77nNfCdwL28qNyMtmEf3A1/QV5FjDiPWo5jrwf8TWZChTlgJvL4F9QL50/A43qVidTvLcuoM2wDQ1+IkgefgUpLcYwMVBqCKNJA2b0gKNocOIITOIef8C/F/CdMbh/GklynsSawKLHS8d9/B1x2LUqsfFyy3TMsWj5A1cLkotDbYO4JjWWZlZEGv8EbOIR1CAVN2eG8W5oNKgxaeC6DmTJjZs7ixUxpznLPLT+v4sXpoMLcLI3mzFSonDXIEI/M3QCIO4YuimBJ/gAAAABJRU5ErkJggg==&#34; alt=&#34;Winget Version&#34;&gt;&lt;/a&gt;
  &lt;a href=&#34;https://gurubase.io/g/sunshine&#34;&gt;&lt;img src=&#34;https://img.shields.io/badge/Gurubase-Ask%20Guru-ef1a1b?style=for-the-badge&amp;logo=data:image/jpeg;base64,/9j/2wCEAAgGBgcGBQgHBwcJCQgKDBQNDAsLDBkSEw8UHRofHh0aHBwgJC4nICIsIxwcKDcpLDAxNDQ0Hyc5PTgyPC4zNDIBCQkJDAsMGA0NGDIhHCEyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMv/AABEIABgAGAMBIgACEQEDEQH/xAGiAAABBQEBAQEBAQAAAAAAAAAAAQIDBAUGBwgJCgsQAAIBAwMCBAMFBQQEAAABfQECAwAEEQUSITFBBhNRYQcicRQygZGhCCNCscEVUtHwJDNicoIJChYXGBkaJSYnKCkqNDU2Nzg5OkNERUZHSElKU1RVVldYWVpjZGVmZ2hpanN0dXZ3eHl6g4SFhoeIiYqSk5SVlpeYmZqio6Slpqeoqaqys7S1tre4ubrCw8TFxsfIycrS09TV1tfY2drh4uPk5ebn6Onq8fLz9PX29/j5+gEAAwEBAQEBAQEBAQAAAAAAAAECAwQFBgcICQoLEQACAQIEBAMEBwUEBAABAncAAQIDEQQFITEGEkFRB2FxEyIygQgUQpGhscEJIzNS8BVictEKFiQ04SXxFxgZGiYnKCkqNTY3ODk6Q0RFRkdISUpTVFVWV1hZWmNkZWZnaGlqc3R1dnd4eXqCg4SFhoeIiYqSk5SVlpeYmZqio6Slpqeoqaqys7S1tre4ubrCw8TFxsfIycrS09TV1tfY2dri4+Tl5ufo6ery8/T19vf4+fr/2gAMAwEAAhEDEQA/AOLqSO3mlilljido4QGkYDIQEgAn05IH41seFo7aS+uRKlrJci2Y2cd2QImlyOGyQPu7sA8ZxXapAlvpThbPRkv7nTQWhDoIZZRc/XaSAOmcZGOnFfP06XMr3P17F5iqE+Tl1uuvf9Lde55dRW74pit4r61EcdtFdG2U3kVqQY0lyeBgkD5duQOASawqykuV2O6jV9rTU0rXLNjf3Om3QubSXy5QCudoYEEYIIOQR7GnahqV3qk6zXk3mOqhFAUKqqOyqAAByeAKqUUXdrFezhz89lfv1+8KKKKRZ//Z&#34; alt=&#34;Gurubase&#34;&gt;&lt;/a&gt;
  &lt;a href=&#34;https://github.com/LizardByte/Sunshine/actions/workflows/ci.yml?query=branch%3Amaster&#34;&gt;&lt;img src=&#34;https://img.shields.io/github/actions/workflow/status/lizardbyte/sunshine/ci.yml.svg?branch=master&amp;label=CI%20build&amp;logo=github&amp;style=for-the-badge&#34; alt=&#34;GitHub Workflow Status (CI)&#34;&gt;&lt;/a&gt;
  &lt;a href=&#34;https://github.com/LizardByte/Sunshine/actions/workflows/localize.yml?query=branch%3Amaster&#34;&gt;&lt;img src=&#34;https://img.shields.io/github/actions/workflow/status/lizardbyte/sunshine/localize.yml.svg?branch=master&amp;label=localize%20build&amp;logo=github&amp;style=for-the-badge&#34; alt=&#34;GitHub Workflow Status (localize)&#34;&gt;&lt;/a&gt;
  &lt;a href=&#34;https://docs.lizardbyte.dev/projects/sunshine&#34;&gt;&lt;img src=&#34;https://img.shields.io/readthedocs/sunshinestream.svg?label=Docs&amp;style=for-the-badge&amp;logo=readthedocs&#34; alt=&#34;Read the Docs&#34;&gt;&lt;/a&gt;
  &lt;a href=&#34;https://codecov.io/gh/LizardByte/Sunshine&#34;&gt;&lt;img src=&#34;https://img.shields.io/codecov/c/gh/LizardByte/Sunshine?token=SMGXQ5NVMJ&amp;style=for-the-badge&amp;logo=codecov&amp;label=codecov&#34; alt=&#34;Codecov&#34;&gt;&lt;/a&gt;
&lt;/div&gt;
&lt;h2 id=&#34;ℹ-about&#34;&gt;ℹ️ About
&lt;/h2&gt;&lt;p&gt;Sunshine is a self-hosted game stream host for Moonlight.
Offering low latency, cloud gaming server capabilities with support for AMD, Intel, and Nvidia GPUs for hardware
encoding. Software encoding is also available. You can connect to Sunshine from any Moonlight client on a variety of
devices. A web UI is provided to allow configuration, and client pairing, from your favorite web browser. Pair from
the local server or any mobile device.&lt;/p&gt;
&lt;p&gt;LizardByte has the full documentation hosted on &lt;a class=&#34;link&#34; href=&#34;https://docs.lizardbyte.dev/projects/sunshine&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;Read the Docs&lt;/a&gt;&lt;/p&gt;
&lt;ul&gt;
&lt;li&gt;&lt;a class=&#34;link&#34; href=&#34;https://docs.lizardbyte.dev/projects/sunshine/latest/&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;Stable&lt;/a&gt;&lt;/li&gt;
&lt;li&gt;&lt;a class=&#34;link&#34; href=&#34;https://docs.lizardbyte.dev/projects/sunshine/master/&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;Beta&lt;/a&gt;&lt;/li&gt;
&lt;/ul&gt;
&lt;h2 id=&#34;-system-requirements&#34;&gt;🖥️ System Requirements
&lt;/h2&gt;&lt;blockquote&gt;
&lt;p&gt;[!WARNING]
These tables are a work in progress. Do not purchase hardware based on this information.&lt;/p&gt;
&lt;/blockquote&gt;
&lt;table&gt;
    &lt;caption id=&#34;minimum_requirements&#34;&gt;Minimum Requirements&lt;/caption&gt;
    &lt;tr&gt;
        &lt;th&gt;Component&lt;/th&gt;
        &lt;th&gt;Requirement&lt;/th&gt;
    &lt;/tr&gt;
    &lt;tr&gt;
        &lt;td rowspan=&#34;3&#34;&gt;GPU&lt;/td&gt;
        &lt;td&gt;AMD: VCE 1.0 or higher, see: &lt;a href=&#34;https://github.com/obsproject/obs-amd-encoder/wiki/Hardware-Support&#34;&gt;obs-amd hardware support&lt;/a&gt;&lt;/td&gt;
    &lt;/tr&gt;
    &lt;tr&gt;
        &lt;td&gt;
            Intel:&lt;br&gt;
            &amp;nbsp;&amp;nbsp;Linux: VAAPI-compatible, see: &lt;a href=&#34;https://www.intel.com/content/www/us/en/developer/articles/technical/linuxmedia-vaapi.html&#34;&gt;VAAPI hardware support&lt;/a&gt;&lt;br&gt;
            &amp;nbsp;&amp;nbsp;Windows: Skylake or newer with QuickSync encoding support
        &lt;/td&gt;
    &lt;/tr&gt;
    &lt;tr&gt;
        &lt;td&gt;Nvidia: NVENC enabled cards, see: &lt;a href=&#34;https://developer.nvidia.com/video-encode-and-decode-gpu-support-matrix-new&#34;&gt;nvenc support matrix&lt;/a&gt;&lt;/td&gt;
    &lt;/tr&gt;
    &lt;tr&gt;
        &lt;td rowspan=&#34;2&#34;&gt;CPU&lt;/td&gt;
        &lt;td&gt;AMD: Ryzen 3 or higher&lt;/td&gt;
    &lt;/tr&gt;
    &lt;tr&gt;
        &lt;td&gt;Intel: Core i3 or higher&lt;/td&gt;
    &lt;/tr&gt;
    &lt;tr&gt;
        &lt;td&gt;RAM&lt;/td&gt;
        &lt;td&gt;4GB or more&lt;/td&gt;
    &lt;/tr&gt;
    &lt;tr&gt;
        &lt;td rowspan=&#34;5&#34;&gt;OS&lt;/td&gt;
        &lt;td&gt;Windows: 10+ (Windows Server does not support virtual gamepads)&lt;/td&gt;
    &lt;/tr&gt;
    &lt;tr&gt;
        &lt;td&gt;macOS: 14+&lt;/td&gt;
    &lt;/tr&gt;
    &lt;tr&gt;
        &lt;td&gt;Linux/Debian: 13+ (trixie)&lt;/td&gt;
    &lt;/tr&gt;
    &lt;tr&gt;
        &lt;td&gt;Linux/Fedora: 41+&lt;/td&gt;
    &lt;/tr&gt;
    &lt;tr&gt;
        &lt;td&gt;Linux/Ubuntu: 22.04+ (jammy)&lt;/td&gt;
    &lt;/tr&gt;
    &lt;tr&gt;
        &lt;td rowspan=&#34;2&#34;&gt;Network&lt;/td&gt;
        &lt;td&gt;Host: 5GHz, 802.11ac&lt;/td&gt;
    &lt;/tr&gt;
    &lt;tr&gt;
        &lt;td&gt;Client: 5GHz, 802.11ac&lt;/td&gt;
    &lt;/tr&gt;
&lt;/table&gt;
&lt;table&gt;
    &lt;caption id=&#34;4k_suggestions&#34;&gt;4K Suggestions&lt;/caption&gt;
    &lt;tr&gt;
        &lt;th&gt;Component&lt;/th&gt;
        &lt;th&gt;Requirement&lt;/th&gt;
    &lt;/tr&gt;
    &lt;tr&gt;
        &lt;td rowspan=&#34;3&#34;&gt;GPU&lt;/td&gt;
        &lt;td&gt;AMD: Video Coding Engine 3.1 or higher&lt;/td&gt;
    &lt;/tr&gt;
    &lt;tr&gt;
        &lt;td&gt;
            Intel:&lt;br&gt;
            &amp;nbsp;&amp;nbsp;Linux: HD Graphics 510 or higher&lt;br&gt;
            &amp;nbsp;&amp;nbsp;Windows: Skylake or newer with QuickSync encoding support
        &lt;/td&gt;
    &lt;/tr&gt;
    &lt;tr&gt;
        &lt;td&gt;
            Nvidia:&lt;br&gt;
            &amp;nbsp;&amp;nbsp;Linux: GeForce RTX 2000 series or higher&lt;br&gt;
            &amp;nbsp;&amp;nbsp;Windows: GeForce GTX 1080 or higher
        &lt;/td&gt;
    &lt;/tr&gt;
    &lt;tr&gt;
        &lt;td rowspan=&#34;2&#34;&gt;CPU&lt;/td&gt;
        &lt;td&gt;AMD: Ryzen 5 or higher&lt;/td&gt;
    &lt;/tr&gt;
    &lt;tr&gt;
        &lt;td&gt;Intel: Core i5 or higher&lt;/td&gt;
    &lt;/tr&gt;
    &lt;tr&gt;
        &lt;td rowspan=&#34;2&#34;&gt;Network&lt;/td&gt;
        &lt;td&gt;Host: CAT5e ethernet or better&lt;/td&gt;
    &lt;/tr&gt;
    &lt;tr&gt;
        &lt;td&gt;Client: CAT5e ethernet or better&lt;/td&gt;
    &lt;/tr&gt;
&lt;/table&gt;
&lt;table&gt;
    &lt;caption id=&#34;hdr_suggestions&#34;&gt;HDR Suggestions&lt;/caption&gt;
    &lt;tr&gt;
        &lt;th&gt;Component&lt;/th&gt;
        &lt;th&gt;Requirement&lt;/th&gt;
    &lt;/tr&gt;
    &lt;tr&gt;
        &lt;td rowspan=&#34;3&#34;&gt;GPU&lt;/td&gt;
        &lt;td&gt;AMD: Video Coding Engine 3.4 or higher&lt;/td&gt;
    &lt;/tr&gt;
    &lt;tr&gt;
        &lt;td&gt;Intel: HD Graphics 730 or higher&lt;/td&gt;
    &lt;/tr&gt;
    &lt;tr&gt;
        &lt;td&gt;Nvidia: Pascal-based GPU (GTX 10-series) or higher&lt;/td&gt;
    &lt;/tr&gt;
    &lt;tr&gt;
        &lt;td rowspan=&#34;2&#34;&gt;CPU&lt;/td&gt;
        &lt;td&gt;AMD: Ryzen 5 or higher&lt;/td&gt;
    &lt;/tr&gt;
    &lt;tr&gt;
        &lt;td&gt;Intel: Core i5 or higher&lt;/td&gt;
    &lt;/tr&gt;
    &lt;tr&gt;
        &lt;td rowspan=&#34;2&#34;&gt;Network&lt;/td&gt;
        &lt;td&gt;Host: CAT5e ethernet or better&lt;/td&gt;
    &lt;/tr&gt;
    &lt;tr&gt;
        &lt;td&gt;Client: CAT5e ethernet or better&lt;/td&gt;
    &lt;/tr&gt;
&lt;/table&gt;
&lt;h2 id=&#34;-support&#34;&gt;❓ Support
&lt;/h2&gt;&lt;p&gt;Our support methods are listed in our &lt;a class=&#34;link&#34; href=&#34;https://docs.lizardbyte.dev/latest/about/support.html&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;LizardByte Docs&lt;/a&gt;.&lt;/p&gt;
&lt;h2 id=&#34;-sponsors-and-supporters&#34;&gt;💲 Sponsors and Supporters
&lt;/h2&gt;&lt;p align=&#34;center&#34;&gt;
  &lt;a href=&#34;https://app.lizardbyte.dev&#34; aria-label=&#34;Sponsor LizardByte&#34;&gt;
    &lt;img src=&#39;https://raw.githubusercontent.com/LizardByte/contributors/refs/heads/dist/sponsors.svg&#39;/&gt;
  &lt;/a&gt;
&lt;/p&gt;
&lt;h2 id=&#34;-contributors&#34;&gt;👥 Contributors
&lt;/h2&gt;&lt;p&gt;Thank you to all the contributors who have helped make Sunshine better!&lt;/p&gt;
&lt;h3 id=&#34;github&#34;&gt;GitHub
&lt;/h3&gt;&lt;p align=&#34;center&#34;&gt;
  &lt;a href=&#34;https://github.com/LizardByte/Sunshine&#34; aria-label=&#34;GitHub&#34;&gt;
    &lt;img src=&#39;https://raw.githubusercontent.com/LizardByte/contributors/refs/heads/dist/github.Sunshine.svg&#39;/&gt;
  &lt;/a&gt;
&lt;/p&gt;
&lt;h3 id=&#34;crowdin&#34;&gt;CrowdIn
&lt;/h3&gt;&lt;p align=&#34;center&#34;&gt;
  &lt;a href=&#34;https://translate.lizardbyte.dev&#34; aria-label=&#34;CrowdIn&#34;&gt;
    &lt;img src=&#39;https://raw.githubusercontent.com/LizardByte/contributors/refs/heads/dist/crowdin.606145.svg&#39;/&gt;
  &lt;/a&gt;
&lt;/p&gt;
&lt;div class=&#34;section_buttons&#34;&gt;
&lt;table&gt;
  &lt;thead&gt;
      &lt;tr&gt;
          &lt;th style=&#34;text-align: left&#34;&gt;Previous&lt;/th&gt;
          &lt;th style=&#34;text-align: right&#34;&gt;Next&lt;/th&gt;
      &lt;/tr&gt;
  &lt;/thead&gt;
  &lt;tbody&gt;
      &lt;tr&gt;
          &lt;td style=&#34;text-align: left&#34;&gt;&lt;/td&gt;
          &lt;td style=&#34;text-align: right&#34;&gt;&lt;a class=&#34;link&#34; href=&#34;docs/getting_started.md&#34; &gt;Getting Started&lt;/a&gt;&lt;/td&gt;
      &lt;/tr&gt;
  &lt;/tbody&gt;
&lt;/table&gt;
&lt;/div&gt;
&lt;details style=&#34;display: none;&#34;&gt;
  &lt;summary&gt;&lt;/summary&gt;
  [TOC]
&lt;/details&gt;
</description>
        </item>
        <item>
        <title>LMCache</title>
        <link>https://producthunt.programnotes.cn/en/p/lmcache/</link>
        <pubDate>Wed, 20 Aug 2025 15:28:48 +0800</pubDate>
        
        <guid>https://producthunt.programnotes.cn/en/p/lmcache/</guid>
        <description>&lt;img src="https://images.unsplash.com/photo-1478034460338-249ef2da6c0f?ixid=M3w0NjAwMjJ8MHwxfHJhbmRvbXx8fHx8fHx8fDE3NTU2NzQ5MDF8&amp;ixlib=rb-4.1.0" alt="Featured image of post LMCache" /&gt;&lt;h1 id=&#34;lmcachelmcache&#34;&gt;&lt;a class=&#34;link&#34; href=&#34;https://github.com/LMCache/LMCache&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;LMCache/LMCache&lt;/a&gt;
&lt;/h1&gt;&lt;div align=&#34;center&#34;&gt;
  &lt;p align=&#34;center&#34;&gt;
    &lt;img src=&#34;https://raw.githubusercontent.com/LMCache/LMCache/dev/asset/logo.png&#34; width=&#34;720&#34; alt=&#34;lmcache logo&#34;&gt;
  &lt;/p&gt;
&lt;p&gt;&lt;a class=&#34;link&#34; href=&#34;https://docs.lmcache.ai/&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;&lt;img src=&#34;https://img.shields.io/badge/docs-live-brightgreen&#34;
	
	
	
	loading=&#34;lazy&#34;
	
		alt=&#34;Docs&#34;
	
	
&gt;&lt;/a&gt;
&lt;a class=&#34;link&#34; href=&#34;https://pypi.org/project/lmcache/&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;&lt;img src=&#34;https://img.shields.io/pypi/v/lmcache&#34;
	
	
	
	loading=&#34;lazy&#34;
	
		alt=&#34;PyPI&#34;
	
	
&gt;&lt;/a&gt;
&lt;a class=&#34;link&#34; href=&#34;https://pypi.org/project/lmcache/&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;&lt;img src=&#34;https://img.shields.io/pypi/pyversions/lmcache&#34;
	
	
	
	loading=&#34;lazy&#34;
	
		alt=&#34;PyPI - Python Version&#34;
	
	
&gt;&lt;/a&gt;
&lt;a class=&#34;link&#34; href=&#34;https://buildkite.com/lmcache/lmcache-unittests&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;&lt;img src=&#34;https://badge.buildkite.com/ce25f1819a274b7966273bfa54f0e02f092c3de0d7563c5c9d.svg&#34;
	
	
	
	loading=&#34;lazy&#34;
	
		alt=&#34;Unit Tests&#34;
	
	
&gt;&lt;/a&gt;
&lt;a class=&#34;link&#34; href=&#34;https://github.com/LMCache/LMCache/actions/workflows/code_quality_checks.yml&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;&lt;img src=&#34;https://github.com/lmcache/lmcache/actions/workflows/code_quality_checks.yml/badge.svg?branch=dev&amp;amp;label=tests&#34;
	
	
	
	loading=&#34;lazy&#34;
	
		alt=&#34;Code Quality&#34;
	
	
&gt;&lt;/a&gt;
&lt;a class=&#34;link&#34; href=&#34;https://buildkite.com/lmcache/lmcache-vllm-integration-tests&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;&lt;img src=&#34;https://badge.buildkite.com/108ddd4ab482a2480999dec8c62a640a3315ed4e6c4e86798e.svg&#34;
	
	
	
	loading=&#34;lazy&#34;
	
		alt=&#34;Integration Tests&#34;
	
	
&gt;&lt;/a&gt;&lt;/p&gt;
   &lt;br /&gt;
&lt;p&gt;&lt;a class=&#34;link&#34; href=&#34;https://www.bestpractices.dev/projects/10841&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;&lt;img src=&#34;https://www.bestpractices.dev/projects/10841/badge&#34;
	
	
	
	loading=&#34;lazy&#34;
	
		alt=&#34;OpenSSF Best Practices&#34;
	
	
&gt;&lt;/a&gt;
&lt;a class=&#34;link&#34; href=&#34;https://scorecard.dev/viewer/?uri=github.com/LMCache/LMCache&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;&lt;img src=&#34;https://api.scorecard.dev/projects/github.com/LMCache/LMCache/badge&#34;
	
	
	
	loading=&#34;lazy&#34;
	
		alt=&#34;OpenSSF Scorecard&#34;
	
	
&gt;&lt;/a&gt;
&lt;a class=&#34;link&#34; href=&#34;https://deepwiki.com/LMCache/LMCache/&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;&lt;img src=&#34;https://deepwiki.com/badge.svg&#34;
	
	
	
	loading=&#34;lazy&#34;
	
		alt=&#34;Ask DeepWiki&#34;
	
	
&gt;&lt;/a&gt;
&lt;a class=&#34;link&#34; href=&#34;https://github.com/LMCache/LMCache/graphs/commit-activity&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;&lt;img src=&#34;https://img.shields.io/github/commit-activity/w/LMCache/LMCache&#34;
	
	
	
	loading=&#34;lazy&#34;
	
		alt=&#34;GitHub commit activity&#34;
	
	
&gt;&lt;/a&gt;
&lt;a class=&#34;link&#34; href=&#34;https://pypi.org/project/lmcache/&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;&lt;img src=&#34;https://img.shields.io/pypi/dm/lmcache&#34;
	
	
	
	loading=&#34;lazy&#34;
	
		alt=&#34;PyPI - Downloads&#34;
	
	
&gt;&lt;/a&gt;
&lt;a class=&#34;link&#34; href=&#34;https://www.youtube.com/channel/UC58zMz55n70rtf1Ak2PULJA&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;&lt;img src=&#34;https://img.shields.io/youtube/channel/views/UC58zMz55n70rtf1Ak2PULJA&#34;
	
	
	
	loading=&#34;lazy&#34;
	
		alt=&#34;YouTube Channel Views&#34;
	
	
&gt;&lt;/a&gt;&lt;/p&gt;
&lt;/div&gt;
&lt;hr&gt;
&lt;p&gt;| &lt;a class=&#34;link&#34; href=&#34;https://blog.lmcache.ai/&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;&lt;strong&gt;Blog&lt;/strong&gt;&lt;/a&gt;
| &lt;a class=&#34;link&#34; href=&#34;https://docs.lmcache.ai/&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;&lt;strong&gt;Documentation&lt;/strong&gt;&lt;/a&gt;
| &lt;a class=&#34;link&#34; href=&#34;https://join.slack.com/t/lmcacheworkspace/shared_invite/zt-36x1m765z-8FgDA_73vcXtlZ_4XvpE6Q&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;&lt;strong&gt;Join Slack&lt;/strong&gt;&lt;/a&gt;
| &lt;a class=&#34;link&#34; href=&#34;https://forms.gle/MHwLiYDU6kcW3dLj7&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;&lt;strong&gt;Interest Form&lt;/strong&gt;&lt;/a&gt;
| &lt;a class=&#34;link&#34; href=&#34;https://github.com/LMCache/LMCache/issues/1253&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;&lt;strong&gt;Roadmap&lt;/strong&gt;&lt;/a&gt;&lt;/p&gt;
&lt;p&gt;🔥 &lt;strong&gt;NEW: For enterprise-scale deployment of LMCache and vLLM, please check out vLLM &lt;a class=&#34;link&#34; href=&#34;https://github.com/vllm-project/production-stack&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;Production Stack&lt;/a&gt;. LMCache is also officially supported in &lt;a class=&#34;link&#34; href=&#34;https://github.com/llm-d/llm-d/&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;llm-d&lt;/a&gt; and &lt;a class=&#34;link&#34; href=&#34;https://github.com/kserve/kserve&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;KServe&lt;/a&gt;!&lt;/strong&gt;&lt;/p&gt;
&lt;h2 id=&#34;summary&#34;&gt;Summary
&lt;/h2&gt;&lt;p&gt;LMCache is an &lt;strong&gt;LLM&lt;/strong&gt; serving engine extension to &lt;strong&gt;reduce TTFT&lt;/strong&gt; and &lt;strong&gt;increase throughput&lt;/strong&gt;, especially under long-context scenarios. By storing the KV caches of reusable texts across various locations, including GPU, CPU DRAM, and local disk, LMCache reuses the KV caches of &lt;strong&gt;&lt;em&gt;any&lt;/em&gt;&lt;/strong&gt; reused text (not necessarily prefix) in &lt;strong&gt;&lt;em&gt;any&lt;/em&gt;&lt;/strong&gt; serving engine instance. Thus, LMCache saves precious GPU cycles and reduces user response delay.&lt;/p&gt;
&lt;p&gt;By combining LMCache with vLLM, developers achieve 3-10x delay savings and GPU cycle reduction in many LLM use cases, including multi-round QA and RAG.&lt;/p&gt;
&lt;p&gt;&lt;img src=&#34;https://github.com/user-attachments/assets/86137f17-f216-41a0-96a7-e537764f7a4c&#34;
	
	
	
	loading=&#34;lazy&#34;
	
		alt=&#34;performance&#34;
	
	
&gt;&lt;/p&gt;
&lt;h2 id=&#34;features&#34;&gt;Features
&lt;/h2&gt;&lt;ul&gt;
&lt;li&gt;&lt;input checked=&#34;&#34; disabled=&#34;&#34; type=&#34;checkbox&#34;&gt; 🔥 Integration with vLLM v1 with the following features:
&lt;ul&gt;
&lt;li&gt;High performance CPU KVCache offloading&lt;/li&gt;
&lt;li&gt;Disaggregated prefill&lt;/li&gt;
&lt;li&gt;P2P KVCache sharing&lt;/li&gt;
&lt;/ul&gt;
&lt;/li&gt;
&lt;li&gt;&lt;input checked=&#34;&#34; disabled=&#34;&#34; type=&#34;checkbox&#34;&gt; LMCache is supported in the &lt;a class=&#34;link&#34; href=&#34;https://github.com/vllm-project/production-stack/&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;vLLM production stack&lt;/a&gt;, &lt;a class=&#34;link&#34; href=&#34;https://github.com/llm-d/llm-d/&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;llm-d&lt;/a&gt;, and &lt;a class=&#34;link&#34; href=&#34;https://github.com/kserve/kserve&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;KServe&lt;/a&gt;&lt;/li&gt;
&lt;li&gt;&lt;input checked=&#34;&#34; disabled=&#34;&#34; type=&#34;checkbox&#34;&gt; Stable support for non-prefix KV caches&lt;/li&gt;
&lt;li&gt;&lt;input checked=&#34;&#34; disabled=&#34;&#34; type=&#34;checkbox&#34;&gt; Storage support as follows:
&lt;ul&gt;
&lt;li&gt;CPU&lt;/li&gt;
&lt;li&gt;Disk&lt;/li&gt;
&lt;li&gt;&lt;a class=&#34;link&#34; href=&#34;https://github.com/ai-dynamo/nixl&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;NIXL&lt;/a&gt;&lt;/li&gt;
&lt;/ul&gt;
&lt;/li&gt;
&lt;li&gt;&lt;input checked=&#34;&#34; disabled=&#34;&#34; type=&#34;checkbox&#34;&gt; Installation support through pip and latest vLLM&lt;/li&gt;
&lt;/ul&gt;
&lt;h2 id=&#34;installation&#34;&gt;Installation
&lt;/h2&gt;&lt;p&gt;To use LMCache, simply install &lt;code&gt;lmcache&lt;/code&gt; from your package manager, e.g. pip:&lt;/p&gt;
&lt;div class=&#34;highlight&#34;&gt;&lt;div class=&#34;chroma&#34;&gt;
&lt;table class=&#34;lntable&#34;&gt;&lt;tr&gt;&lt;td class=&#34;lntd&#34;&gt;
&lt;pre tabindex=&#34;0&#34; class=&#34;chroma&#34;&gt;&lt;code&gt;&lt;span class=&#34;lnt&#34;&gt;1
&lt;/span&gt;&lt;/code&gt;&lt;/pre&gt;&lt;/td&gt;
&lt;td class=&#34;lntd&#34;&gt;
&lt;pre tabindex=&#34;0&#34; class=&#34;chroma&#34;&gt;&lt;code class=&#34;language-bash&#34; data-lang=&#34;bash&#34;&gt;&lt;span class=&#34;line&#34;&gt;&lt;span class=&#34;cl&#34;&gt;pip install lmcache
&lt;/span&gt;&lt;/span&gt;&lt;/code&gt;&lt;/pre&gt;&lt;/td&gt;&lt;/tr&gt;&lt;/table&gt;
&lt;/div&gt;
&lt;/div&gt;&lt;p&gt;Works on Linux NVIDIA GPU platform.&lt;/p&gt;
&lt;p&gt;More &lt;a class=&#34;link&#34; href=&#34;https://docs.lmcache.ai/getting_started/installation&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;detailed installation instructions&lt;/a&gt; are available in the docs, particularly if you are not using the latest stable version of vLLM or using another serving engine with different dependencies. Any &amp;ldquo;undefined symbol&amp;rdquo; errors or torch version mismatches can be resolved in the documentation.&lt;/p&gt;
&lt;h2 id=&#34;getting-started&#34;&gt;Getting started
&lt;/h2&gt;&lt;p&gt;The best way to get started is to check out the &lt;a class=&#34;link&#34; href=&#34;https://docs.lmcache.ai/getting_started/quickstart/&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;Quickstart Examples&lt;/a&gt; in the docs.&lt;/p&gt;
&lt;h2 id=&#34;documentation&#34;&gt;Documentation
&lt;/h2&gt;&lt;p&gt;Check out the LMCache &lt;a class=&#34;link&#34; href=&#34;https://docs.lmcache.ai/&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;documentation&lt;/a&gt; which is available online.&lt;/p&gt;
&lt;p&gt;We also post regularly in &lt;a class=&#34;link&#34; href=&#34;https://blog.lmcache.ai/&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;LMCache blogs&lt;/a&gt;.&lt;/p&gt;
&lt;h2 id=&#34;examples&#34;&gt;Examples
&lt;/h2&gt;&lt;p&gt;Go hands-on with our &lt;a class=&#34;link&#34; href=&#34;https://github.com/LMCache/LMCache/tree/dev/examples&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;examples&lt;/a&gt;,
demonstrating how to address different use cases with LMCache.&lt;/p&gt;
&lt;h2 id=&#34;interested-in-connecting&#34;&gt;Interested in Connecting?
&lt;/h2&gt;&lt;p&gt;Fill out the &lt;a class=&#34;link&#34; href=&#34;https://forms.gle/mQfQDUXbKfp2St1z7&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;interest form&lt;/a&gt;, &lt;a class=&#34;link&#34; href=&#34;https://mailchi.mp/tensormesh/lmcache-sign-up-newsletter&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;sign up for our newsletter&lt;/a&gt;, &lt;a class=&#34;link&#34; href=&#34;https://join.slack.com/t/lmcacheworkspace/shared_invite/zt-2viziwhue-5Amprc9k5hcIdXT7XevTaQ&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;join LMCache slack&lt;/a&gt;, &lt;a class=&#34;link&#34; href=&#34;https://lmcache.ai/&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;check out LMCache website&lt;/a&gt;, or &lt;a class=&#34;link&#34; href=&#34;mailto:contact@lmcache.ai&#34; &gt;drop an email&lt;/a&gt;, and our team will reach out to you!&lt;/p&gt;
&lt;h2 id=&#34;community-meeting&#34;&gt;Community meeting
&lt;/h2&gt;&lt;p&gt;The &lt;a class=&#34;link&#34; href=&#34;https://uchicago.zoom.us/j/6603596916?pwd=Z1E5MDRWUSt2am5XbEt4dTFkNGx6QT09&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;community meeting&lt;/a&gt; for LMCache is hosted bi-weekly. All are welcome to join!&lt;/p&gt;
&lt;p&gt;Meetings are held bi-weekly on: Tuesdays at 9:00 AM PT – &lt;a class=&#34;link&#34; href=&#34;https://drive.usercontent.google.com/u/0/uc?id=1f5EXbooGcwNwzIpTgn5u4PHqXgfypMtu&amp;amp;export=download&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;Add to Calendar&lt;/a&gt;&lt;/p&gt;
&lt;p&gt;We keep notes from each meeting on this &lt;a class=&#34;link&#34; href=&#34;https://docs.google.com/document/d/1_Fl3vLtERFa3vTH00cezri78NihNBtSClK-_1tSrcow&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;document&lt;/a&gt; for summaries of standups, discussion, and action items.&lt;/p&gt;
&lt;p&gt;Recordings of meetings are available on the &lt;a class=&#34;link&#34; href=&#34;https://www.youtube.com/channel/UC58zMz55n70rtf1Ak2PULJA&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;YouTube LMCache channel&lt;/a&gt;.&lt;/p&gt;
&lt;h2 id=&#34;contributing&#34;&gt;Contributing
&lt;/h2&gt;&lt;p&gt;We welcome and value all contributions and collaborations.  Please check out &lt;a class=&#34;link&#34; href=&#34;CONTRIBUTING.md&#34; &gt;Contributing Guide&lt;/a&gt; on how to contribute.&lt;/p&gt;
&lt;p&gt;We continually update &lt;a class=&#34;link&#34; href=&#34;https://github.com/LMCache/LMCache/issues/627&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;[Onboarding] Welcoming contributors with good first issues!&lt;/a&gt;&lt;/p&gt;
&lt;h2 id=&#34;citation&#34;&gt;Citation
&lt;/h2&gt;&lt;p&gt;If you use LMCache for your research, please cite our papers:&lt;/p&gt;
&lt;div class=&#34;highlight&#34;&gt;&lt;div class=&#34;chroma&#34;&gt;
&lt;table class=&#34;lntable&#34;&gt;&lt;tr&gt;&lt;td class=&#34;lntd&#34;&gt;
&lt;pre tabindex=&#34;0&#34; class=&#34;chroma&#34;&gt;&lt;code&gt;&lt;span class=&#34;lnt&#34;&gt; 1
&lt;/span&gt;&lt;span class=&#34;lnt&#34;&gt; 2
&lt;/span&gt;&lt;span class=&#34;lnt&#34;&gt; 3
&lt;/span&gt;&lt;span class=&#34;lnt&#34;&gt; 4
&lt;/span&gt;&lt;span class=&#34;lnt&#34;&gt; 5
&lt;/span&gt;&lt;span class=&#34;lnt&#34;&gt; 6
&lt;/span&gt;&lt;span class=&#34;lnt&#34;&gt; 7
&lt;/span&gt;&lt;span class=&#34;lnt&#34;&gt; 8
&lt;/span&gt;&lt;span class=&#34;lnt&#34;&gt; 9
&lt;/span&gt;&lt;span class=&#34;lnt&#34;&gt;10
&lt;/span&gt;&lt;span class=&#34;lnt&#34;&gt;11
&lt;/span&gt;&lt;span class=&#34;lnt&#34;&gt;12
&lt;/span&gt;&lt;span class=&#34;lnt&#34;&gt;13
&lt;/span&gt;&lt;span class=&#34;lnt&#34;&gt;14
&lt;/span&gt;&lt;span class=&#34;lnt&#34;&gt;15
&lt;/span&gt;&lt;span class=&#34;lnt&#34;&gt;16
&lt;/span&gt;&lt;span class=&#34;lnt&#34;&gt;17
&lt;/span&gt;&lt;span class=&#34;lnt&#34;&gt;18
&lt;/span&gt;&lt;span class=&#34;lnt&#34;&gt;19
&lt;/span&gt;&lt;span class=&#34;lnt&#34;&gt;20
&lt;/span&gt;&lt;span class=&#34;lnt&#34;&gt;21
&lt;/span&gt;&lt;span class=&#34;lnt&#34;&gt;22
&lt;/span&gt;&lt;span class=&#34;lnt&#34;&gt;23
&lt;/span&gt;&lt;span class=&#34;lnt&#34;&gt;24
&lt;/span&gt;&lt;/code&gt;&lt;/pre&gt;&lt;/td&gt;
&lt;td class=&#34;lntd&#34;&gt;
&lt;pre tabindex=&#34;0&#34; class=&#34;chroma&#34;&gt;&lt;code class=&#34;language-fallback&#34; data-lang=&#34;fallback&#34;&gt;&lt;span class=&#34;line&#34;&gt;&lt;span class=&#34;cl&#34;&gt;@inproceedings{liu2024cachegen,
&lt;/span&gt;&lt;/span&gt;&lt;span class=&#34;line&#34;&gt;&lt;span class=&#34;cl&#34;&gt;  title={Cachegen: Kv cache compression and streaming for fast large language model serving},
&lt;/span&gt;&lt;/span&gt;&lt;span class=&#34;line&#34;&gt;&lt;span class=&#34;cl&#34;&gt;  author={Liu, Yuhan and Li, Hanchen and Cheng, Yihua and Ray, Siddhant and Huang, Yuyang and Zhang, Qizheng and Du, Kuntai and Yao, Jiayi and Lu, Shan and Ananthanarayanan, Ganesh and others},
&lt;/span&gt;&lt;/span&gt;&lt;span class=&#34;line&#34;&gt;&lt;span class=&#34;cl&#34;&gt;  booktitle={Proceedings of the ACM SIGCOMM 2024 Conference},
&lt;/span&gt;&lt;/span&gt;&lt;span class=&#34;line&#34;&gt;&lt;span class=&#34;cl&#34;&gt;  pages={38--56},
&lt;/span&gt;&lt;/span&gt;&lt;span class=&#34;line&#34;&gt;&lt;span class=&#34;cl&#34;&gt;  year={2024}
&lt;/span&gt;&lt;/span&gt;&lt;span class=&#34;line&#34;&gt;&lt;span class=&#34;cl&#34;&gt;}
&lt;/span&gt;&lt;/span&gt;&lt;span class=&#34;line&#34;&gt;&lt;span class=&#34;cl&#34;&gt;
&lt;/span&gt;&lt;/span&gt;&lt;span class=&#34;line&#34;&gt;&lt;span class=&#34;cl&#34;&gt;@article{cheng2024large,
&lt;/span&gt;&lt;/span&gt;&lt;span class=&#34;line&#34;&gt;&lt;span class=&#34;cl&#34;&gt;  title={Do Large Language Models Need a Content Delivery Network?},
&lt;/span&gt;&lt;/span&gt;&lt;span class=&#34;line&#34;&gt;&lt;span class=&#34;cl&#34;&gt;  author={Cheng, Yihua and Du, Kuntai and Yao, Jiayi and Jiang, Junchen},
&lt;/span&gt;&lt;/span&gt;&lt;span class=&#34;line&#34;&gt;&lt;span class=&#34;cl&#34;&gt;  journal={arXiv preprint arXiv:2409.13761},
&lt;/span&gt;&lt;/span&gt;&lt;span class=&#34;line&#34;&gt;&lt;span class=&#34;cl&#34;&gt;  year={2024}
&lt;/span&gt;&lt;/span&gt;&lt;span class=&#34;line&#34;&gt;&lt;span class=&#34;cl&#34;&gt;}
&lt;/span&gt;&lt;/span&gt;&lt;span class=&#34;line&#34;&gt;&lt;span class=&#34;cl&#34;&gt;
&lt;/span&gt;&lt;/span&gt;&lt;span class=&#34;line&#34;&gt;&lt;span class=&#34;cl&#34;&gt;@inproceedings{10.1145/3689031.3696098,
&lt;/span&gt;&lt;/span&gt;&lt;span class=&#34;line&#34;&gt;&lt;span class=&#34;cl&#34;&gt;  author = {Yao, Jiayi and Li, Hanchen and Liu, Yuhan and Ray, Siddhant and Cheng, Yihua and Zhang, Qizheng and Du, Kuntai and Lu, Shan and Jiang, Junchen},
&lt;/span&gt;&lt;/span&gt;&lt;span class=&#34;line&#34;&gt;&lt;span class=&#34;cl&#34;&gt;  title = {CacheBlend: Fast Large Language Model Serving for RAG with Cached Knowledge Fusion},
&lt;/span&gt;&lt;/span&gt;&lt;span class=&#34;line&#34;&gt;&lt;span class=&#34;cl&#34;&gt;  year = {2025},
&lt;/span&gt;&lt;/span&gt;&lt;span class=&#34;line&#34;&gt;&lt;span class=&#34;cl&#34;&gt;  url = {https://doi.org/10.1145/3689031.3696098},
&lt;/span&gt;&lt;/span&gt;&lt;span class=&#34;line&#34;&gt;&lt;span class=&#34;cl&#34;&gt;  doi = {10.1145/3689031.3696098},
&lt;/span&gt;&lt;/span&gt;&lt;span class=&#34;line&#34;&gt;&lt;span class=&#34;cl&#34;&gt;  booktitle = {Proceedings of the Twentieth European Conference on Computer Systems},
&lt;/span&gt;&lt;/span&gt;&lt;span class=&#34;line&#34;&gt;&lt;span class=&#34;cl&#34;&gt;  pages = {94–109},
&lt;/span&gt;&lt;/span&gt;&lt;span class=&#34;line&#34;&gt;&lt;span class=&#34;cl&#34;&gt;}
&lt;/span&gt;&lt;/span&gt;&lt;/code&gt;&lt;/pre&gt;&lt;/td&gt;&lt;/tr&gt;&lt;/table&gt;
&lt;/div&gt;
&lt;/div&gt;&lt;h2 id=&#34;socials&#34;&gt;Socials
&lt;/h2&gt;&lt;p&gt;&lt;a class=&#34;link&#34; href=&#34;https://www.linkedin.com/company/lmcache-lab/?viewAsMember=true&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;Linkedin&lt;/a&gt; | &lt;a class=&#34;link&#34; href=&#34;https://x.com/lmcache&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;Twitter&lt;/a&gt; | &lt;a class=&#34;link&#34; href=&#34;https://www.youtube.com/@LMCacheTeam&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;Youtube&lt;/a&gt;&lt;/p&gt;
&lt;h2 id=&#34;license&#34;&gt;License
&lt;/h2&gt;&lt;p&gt;The LMCache codebase is licensed under Apache License 2.0. See the &lt;a class=&#34;link&#34; href=&#34;LICENSE&#34; &gt;LICENSE&lt;/a&gt; file for details.&lt;/p&gt;
</description>
        </item>
        <item>
        <title>hashcat</title>
        <link>https://producthunt.programnotes.cn/en/p/hashcat/</link>
        <pubDate>Wed, 06 Aug 2025 15:37:25 +0800</pubDate>
        
        <guid>https://producthunt.programnotes.cn/en/p/hashcat/</guid>
        <description>&lt;img src="https://images.unsplash.com/photo-1650749837474-a9ab19e3d1af?ixid=M3w0NjAwMjJ8MHwxfHJhbmRvbXx8fHx8fHx8fDE3NTQ0NjU3NTF8&amp;ixlib=rb-4.1.0" alt="Featured image of post hashcat" /&gt;&lt;h1 id=&#34;hashcathashcat&#34;&gt;&lt;a class=&#34;link&#34; href=&#34;https://github.com/hashcat/hashcat&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;hashcat/hashcat&lt;/a&gt;
&lt;/h1&gt;&lt;h2 id=&#34;hashcat&#34;&gt;&lt;em&gt;hashcat&lt;/em&gt;
&lt;/h2&gt;&lt;p&gt;&lt;strong&gt;hashcat&lt;/strong&gt; is the world&amp;rsquo;s fastest and most advanced password recovery utility, supporting five unique modes of attack for over 300 highly-optimized hashing algorithms. hashcat currently supports CPUs, GPUs, and other hardware accelerators on Linux, Windows, and macOS, and has facilities to help enable distributed password cracking.&lt;/p&gt;
&lt;h3 id=&#34;license&#34;&gt;License
&lt;/h3&gt;&lt;p&gt;&lt;strong&gt;hashcat&lt;/strong&gt; is licensed under the MIT license. Refer to &lt;a class=&#34;link&#34; href=&#34;docs/license.txt&#34; &gt;docs/license.txt&lt;/a&gt; for more information.&lt;/p&gt;
&lt;h3 id=&#34;installation&#34;&gt;Installation
&lt;/h3&gt;&lt;p&gt;Download the &lt;a class=&#34;link&#34; href=&#34;https://hashcat.net/hashcat/&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;latest release&lt;/a&gt; and unpack it in the desired location. Please remember to use &lt;code&gt;7z x&lt;/code&gt; when unpacking the archive from the command line to ensure full file paths remain intact.&lt;/p&gt;
&lt;h3 id=&#34;usagehelp&#34;&gt;Usage/Help
&lt;/h3&gt;&lt;p&gt;Please refer to the &lt;a class=&#34;link&#34; href=&#34;https://hashcat.net/wiki/&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;Hashcat Wiki&lt;/a&gt; and the output of &lt;code&gt;--help&lt;/code&gt; for usage information and general help. A list of frequently asked questions may also be found &lt;a class=&#34;link&#34; href=&#34;https://hashcat.net/wiki/doku.php?id=frequently_asked_questions&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;here&lt;/a&gt;. The &lt;a class=&#34;link&#34; href=&#34;https://hashcat.net/forum/&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;Hashcat Forum&lt;/a&gt; also contains a plethora of information. If you still think you need help by a real human come to &lt;a class=&#34;link&#34; href=&#34;https://discord.gg/HFS523HGBT&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;Discord&lt;/a&gt;.&lt;/p&gt;
&lt;h3 id=&#34;building&#34;&gt;Building
&lt;/h3&gt;&lt;p&gt;Refer to &lt;a class=&#34;link&#34; href=&#34;BUILD.md&#34; &gt;BUILD.md&lt;/a&gt; for instructions on how to build &lt;strong&gt;hashcat&lt;/strong&gt; from source.&lt;/p&gt;
&lt;p&gt;Tests:&lt;/p&gt;
&lt;table&gt;
  &lt;thead&gt;
      &lt;tr&gt;
          &lt;th&gt;Travis&lt;/th&gt;
          &lt;th&gt;Coverity&lt;/th&gt;
          &lt;th&gt;GitHub Actions&lt;/th&gt;
      &lt;/tr&gt;
  &lt;/thead&gt;
  &lt;tbody&gt;
      &lt;tr&gt;
          &lt;td&gt;&lt;a class=&#34;link&#34; href=&#34;https://travis-ci.org/hashcat/hashcat&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;&lt;img src=&#34;https://travis-ci.org/hashcat/hashcat.svg?branch=master&#34;
	
	
	
	loading=&#34;lazy&#34;
	
		alt=&#34;Hashcat Travis Build status&#34;
	
	
&gt;&lt;/a&gt;&lt;/td&gt;
          &lt;td&gt;&lt;a class=&#34;link&#34; href=&#34;https://scan.coverity.com/projects/hashcat&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;&lt;img src=&#34;https://scan.coverity.com/projects/11753/badge.svg&#34;
	
	
	
	loading=&#34;lazy&#34;
	
		alt=&#34;Coverity Scan Build Status&#34;
	
	
&gt;&lt;/a&gt;&lt;/td&gt;
          &lt;td&gt;&lt;a class=&#34;link&#34; href=&#34;https://github.com/hashcat/hashcat/actions/workflows/build.yml&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;&lt;img src=&#34;https://github.com/hashcat/hashcat/actions/workflows/build.yml/badge.svg&#34;
	
	
	
	loading=&#34;lazy&#34;
	
		alt=&#34;Hashcat GitHub Actions Build status&#34;
	
	
&gt;&lt;/a&gt;&lt;/td&gt;
      &lt;/tr&gt;
  &lt;/tbody&gt;
&lt;/table&gt;
&lt;h3 id=&#34;contributing&#34;&gt;Contributing
&lt;/h3&gt;&lt;p&gt;Contributions are welcome and encouraged, provided your code is of sufficient quality. Before submitting a pull request, please ensure your code adheres to the following requirements:&lt;/p&gt;
&lt;ol&gt;
&lt;li&gt;Licensed under MIT license, or dedicated to the public domain (BSD, GPL, etc. code is incompatible)&lt;/li&gt;
&lt;li&gt;Adheres to gnu99 standard&lt;/li&gt;
&lt;li&gt;Compiles cleanly with no warnings when compiled with &lt;code&gt;-W -Wall -std=gnu99&lt;/code&gt;&lt;/li&gt;
&lt;li&gt;Uses &lt;a class=&#34;link&#34; href=&#34;https://en.wikipedia.org/wiki/Indent_style#Allman_style&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;Allman-style&lt;/a&gt; code blocks &amp;amp; indentation&lt;/li&gt;
&lt;li&gt;Uses 2-spaces as the indentation or a tab if it&amp;rsquo;s required (for example: Makefiles)&lt;/li&gt;
&lt;li&gt;Uses lower-case function and variable names&lt;/li&gt;
&lt;li&gt;Avoids the use of &lt;code&gt;!&lt;/code&gt; and uses positive conditionals wherever possible (e.g., &lt;code&gt;if (foo == 0)&lt;/code&gt; instead of &lt;code&gt;if (!foo)&lt;/code&gt;, and &lt;code&gt;if (foo)&lt;/code&gt; instead of &lt;code&gt;if (foo != 0)&lt;/code&gt;)&lt;/li&gt;
&lt;li&gt;Use code like array[index + 0] if you also need to do array[index + 1], to keep it aligned&lt;/li&gt;
&lt;/ol&gt;
&lt;p&gt;You can use GNU Indent to help assist you with the style requirements:&lt;/p&gt;
&lt;div class=&#34;highlight&#34;&gt;&lt;div class=&#34;chroma&#34;&gt;
&lt;table class=&#34;lntable&#34;&gt;&lt;tr&gt;&lt;td class=&#34;lntd&#34;&gt;
&lt;pre tabindex=&#34;0&#34; class=&#34;chroma&#34;&gt;&lt;code&gt;&lt;span class=&#34;lnt&#34;&gt;1
&lt;/span&gt;&lt;/code&gt;&lt;/pre&gt;&lt;/td&gt;
&lt;td class=&#34;lntd&#34;&gt;
&lt;pre tabindex=&#34;0&#34; class=&#34;chroma&#34;&gt;&lt;code class=&#34;language-fallback&#34; data-lang=&#34;fallback&#34;&gt;&lt;span class=&#34;line&#34;&gt;&lt;span class=&#34;cl&#34;&gt;indent -st -bad -bap -sc -bl -bli0 -ncdw -nce -cli0 -cbi0 -pcs -cs -npsl -bs -nbc -bls -blf -lp -i2 -ts2 -nut -l1024 -nbbo -fca -lc1024 -fc1
&lt;/span&gt;&lt;/span&gt;&lt;/code&gt;&lt;/pre&gt;&lt;/td&gt;&lt;/tr&gt;&lt;/table&gt;
&lt;/div&gt;
&lt;/div&gt;&lt;p&gt;Your pull request should fully describe the functionality you are adding/removing or the problem you are solving. Regardless of whether your patch modifies one line or one thousand lines, you must describe what has prompted and/or motivated the change.&lt;/p&gt;
&lt;p&gt;Solve only one problem in each pull request. If you&amp;rsquo;re fixing a bug and adding a new feature, you need to make two separate pull requests. If you&amp;rsquo;re fixing three bugs, you need to make three separate pull requests. If you&amp;rsquo;re adding four new features, you need to make four separate pull requests. So on, and so forth.&lt;/p&gt;
&lt;p&gt;If your patch fixes a bug, please be sure there is an &lt;a class=&#34;link&#34; href=&#34;https://github.com/hashcat/hashcat/issues&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;issue&lt;/a&gt; open for the bug before submitting a pull request. If your patch aims to improve performance or optimize an algorithm, be sure to quantify your optimizations and document the trade-offs, and back up your claims with benchmarks and metrics.&lt;/p&gt;
&lt;p&gt;In order to maintain the quality and integrity of the &lt;strong&gt;hashcat&lt;/strong&gt; source tree, all pull requests must be reviewed and signed off by at least two &lt;a class=&#34;link&#34; href=&#34;https://github.com/orgs/hashcat/people&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;board members&lt;/a&gt; before being merged. The &lt;a class=&#34;link&#34; href=&#34;https://github.com/jsteube&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;project lead&lt;/a&gt; has the ultimate authority in deciding whether to accept or reject a pull request. Do not be discouraged if your pull request is rejected!&lt;/p&gt;
&lt;h3 id=&#34;happy-cracking&#34;&gt;Happy Cracking!
&lt;/h3&gt;</description>
        </item>
        <item>
        <title>ZLUDA</title>
        <link>https://producthunt.programnotes.cn/en/p/zluda/</link>
        <pubDate>Mon, 07 Jul 2025 15:31:03 +0800</pubDate>
        
        <guid>https://producthunt.programnotes.cn/en/p/zluda/</guid>
        <description>&lt;img src="https://images.unsplash.com/photo-1736452221254-ae8d76bf3c79?ixid=M3w0NjAwMjJ8MHwxfHJhbmRvbXx8fHx8fHx8fDE3NTE4NzM0MjZ8&amp;ixlib=rb-4.1.0" alt="Featured image of post ZLUDA" /&gt;&lt;h1 id=&#34;vosenzluda&#34;&gt;&lt;a class=&#34;link&#34; href=&#34;https://github.com/vosen/ZLUDA&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;vosen/ZLUDA&lt;/a&gt;
&lt;/h1&gt;&lt;p&gt;&lt;a class=&#34;link&#34; href=&#34;https://discord.gg/sg6BNzXuc7&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;&lt;img src=&#34;https://img.shields.io/badge/Discord-%235865F2.svg?style=for-the-badge&amp;amp;logo=discord&amp;amp;logoColor=white&#34;
	
	
	
	loading=&#34;lazy&#34;
	
		alt=&#34;Discord&#34;
	
	
&gt;&lt;/a&gt;&lt;/p&gt;
&lt;h1 id=&#34;zluda&#34;&gt;ZLUDA
&lt;/h1&gt;&lt;p&gt;ZLUDA is a drop-in replacement for CUDA on non-NVIDIA GPUs. ZLUDA allows you to run unmodified CUDA applications using non-NVIDIA GPUs with near-native performance.&lt;/p&gt;
&lt;p&gt;ZLUDA supports AMD Radeon RX 5000 series and newer GPUs (both desktop and integrated).&lt;/p&gt;
&lt;p&gt;&lt;img src=&#34;https://producthunt.programnotes.cn/geekbench.svg&#34;
	
	
	
	loading=&#34;lazy&#34;
	
		alt=&#34;GeekBench 5.5.1 chart&#34;
	
	
&gt;&lt;/p&gt;
&lt;p&gt;ZLUDA is work in progress. Follow development here and say hi on &lt;a class=&#34;link&#34; href=&#34;https://discord.gg/sg6BNzXuc7&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;Discord&lt;/a&gt;. For more details see the announcement: &lt;a class=&#34;link&#34; href=&#34;https://vosen.github.io/ZLUDA/blog/zludas-third-life/&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;https://vosen.github.io/ZLUDA/blog/zludas-third-life/&lt;/a&gt;&lt;/p&gt;
&lt;h2 id=&#34;usage&#34;&gt;Usage
&lt;/h2&gt;&lt;p&gt;&lt;strong&gt;Warning&lt;/strong&gt;: This version of ZLUDA is under heavy development (more &lt;a class=&#34;link&#34; href=&#34;https://vosen.github.io/ZLUDA/blog/zludas-third-life/&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;here&lt;/a&gt;) and right now only supports Geekbench. ZLUDA probably will not work with your application just yet.&lt;/p&gt;
&lt;h3 id=&#34;windows&#34;&gt;Windows
&lt;/h3&gt;&lt;p&gt;You should have recent AMD GPU driver (&amp;ldquo;AMD Software: Adrenalin Edition&amp;rdquo;) installed.&lt;br&gt;
To run your application you should either:&lt;/p&gt;
&lt;ul&gt;
&lt;li&gt;(Recommended approach) Copy ZLUDA-provided &lt;code&gt;nvcuda.dll&lt;/code&gt; and &lt;code&gt;nvml.dll&lt;/code&gt; from &lt;code&gt;target\release&lt;/code&gt; (if built from sources) or &lt;code&gt;zluda&lt;/code&gt; (if downloaded a zip package) into a path which your application uses to load CUDA. Paths vary application to application, but usually it&amp;rsquo;s the directory where the .exe file is located&lt;/li&gt;
&lt;li&gt;Use ZLUDA launcher like below. ZLUDA launcher is known to be buggy and incomplete:
&lt;div class=&#34;highlight&#34;&gt;&lt;div class=&#34;chroma&#34;&gt;
&lt;table class=&#34;lntable&#34;&gt;&lt;tr&gt;&lt;td class=&#34;lntd&#34;&gt;
&lt;pre tabindex=&#34;0&#34; class=&#34;chroma&#34;&gt;&lt;code&gt;&lt;span class=&#34;lnt&#34;&gt;1
&lt;/span&gt;&lt;/code&gt;&lt;/pre&gt;&lt;/td&gt;
&lt;td class=&#34;lntd&#34;&gt;
&lt;pre tabindex=&#34;0&#34; class=&#34;chroma&#34;&gt;&lt;code class=&#34;language-fallback&#34; data-lang=&#34;fallback&#34;&gt;&lt;span class=&#34;line&#34;&gt;&lt;span class=&#34;cl&#34;&gt;&amp;lt;ZLUDA_DIRECTORY&amp;gt;\zluda_with.exe -- &amp;lt;APPLICATION&amp;gt; &amp;lt;APPLICATIONS_ARGUMENTS&amp;gt;
&lt;/span&gt;&lt;/span&gt;&lt;/code&gt;&lt;/pre&gt;&lt;/td&gt;&lt;/tr&gt;&lt;/table&gt;
&lt;/div&gt;
&lt;/div&gt;&lt;/li&gt;
&lt;/ul&gt;
&lt;h3 id=&#34;linux&#34;&gt;Linux
&lt;/h3&gt;&lt;p&gt;Run your application like this:&lt;/p&gt;
&lt;div class=&#34;highlight&#34;&gt;&lt;div class=&#34;chroma&#34;&gt;
&lt;table class=&#34;lntable&#34;&gt;&lt;tr&gt;&lt;td class=&#34;lntd&#34;&gt;
&lt;pre tabindex=&#34;0&#34; class=&#34;chroma&#34;&gt;&lt;code&gt;&lt;span class=&#34;lnt&#34;&gt;1
&lt;/span&gt;&lt;/code&gt;&lt;/pre&gt;&lt;/td&gt;
&lt;td class=&#34;lntd&#34;&gt;
&lt;pre tabindex=&#34;0&#34; class=&#34;chroma&#34;&gt;&lt;code class=&#34;language-fallback&#34; data-lang=&#34;fallback&#34;&gt;&lt;span class=&#34;line&#34;&gt;&lt;span class=&#34;cl&#34;&gt;LD_LIBRARY_PATH=&amp;lt;ZLUDA_DIRECTORY&amp;gt; &amp;lt;APPLICATION&amp;gt; &amp;lt;APPLICATIONS_ARGUMENTS&amp;gt;
&lt;/span&gt;&lt;/span&gt;&lt;/code&gt;&lt;/pre&gt;&lt;/td&gt;&lt;/tr&gt;&lt;/table&gt;
&lt;/div&gt;
&lt;/div&gt;&lt;p&gt;where &lt;code&gt;&amp;lt;ZLUDA_DIRECTORY&amp;gt;&lt;/code&gt; is the directory which contains ZLUDA-provided &lt;code&gt;libcuda.so&lt;/code&gt;: &lt;code&gt;target/release&lt;/code&gt; if you built from sources or &lt;code&gt;zluda&lt;/code&gt; if you downloaded prebuilt package.&lt;/p&gt;
&lt;h3 id=&#34;macos&#34;&gt;MacOS
&lt;/h3&gt;&lt;p&gt;Not supported&lt;/p&gt;
&lt;h2 id=&#34;building&#34;&gt;Building
&lt;/h2&gt;&lt;h3 id=&#34;dependencies&#34;&gt;Dependencies
&lt;/h3&gt;&lt;ul&gt;
&lt;li&gt;Git&lt;/li&gt;
&lt;li&gt;CMake&lt;/li&gt;
&lt;li&gt;Python 3&lt;/li&gt;
&lt;li&gt;Rust compiler (recent version)&lt;/li&gt;
&lt;li&gt;C++ compiler&lt;/li&gt;
&lt;li&gt;(Optional, but recommended) &lt;a class=&#34;link&#34; href=&#34;https://ninja-build.org/&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;Ninja build system&lt;/a&gt;&lt;/li&gt;
&lt;/ul&gt;
&lt;h3 id=&#34;build-steps&#34;&gt;Build steps
&lt;/h3&gt;&lt;ul&gt;
&lt;li&gt;Git clone the repo (make sure to use &lt;code&gt;--recursive&lt;/code&gt; option to fetch submodules):&lt;br&gt;
&lt;code&gt;git clone --recursive https://github.com/vosen/ZLUDA.git&lt;/code&gt;&lt;/li&gt;
&lt;li&gt;Enter freshly cloned &lt;code&gt;ZLUDA&lt;/code&gt; directory and build with cargo (this takes a while):&lt;br&gt;
&lt;code&gt;cargo xtask --release&lt;/code&gt;&lt;/li&gt;
&lt;/ul&gt;
&lt;h2 id=&#34;contributing&#34;&gt;Contributing
&lt;/h2&gt;&lt;p&gt;ZLUDA project has a commercial backing and &lt;em&gt;does not&lt;/em&gt; accept donations.
ZLUDA project accepts pull requests and other non-monetary contributions.&lt;/p&gt;
&lt;p&gt;If you want to contribute a code fix or documentation update feel free to open a Pull Request.&lt;/p&gt;
&lt;h3 id=&#34;getting-started&#34;&gt;Getting started
&lt;/h3&gt;&lt;p&gt;There&amp;rsquo;s no architecture document (yet). Two most important crates in ZLUDA are &lt;code&gt;ptx&lt;/code&gt; (PTX compiler) and &lt;code&gt;zluda&lt;/code&gt; (AMD GPU runtime). A good starting point to tinkering the project is to run one of the &lt;code&gt;ptx&lt;/code&gt; unit tests under a debugger and understand what it is doing. &lt;code&gt;cargo test -p ptx -- ::add_hip&lt;/code&gt; is a simple test that adds two numbers.&lt;/p&gt;
&lt;p&gt;Github issues tagged with &lt;a class=&#34;link&#34; href=&#34;https://github.com/vosen/ZLUDA/issues?q=is%3Aissue&amp;#43;is%3Aopen&amp;#43;label%3A%22help&amp;#43;wanted%22&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;&amp;ldquo;help wanted&amp;rdquo;&lt;/a&gt; are tasks that are self-contained. Their level of difficulty varies, and they are not always good beginner tasks, but they are defined unambiguously.&lt;/p&gt;
&lt;p&gt;If you have questions feel free to ask on &lt;a class=&#34;link&#34; href=&#34;https://discord.com/channels/1273316903783497778/1303329281409159270&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;#devtalk channel on Discord&lt;/a&gt;.&lt;/p&gt;
&lt;h2 id=&#34;license&#34;&gt;License
&lt;/h2&gt;&lt;p&gt;This software is dual-licensed under either the Apache 2.0 license or the MIT license. See &lt;a class=&#34;link&#34; href=&#34;LICENSE-APACHE&#34; &gt;LICENSE-APACHE&lt;/a&gt; or &lt;a class=&#34;link&#34; href=&#34;LICENSE-MIT&#34; &gt;LICENSE-MIT&lt;/a&gt; for details&lt;/p&gt;
</description>
        </item>
        <item>
        <title>rl-swarm</title>
        <link>https://producthunt.programnotes.cn/en/p/rl-swarm/</link>
        <pubDate>Sat, 28 Jun 2025 15:28:05 +0800</pubDate>
        
        <guid>https://producthunt.programnotes.cn/en/p/rl-swarm/</guid>
        <description>&lt;img src="https://images.unsplash.com/photo-1584785933913-feb6e407f2a2?ixid=M3w0NjAwMjJ8MHwxfHJhbmRvbXx8fHx8fHx8fDE3NTEwOTU2MjR8&amp;ixlib=rb-4.1.0" alt="Featured image of post rl-swarm" /&gt;&lt;h1 id=&#34;gensyn-airl-swarm&#34;&gt;&lt;a class=&#34;link&#34; href=&#34;https://github.com/gensyn-ai/rl-swarm&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;gensyn-ai/rl-swarm&lt;/a&gt;
&lt;/h1&gt;&lt;h1 id=&#34;rl-swarm&#34;&gt;RL Swarm
&lt;/h1&gt;&lt;p&gt;RL Swarm is a peer-to-peer system for reinforcement learning. It allows you to train models collaboratively with others in the swarm, leveraging their collective intelligence. It is open source and permissionless, meaning you can run it on a consumer laptop at home or on a powerful GPU in the cloud. You can also connect your model to the Gensyn Testnet to receive an on-chain identity that tracks your progress over time.&lt;/p&gt;
&lt;p&gt;Currently, we are running the &lt;a class=&#34;link&#34; href=&#34;https://github.com/open-thought/reasoning-gym/tree/main&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;reasoning-gym&lt;/a&gt; swarm on the Testnet. This swarm is designed to train models to solve a diverse set of reasoning tasks using the reasoning-gym dataset. The current list of default models includes:&lt;/p&gt;
&lt;p&gt;Models:&lt;/p&gt;
&lt;ul&gt;
&lt;li&gt;Gensyn/Qwen2.5-0.5B-Instruct&lt;/li&gt;
&lt;li&gt;Qwen/Qwen3-0.6B&lt;/li&gt;
&lt;li&gt;nvidia/AceInstruct-1.5B&lt;/li&gt;
&lt;li&gt;dnotitia/Smoothie-Qwen3-1.7B&lt;/li&gt;
&lt;li&gt;Gensyn/Qwen2.5-1.5B-Instruct&lt;/li&gt;
&lt;/ul&gt;
&lt;p&gt;This iteration of rl-swarm is powered by the &lt;a class=&#34;link&#34; href=&#34;https://github.com/gensyn-ai/genrl-swarm&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;GenRL-Swarm&lt;/a&gt; library.  It is a fully composable framework for decentralized reinforcement learning which enables users to create and customize their own swarms for reinforcement learning with multi-agent multi-stage environments.&lt;/p&gt;
&lt;h2 id=&#34;requirements&#34;&gt;Requirements
&lt;/h2&gt;&lt;p&gt;Your hardware requirements will vary depending on a number of factors including model size and the accelerator platform you use.  Users running large NVIDIA GPU will be assigned a model from the large model pool, while users running less powerful hardware will be assigned a model from the small model pool. This design decision is intended to allow users to advance at a similar rate regardless of the hardware they use, maximizing their utility to the swarm.&lt;/p&gt;
&lt;p&gt;&lt;strong&gt;Supported Hardware&lt;/strong&gt;&lt;/p&gt;
&lt;ul&gt;
&lt;li&gt;arm64 or x86 CPU with minimum 32gb ram (note that if you run other applications during training it might crash training).&lt;/li&gt;
&lt;/ul&gt;
&lt;p&gt;OR&lt;/p&gt;
&lt;ul&gt;
&lt;li&gt;CUDA devices (officially supported):
&lt;ul&gt;
&lt;li&gt;RTX 3090&lt;/li&gt;
&lt;li&gt;RTX 4090&lt;/li&gt;
&lt;li&gt;RTX 5090&lt;/li&gt;
&lt;li&gt;A100&lt;/li&gt;
&lt;li&gt;H100&lt;/li&gt;
&lt;/ul&gt;
&lt;/li&gt;
&lt;/ul&gt;
&lt;p&gt;With either configuration, you will need Python &amp;gt;=3.10 (for Mac, you will likely need to upgrade).&lt;/p&gt;
&lt;h2 id=&#34;-please-read-before-continuing-&#34;&gt;⚠️ Please read before continuing ⚠️
&lt;/h2&gt;&lt;p&gt;This software is &lt;strong&gt;experimental&lt;/strong&gt; and provided as-is for users who are interested in using (or helping to develop) an early version of the Gensyn Protocol for training models.&lt;/p&gt;
&lt;p&gt;If you care about on-chain participation, you &lt;strong&gt;must&lt;/strong&gt; read the &lt;a class=&#34;link&#34; href=&#34;#identity-management&#34; &gt;Identity Management&lt;/a&gt; section below.&lt;/p&gt;
&lt;p&gt;If you encounter issues, please first check &lt;a class=&#34;link&#34; href=&#34;#troubleshooting&#34; &gt;Troubleshooting&lt;/a&gt;. If you cannot find a solution there, please check if there is an open (or closed) &lt;a class=&#34;link&#34; href=&#34;../../issues&#34; &gt;Issue&lt;/a&gt;. If there is no relevant issue, please file one and include 1) all relevant &lt;a class=&#34;link&#34; href=&#34;#troubleshooting&#34; &gt;logs&lt;/a&gt;, 2) information about your device (e.g. which GPU, if relevant), and 3) your operating system information.&lt;/p&gt;
&lt;h2 id=&#34;instructions&#34;&gt;Instructions
&lt;/h2&gt;&lt;h3 id=&#34;run-the-swarm&#34;&gt;Run the Swarm
&lt;/h3&gt;&lt;p&gt;The easiest way to run RL Swarm is using Docker. This ensures a consistent setup across all operating systems with minimal dependencies.&lt;/p&gt;
&lt;h4 id=&#34;1-clone-this-repo&#34;&gt;1. Clone this repo
&lt;/h4&gt;&lt;div class=&#34;highlight&#34;&gt;&lt;div class=&#34;chroma&#34;&gt;
&lt;table class=&#34;lntable&#34;&gt;&lt;tr&gt;&lt;td class=&#34;lntd&#34;&gt;
&lt;pre tabindex=&#34;0&#34; class=&#34;chroma&#34;&gt;&lt;code&gt;&lt;span class=&#34;lnt&#34;&gt;1
&lt;/span&gt;&lt;/code&gt;&lt;/pre&gt;&lt;/td&gt;
&lt;td class=&#34;lntd&#34;&gt;
&lt;pre tabindex=&#34;0&#34; class=&#34;chroma&#34;&gt;&lt;code class=&#34;language-sh&#34; data-lang=&#34;sh&#34;&gt;&lt;span class=&#34;line&#34;&gt;&lt;span class=&#34;cl&#34;&gt;git clone https://github.com/gensyn-ai/rl-swarm
&lt;/span&gt;&lt;/span&gt;&lt;/code&gt;&lt;/pre&gt;&lt;/td&gt;&lt;/tr&gt;&lt;/table&gt;
&lt;/div&gt;
&lt;/div&gt;&lt;h4 id=&#34;2-install-docker&#34;&gt;2. Install Docker
&lt;/h4&gt;&lt;p&gt;Make sure you have Docker installed and the Docker daemon is running on your machine. To do that, follow &lt;a class=&#34;link&#34; href=&#34;https://docs.docker.com/get-started/get-docker/&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;these instructions&lt;/a&gt; according to your OS. Ensure you allot sufficient memory to the Docker containers. For example if using Docker Desktop, this can be done by going to Docker Desktop Settings &amp;gt; Resources &amp;gt; Advanced &amp;gt; Memory Limit, and increasing it to the maximum possible value.&lt;/p&gt;
&lt;h4 id=&#34;3-start-the-swarm&#34;&gt;3. Start the Swarm
&lt;/h4&gt;&lt;p&gt;Run the following commands from the root of the repository.&lt;/p&gt;
&lt;h5 id=&#34;cpu-support&#34;&gt;CPU support
&lt;/h5&gt;&lt;p&gt;If you’re using a Mac or if your machine has CPU-only support:&lt;/p&gt;
&lt;div class=&#34;highlight&#34;&gt;&lt;div class=&#34;chroma&#34;&gt;
&lt;table class=&#34;lntable&#34;&gt;&lt;tr&gt;&lt;td class=&#34;lntd&#34;&gt;
&lt;pre tabindex=&#34;0&#34; class=&#34;chroma&#34;&gt;&lt;code&gt;&lt;span class=&#34;lnt&#34;&gt;1
&lt;/span&gt;&lt;/code&gt;&lt;/pre&gt;&lt;/td&gt;
&lt;td class=&#34;lntd&#34;&gt;
&lt;pre tabindex=&#34;0&#34; class=&#34;chroma&#34;&gt;&lt;code class=&#34;language-sh&#34; data-lang=&#34;sh&#34;&gt;&lt;span class=&#34;line&#34;&gt;&lt;span class=&#34;cl&#34;&gt;docker-compose run --rm --build -Pit swarm-cpu
&lt;/span&gt;&lt;/span&gt;&lt;/code&gt;&lt;/pre&gt;&lt;/td&gt;&lt;/tr&gt;&lt;/table&gt;
&lt;/div&gt;
&lt;/div&gt;&lt;h5 id=&#34;gpu-support&#34;&gt;GPU support
&lt;/h5&gt;&lt;p&gt;If you&amp;rsquo;re using a machine with an officially supported GPU:&lt;/p&gt;
&lt;div class=&#34;highlight&#34;&gt;&lt;div class=&#34;chroma&#34;&gt;
&lt;table class=&#34;lntable&#34;&gt;&lt;tr&gt;&lt;td class=&#34;lntd&#34;&gt;
&lt;pre tabindex=&#34;0&#34; class=&#34;chroma&#34;&gt;&lt;code&gt;&lt;span class=&#34;lnt&#34;&gt;1
&lt;/span&gt;&lt;/code&gt;&lt;/pre&gt;&lt;/td&gt;
&lt;td class=&#34;lntd&#34;&gt;
&lt;pre tabindex=&#34;0&#34; class=&#34;chroma&#34;&gt;&lt;code class=&#34;language-sh&#34; data-lang=&#34;sh&#34;&gt;&lt;span class=&#34;line&#34;&gt;&lt;span class=&#34;cl&#34;&gt;docker-compose run --rm --build -Pit swarm-gpu
&lt;/span&gt;&lt;/span&gt;&lt;/code&gt;&lt;/pre&gt;&lt;/td&gt;&lt;/tr&gt;&lt;/table&gt;
&lt;/div&gt;
&lt;/div&gt;&lt;h5 id=&#34;docker-compose-issue&#34;&gt;Docker compose issue
&lt;/h5&gt;&lt;p&gt;If &lt;code&gt;docker-compose&lt;/code&gt; does not work when running the above commands, please try &lt;code&gt;docker compose&lt;/code&gt; (no hyphen) instead. I.e. &lt;code&gt; docker compose run --rm --build -Pit swarm-gpu&lt;/code&gt;. This issue sometimes occurs for users running Ubuntu.&lt;/p&gt;
&lt;h3 id=&#34;experimental-advanced-mode&#34;&gt;Experimental (advanced) mode
&lt;/h3&gt;&lt;p&gt;If you want to experiment with the &lt;a class=&#34;link&#34; href=&#34;https://github.com/gensyn-ai/genrl-swarm&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;GenRL-Swarm&lt;/a&gt; library and its &lt;a class=&#34;link&#34; href=&#34;https://github.com/gensyn-ai/genrl-swarm/blob/main/recipes/rgym/rg-swarm.yaml&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;configurable parameters&lt;/a&gt;, we recommend you run RL Swarm via shell script:&lt;/p&gt;
&lt;div class=&#34;highlight&#34;&gt;&lt;div class=&#34;chroma&#34;&gt;
&lt;table class=&#34;lntable&#34;&gt;&lt;tr&gt;&lt;td class=&#34;lntd&#34;&gt;
&lt;pre tabindex=&#34;0&#34; class=&#34;chroma&#34;&gt;&lt;code&gt;&lt;span class=&#34;lnt&#34;&gt;1
&lt;/span&gt;&lt;span class=&#34;lnt&#34;&gt;2
&lt;/span&gt;&lt;span class=&#34;lnt&#34;&gt;3
&lt;/span&gt;&lt;/code&gt;&lt;/pre&gt;&lt;/td&gt;
&lt;td class=&#34;lntd&#34;&gt;
&lt;pre tabindex=&#34;0&#34; class=&#34;chroma&#34;&gt;&lt;code class=&#34;language-sh&#34; data-lang=&#34;sh&#34;&gt;&lt;span class=&#34;line&#34;&gt;&lt;span class=&#34;cl&#34;&gt;python3 -m venv .venv
&lt;/span&gt;&lt;/span&gt;&lt;span class=&#34;line&#34;&gt;&lt;span class=&#34;cl&#34;&gt;&lt;span class=&#34;nb&#34;&gt;source&lt;/span&gt; .venv/bin/activate
&lt;/span&gt;&lt;/span&gt;&lt;span class=&#34;line&#34;&gt;&lt;span class=&#34;cl&#34;&gt;./run_rl_swarm.sh
&lt;/span&gt;&lt;/span&gt;&lt;/code&gt;&lt;/pre&gt;&lt;/td&gt;&lt;/tr&gt;&lt;/table&gt;
&lt;/div&gt;
&lt;/div&gt;&lt;p&gt;To learn more about experimental mode, check out our &lt;a class=&#34;link&#34; href=&#34;https://github.com/gensyn-ai/genrl-swarm/blob/main/getting_started.ipynb&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;getting started guide&lt;/a&gt;.&lt;/p&gt;
&lt;h3 id=&#34;login&#34;&gt;Login
&lt;/h3&gt;&lt;ol&gt;
&lt;li&gt;A browser window will pop open (you&amp;rsquo;ll need to manually navigate to http://localhost:3000/ if you&amp;rsquo;re on a VM).&lt;/li&gt;
&lt;li&gt;Click &amp;rsquo;login&#39;.&lt;/li&gt;
&lt;li&gt;Login with your preferred method.&lt;/li&gt;
&lt;/ol&gt;
&lt;h3 id=&#34;huggingface&#34;&gt;Huggingface
&lt;/h3&gt;&lt;p&gt;If you would like to upload your model to Hugging Face, enter your Hugging Face access token when prompted. You can generate one from your Hugging Face account, under &lt;a class=&#34;link&#34; href=&#34;https://huggingface.co/docs/hub/en/security-tokens&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;Access Tokens&lt;/a&gt;.&lt;/p&gt;
&lt;h3 id=&#34;initial-peering-and-training&#34;&gt;Initial peering and training
&lt;/h3&gt;&lt;p&gt;From this stage onward your device will begin training. You should see your peer register and vote on-chain &lt;a class=&#34;link&#34; href=&#34;https://gensyn-testnet.explorer.alchemy.com/address/0xFaD7C5e93f28257429569B854151A1B8DCD404c2?tab=logs&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;here&lt;/a&gt;.&lt;/p&gt;
&lt;p&gt;You can also track your training progress in real time:&lt;/p&gt;
&lt;ul&gt;
&lt;li&gt;On The RL-Swarm Dashboard: &lt;a class=&#34;link&#34; href=&#34;https://dashboard.gensyn.ai&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;dashboard.gensyn.ai&lt;/a&gt;&lt;/li&gt;
&lt;/ul&gt;
&lt;h2 id=&#34;identity-management&#34;&gt;Identity management
&lt;/h2&gt;&lt;h3 id=&#34;introduction&#34;&gt;Introduction
&lt;/h3&gt;&lt;p&gt;On-chain identity is managed via an Alchemy modal sign-in screen. You need to supply an email address or login via a supported method (e.g. Google). This creates an EOA public/private key (which are stored by Alchemy). You will also receive local session keys in the &lt;code&gt;userApiKey&lt;/code&gt;. Note that these aren&amp;rsquo;t your EOA public/private keys.&lt;/p&gt;
&lt;p&gt;During the initial set-up process, you will also create a &lt;code&gt;swarm.pem&lt;/code&gt; file which maintains the identity of your peer. This is then registered on chain using the EOA wallet hosted in Alchemy, triggered using your local api keys. This links the &lt;code&gt;swarm.pem&lt;/code&gt; to the &lt;code&gt;email address&lt;/code&gt; (and corresponding EOA in Alchemy).&lt;/p&gt;
&lt;p&gt;&lt;strong&gt;If you want to link multiple nodes to a single EOA&lt;/strong&gt;, simply sign up each node using the same email address. You will get a new peer ID for each node, however they will all be linked to the same EOA that your email is linked to.&lt;/p&gt;
&lt;p&gt;&lt;strong&gt;Please note&lt;/strong&gt;: if you are using a fork of this repo, or a service organised by someone else (e.g. a &amp;lsquo;one click deployment&amp;rsquo; provider) the identity management flow below is not guaranteed.&lt;/p&gt;
&lt;h3 id=&#34;what-this-means&#34;&gt;What this means
&lt;/h3&gt;&lt;p&gt;In the following two scenarios, everything will work (i.e. you will have an on-chain identity linked with your RL Swarm peer training):&lt;/p&gt;
&lt;ul&gt;
&lt;li&gt;The very first time you run the node from scratch with a new email address. The smart account will be created fresh and linked with the swarm.pem that is also fresh.&lt;/li&gt;
&lt;li&gt;If you run it again with a &lt;code&gt;swarm.pem&lt;/code&gt; AND login the original &lt;code&gt;email address&lt;/code&gt; used with that &lt;code&gt;swarm.pem&lt;/code&gt;. Note: this will throw an error into the log on registration but will still be able to sign transactions.&lt;/li&gt;
&lt;/ul&gt;
&lt;p&gt;In the following two scenarios, it will not work (i.e. you won&amp;rsquo;t have an on-chain identity linked with your RL Swarm peer training):&lt;/p&gt;
&lt;ul&gt;
&lt;li&gt;If you keep your &lt;code&gt;swarm.pem&lt;/code&gt; and try to link it to an &lt;code&gt;email address&lt;/code&gt; distinct from the one with which it was first registered.&lt;/li&gt;
&lt;/ul&gt;
&lt;p&gt;Therefore, you should do these actions in the following scenarios:&lt;/p&gt;
&lt;ul&gt;
&lt;li&gt;&lt;strong&gt;Signed up with &lt;code&gt;email address&lt;/code&gt;, generated &lt;code&gt;swarm.pem&lt;/code&gt;, BUT lost &lt;code&gt;swarm.pem&lt;/code&gt;&lt;/strong&gt; OR &lt;strong&gt;You want to run multiple nodes at once&lt;/strong&gt;: run from scratch with the same email address and generate a new &lt;code&gt;swarm.pem&lt;/code&gt;.&lt;/li&gt;
&lt;li&gt;&lt;strong&gt;Signed up with &lt;code&gt;email address&lt;/code&gt;, generated &lt;code&gt;swarm.pem&lt;/code&gt;, kept &lt;code&gt;swarm.pem&lt;/code&gt;&lt;/strong&gt; -&amp;gt; you can re-run a single node using this pair if you&amp;rsquo;ve still got them both.&lt;/li&gt;
&lt;/ul&gt;
&lt;h2 id=&#34;troubleshooting&#34;&gt;Troubleshooting
&lt;/h2&gt;&lt;ul&gt;
&lt;li&gt;
&lt;p&gt;&lt;strong&gt;How do I find my logs?&lt;/strong&gt; You can find them inside the &lt;code&gt;/logs&lt;/code&gt; directory:&lt;/p&gt;
&lt;ul&gt;
&lt;li&gt;&lt;code&gt;yarn.log&lt;/code&gt;: This file contains logs for the modal login server.&lt;/li&gt;
&lt;li&gt;&lt;code&gt;swarm.log&lt;/code&gt;: This is the main log file for the RL Swarm application.&lt;/li&gt;
&lt;li&gt;&lt;code&gt;wandb/&lt;/code&gt;: This directory contains various logs related to your training runs, including a &lt;code&gt;debug.log&lt;/code&gt; file. These can be uploaded to Weights &amp;amp; Biases (only available if you log_with wandb).&lt;/li&gt;
&lt;/ul&gt;
&lt;/li&gt;
&lt;li&gt;
&lt;p&gt;&lt;strong&gt;My peer &amp;lsquo;skipped a round&amp;rsquo;&lt;/strong&gt;: this occurs when your device isn&amp;rsquo;t fast enough to keep up with the pace of the swarm. For example, if you start training at round 100 and by the time you finish training the rest of the swarm reaches round 102, you will skip round 101 and go straight to 102. This is because your peer is more valuable if it is participating in the active round.&lt;/p&gt;
&lt;/li&gt;
&lt;li&gt;
&lt;p&gt;&lt;strong&gt;My model doesn&amp;rsquo;t seem to be training?&lt;/strong&gt;&lt;/p&gt;
&lt;ul&gt;
&lt;li&gt;If you&amp;rsquo;re using a consumer device (e.g. a MacBook), it is likely just running slowly - check back in 20 minutes.&lt;/li&gt;
&lt;/ul&gt;
&lt;/li&gt;
&lt;li&gt;
&lt;p&gt;&lt;strong&gt;Logging in with a new account after previous login?&lt;/strong&gt;&lt;/p&gt;
&lt;ul&gt;
&lt;li&gt;Make sure you click &amp;lsquo;Logout&amp;rsquo; on the login screen before you leave your previous session&lt;/li&gt;
&lt;li&gt;Make sure you delete &lt;code&gt;swarm.pem&lt;/code&gt; from the root directory (try &lt;code&gt;sudo rm swarm.pem&lt;/code&gt;). If you don&amp;rsquo;t do this, and you previously registered with the peer-id stored in this file, it will disrupt the training process.&lt;/li&gt;
&lt;/ul&gt;
&lt;/li&gt;
&lt;li&gt;
&lt;p&gt;&lt;strong&gt;Issues with the Login screen&lt;/strong&gt;&lt;/p&gt;
&lt;ul&gt;
&lt;li&gt;&lt;strong&gt;Upgrade viem&lt;/strong&gt;: some users report issues with the &lt;code&gt;viem&lt;/code&gt; package. There are two fixes:
&lt;ul&gt;
&lt;li&gt;in the &lt;code&gt;modal-login/package.json&lt;/code&gt; update: &lt;code&gt;&amp;quot;viem&amp;quot;: &amp;quot;2.25.0&amp;quot;&lt;/code&gt;&lt;/li&gt;
&lt;li&gt;in the terminal &lt;code&gt;cd /root/rl-swarm/modal-login/ &amp;amp;&amp;amp; yarn upgrade &amp;amp;&amp;amp; yarn add next@latest &amp;amp;&amp;amp; yarn add viem@latest&lt;/code&gt;&lt;/li&gt;
&lt;/ul&gt;
&lt;/li&gt;
&lt;/ul&gt;
&lt;/li&gt;
&lt;li&gt;
&lt;p&gt;&lt;strong&gt;I&amp;rsquo;m getting lots of warnings&lt;/strong&gt;&lt;/p&gt;
&lt;ul&gt;
&lt;li&gt;This is expected behaviour and usually the output of the package managers or other dependencies. The most common is the below Protobuf warning - which can be ignored
&lt;div class=&#34;highlight&#34;&gt;&lt;div class=&#34;chroma&#34;&gt;
&lt;table class=&#34;lntable&#34;&gt;&lt;tr&gt;&lt;td class=&#34;lntd&#34;&gt;
&lt;pre tabindex=&#34;0&#34; class=&#34;chroma&#34;&gt;&lt;code&gt;&lt;span class=&#34;lnt&#34;&gt;1
&lt;/span&gt;&lt;/code&gt;&lt;/pre&gt;&lt;/td&gt;
&lt;td class=&#34;lntd&#34;&gt;
&lt;pre tabindex=&#34;0&#34; class=&#34;chroma&#34;&gt;&lt;code class=&#34;language-gdscript3&#34; data-lang=&#34;gdscript3&#34;&gt;&lt;span class=&#34;line&#34;&gt;&lt;span class=&#34;cl&#34;&gt;&lt;span class=&#34;n&#34;&gt;WARNING&lt;/span&gt;&lt;span class=&#34;p&#34;&gt;:&lt;/span&gt; &lt;span class=&#34;n&#34;&gt;The&lt;/span&gt; &lt;span class=&#34;n&#34;&gt;candidate&lt;/span&gt; &lt;span class=&#34;n&#34;&gt;selected&lt;/span&gt; &lt;span class=&#34;k&#34;&gt;for&lt;/span&gt; &lt;span class=&#34;n&#34;&gt;download&lt;/span&gt; &lt;span class=&#34;ow&#34;&gt;or&lt;/span&gt; &lt;span class=&#34;n&#34;&gt;install&lt;/span&gt; &lt;span class=&#34;n&#34;&gt;is&lt;/span&gt; &lt;span class=&#34;n&#34;&gt;a&lt;/span&gt; &lt;span class=&#34;n&#34;&gt;yanked&lt;/span&gt; &lt;span class=&#34;n&#34;&gt;version&lt;/span&gt;&lt;span class=&#34;p&#34;&gt;:&lt;/span&gt; &lt;span class=&#34;s1&#34;&gt;&amp;#39;protobuf&amp;#39;&lt;/span&gt; &lt;span class=&#34;n&#34;&gt;candidate&lt;/span&gt;&lt;span class=&#34;o&#34;&gt;...&lt;/span&gt;
&lt;/span&gt;&lt;/span&gt;&lt;/code&gt;&lt;/pre&gt;&lt;/td&gt;&lt;/tr&gt;&lt;/table&gt;
&lt;/div&gt;
&lt;/div&gt;&lt;/li&gt;
&lt;/ul&gt;
&lt;/li&gt;
&lt;li&gt;
&lt;p&gt;&lt;strong&gt;Issues on VMs/VPSs?&lt;/strong&gt;&lt;/p&gt;
&lt;ul&gt;
&lt;li&gt;
&lt;p&gt;&lt;strong&gt;How do I access the login screen if I&amp;rsquo;m running in a VM?&lt;/strong&gt;: port forwarding. Add this SSH flag: &lt;code&gt;-L 3000:localhost:3000&lt;/code&gt; when connecting to your VM. E.g. &lt;code&gt;gcloud compute ssh --zone &amp;quot;us-central1-a&amp;quot; [your-vm] --project [your-project] -- -L 3000:localhost:3000&lt;/code&gt;. Note, some VPSs may not work with &lt;code&gt;rl-swarm&lt;/code&gt;. Check the Gensyn &lt;a class=&#34;link&#34; href=&#34;https://discord.gg/AdnyWNzXh5&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;discord&lt;/a&gt; for up-to-date information on this.&lt;/p&gt;
&lt;/li&gt;
&lt;li&gt;
&lt;p&gt;&lt;strong&gt;Disconnection/general issues&lt;/strong&gt;: If you are tunneling to a VM and suffer a broken pipe, you will likely encounter OOM or unexpected behaviour the first time you relaunch the script. If you &lt;code&gt;control + c&lt;/code&gt; and kill the script it should spin down all background processes. Restart the script and everything should work normally.&lt;/p&gt;
&lt;/li&gt;
&lt;/ul&gt;
&lt;/li&gt;
&lt;li&gt;
&lt;p&gt;&lt;strong&gt;Issues with npm/general installation?&lt;/strong&gt;&lt;/p&gt;
&lt;ul&gt;
&lt;li&gt;Try  &lt;code&gt;npm install -g node@latest&lt;/code&gt;&lt;/li&gt;
&lt;/ul&gt;
&lt;/li&gt;
&lt;li&gt;
&lt;p&gt;&lt;strong&gt;OOM errors on MacBook?&lt;/strong&gt;&lt;/p&gt;
&lt;ul&gt;
&lt;li&gt;Try this (experimental) fix to increase memory:
&lt;div class=&#34;highlight&#34;&gt;&lt;div class=&#34;chroma&#34;&gt;
&lt;table class=&#34;lntable&#34;&gt;&lt;tr&gt;&lt;td class=&#34;lntd&#34;&gt;
&lt;pre tabindex=&#34;0&#34; class=&#34;chroma&#34;&gt;&lt;code&gt;&lt;span class=&#34;lnt&#34;&gt;1
&lt;/span&gt;&lt;/code&gt;&lt;/pre&gt;&lt;/td&gt;
&lt;td class=&#34;lntd&#34;&gt;
&lt;pre tabindex=&#34;0&#34; class=&#34;chroma&#34;&gt;&lt;code class=&#34;language-gdscript3&#34; data-lang=&#34;gdscript3&#34;&gt;&lt;span class=&#34;line&#34;&gt;&lt;span class=&#34;cl&#34;&gt;&lt;span class=&#34;k&#34;&gt;export&lt;/span&gt; &lt;span class=&#34;n&#34;&gt;PYTORCH_MPS_HIGH_WATERMARK_RATIO&lt;/span&gt;&lt;span class=&#34;o&#34;&gt;=&lt;/span&gt;&lt;span class=&#34;mf&#34;&gt;0.0&lt;/span&gt;
&lt;/span&gt;&lt;/span&gt;&lt;/code&gt;&lt;/pre&gt;&lt;/td&gt;&lt;/tr&gt;&lt;/table&gt;
&lt;/div&gt;
&lt;/div&gt;&lt;/li&gt;
&lt;/ul&gt;
&lt;/li&gt;
&lt;li&gt;
&lt;p&gt;&lt;strong&gt;I have a Windows machine, can I still train a model on the swarm?&lt;/strong&gt;: Yes - but this is not very well tested and may require you to do some debugging to get it set up properly. Install WSL and Linux on your Windows machine using the following instructions: &lt;a class=&#34;link&#34; href=&#34;https://learn.microsoft.com/en-us/windows/wsl/install&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;https://learn.microsoft.com/en-us/windows/wsl/install&lt;/a&gt;&lt;/p&gt;
&lt;/li&gt;
&lt;li&gt;
&lt;p&gt;&lt;strong&gt;I want to move my node to a different machine and/or restart with a fresh build of the repo, but I want my animal name/peer id to persist.&lt;/strong&gt;: To achieve this simply backup the &lt;code&gt;swarm.pem&lt;/code&gt; file on your current machine and then put it in the corresponding location on your new machine/build of the repo.&lt;/p&gt;
&lt;/li&gt;
&lt;li&gt;
&lt;p&gt;&lt;strong&gt;I have multiple GPUs on one machine, can I run multiple peers?&lt;/strong&gt;: Yes - but you&amp;rsquo;ll need to manually change things. You&amp;rsquo;ll need to isolate each GPU, install this repo for each GPU, and expose each peer under a different port to pass the modal onboard.&lt;/p&gt;
&lt;/li&gt;
&lt;li&gt;
&lt;p&gt;&lt;strong&gt;My round/stage is behind the smart contract/other peers?&lt;/strong&gt;: This is expected behaviour given the different speeds of machines in the network. Once your machine completes its current round, it will move to the current round.&lt;/p&gt;
&lt;/li&gt;
&lt;li&gt;
&lt;p&gt;&lt;strong&gt;I want to use a bigger and/or different model in the RL swarm, can I do that?&lt;/strong&gt;: Yes - but we only recommend doing so if you are comfortable understanding what size model can reasonably run on your hardware.  If you elect to bring a custom model, just paste the repo/model name into the command line when prompted.&lt;/p&gt;
&lt;/li&gt;
&lt;li&gt;
&lt;p&gt;&lt;strong&gt;I am running a model in the swarm on my CPU, have received a python &lt;code&gt;RuntimeError&lt;/code&gt;, and my training progress seems to have stopped.&lt;/strong&gt;: There are several possible causes for this, but before trying anything please wait long enough to be sure your training actually is frozen and not just slow (e.g., wait longer than a single training iteration has previously taken on your machine). If you&amp;rsquo;re sure training is actually frozen, then some things to try are:&lt;/p&gt;
&lt;ul&gt;
&lt;li&gt;Set this (experimental) fix: &lt;code&gt;export PYTORCH_MPS_HIGH_WATERMARK_RATIO=0.0 &amp;amp;&amp;amp; ./run_rl_swarm.sh&lt;/code&gt;&lt;/li&gt;
&lt;/ul&gt;
&lt;/li&gt;
&lt;/ul&gt;
</description>
        </item>
        <item>
        <title>tinygrad</title>
        <link>https://producthunt.programnotes.cn/en/p/tinygrad/</link>
        <pubDate>Wed, 21 May 2025 15:30:12 +0800</pubDate>
        
        <guid>https://producthunt.programnotes.cn/en/p/tinygrad/</guid>
        <description>&lt;img src="https://images.unsplash.com/photo-1718539503170-cec2c93a2f3d?ixid=M3w0NjAwMjJ8MHwxfHJhbmRvbXx8fHx8fHx8fDE3NDc4MTI0ODF8&amp;ixlib=rb-4.1.0" alt="Featured image of post tinygrad" /&gt;&lt;h1 id=&#34;tinygradtinygrad&#34;&gt;&lt;a class=&#34;link&#34; href=&#34;https://github.com/tinygrad/tinygrad&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;tinygrad/tinygrad&lt;/a&gt;
&lt;/h1&gt;&lt;div align=&#34;center&#34;&gt;
&lt;picture&gt;
  &lt;source media=&#34;(prefers-color-scheme: light)&#34; srcset=&#34;https://producthunt.programnotes.cn/docs/logo_tiny_light.svg&#34;&gt;
&lt;/picture&gt;
&lt;p&gt;tinygrad: For something between &lt;a class=&#34;link&#34; href=&#34;https://github.com/pytorch/pytorch&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;PyTorch&lt;/a&gt; and &lt;a class=&#34;link&#34; href=&#34;https://github.com/karpathy/micrograd&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;karpathy/micrograd&lt;/a&gt;. Maintained by &lt;a class=&#34;link&#34; href=&#34;https://tinygrad.org&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;tiny corp&lt;/a&gt;.&lt;/p&gt;
&lt;h3&gt;
&lt;p&gt;&lt;a class=&#34;link&#34; href=&#34;https://github.com/tinygrad/tinygrad&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;Homepage&lt;/a&gt; | &lt;a class=&#34;link&#34; href=&#34;https://docs.tinygrad.org/&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;Documentation&lt;/a&gt; | &lt;a class=&#34;link&#34; href=&#34;https://discord.gg/ZjZadyC7PK&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;Discord&lt;/a&gt;&lt;/p&gt;
&lt;/h3&gt;
&lt;p&gt;&lt;a class=&#34;link&#34; href=&#34;https://github.com/tinygrad/tinygrad/stargazers&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;&lt;img src=&#34;https://img.shields.io/github/stars/tinygrad/tinygrad&#34;
	
	
	
	loading=&#34;lazy&#34;
	
		alt=&#34;GitHub Repo stars&#34;
	
	
&gt;&lt;/a&gt;
&lt;a class=&#34;link&#34; href=&#34;https://github.com/tinygrad/tinygrad/actions/workflows/test.yml&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;&lt;img src=&#34;https://github.com/tinygrad/tinygrad/actions/workflows/test.yml/badge.svg&#34;
	
	
	
	loading=&#34;lazy&#34;
	
		alt=&#34;Unit Tests&#34;
	
	
&gt;&lt;/a&gt;
&lt;a class=&#34;link&#34; href=&#34;https://discord.gg/ZjZadyC7PK&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;&lt;img src=&#34;https://img.shields.io/discord/1068976834382925865&#34;
	
	
	
	loading=&#34;lazy&#34;
	
		alt=&#34;Discord&#34;
	
	
&gt;&lt;/a&gt;&lt;/p&gt;
&lt;/div&gt;
&lt;hr&gt;
&lt;p&gt;This may not be the best deep learning framework, but it is a deep learning framework.&lt;/p&gt;
&lt;p&gt;Due to its extreme simplicity, it aims to be the easiest framework to add new accelerators to, with support for both inference and training. If XLA is CISC, tinygrad is RISC.&lt;/p&gt;
&lt;p&gt;tinygrad is still alpha software, but we &lt;a class=&#34;link&#34; href=&#34;https://geohot.github.io/blog/jekyll/update/2023/05/24/the-tiny-corp-raised-5M.html&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;raised some money&lt;/a&gt; to make it good. Someday, we will tape out chips.&lt;/p&gt;
&lt;h2 id=&#34;features&#34;&gt;Features
&lt;/h2&gt;&lt;h3 id=&#34;llama-and-stable-diffusion&#34;&gt;LLaMA and Stable Diffusion
&lt;/h3&gt;&lt;p&gt;tinygrad can run &lt;a class=&#34;link&#34; href=&#34;https://producthunt.programnotes.cn/docs/showcase.md#llama&#34; &gt;LLaMA&lt;/a&gt; and &lt;a class=&#34;link&#34; href=&#34;https://producthunt.programnotes.cn/docs/showcase.md#stable-diffusion&#34; &gt;Stable Diffusion&lt;/a&gt;!&lt;/p&gt;
&lt;h3 id=&#34;laziness&#34;&gt;Laziness
&lt;/h3&gt;&lt;p&gt;Try a matmul. See how, despite the style, it is fused into one kernel with the power of laziness.&lt;/p&gt;
&lt;div class=&#34;highlight&#34;&gt;&lt;div class=&#34;chroma&#34;&gt;
&lt;table class=&#34;lntable&#34;&gt;&lt;tr&gt;&lt;td class=&#34;lntd&#34;&gt;
&lt;pre tabindex=&#34;0&#34; class=&#34;chroma&#34;&gt;&lt;code&gt;&lt;span class=&#34;lnt&#34;&gt;1
&lt;/span&gt;&lt;span class=&#34;lnt&#34;&gt;2
&lt;/span&gt;&lt;span class=&#34;lnt&#34;&gt;3
&lt;/span&gt;&lt;span class=&#34;lnt&#34;&gt;4
&lt;/span&gt;&lt;/code&gt;&lt;/pre&gt;&lt;/td&gt;
&lt;td class=&#34;lntd&#34;&gt;
&lt;pre tabindex=&#34;0&#34; class=&#34;chroma&#34;&gt;&lt;code class=&#34;language-sh&#34; data-lang=&#34;sh&#34;&gt;&lt;span class=&#34;line&#34;&gt;&lt;span class=&#34;cl&#34;&gt;&lt;span class=&#34;nv&#34;&gt;DEBUG&lt;/span&gt;&lt;span class=&#34;o&#34;&gt;=&lt;/span&gt;&lt;span class=&#34;m&#34;&gt;3&lt;/span&gt; python3 -c &lt;span class=&#34;s2&#34;&gt;&amp;#34;from tinygrad import Tensor;
&lt;/span&gt;&lt;/span&gt;&lt;/span&gt;&lt;span class=&#34;line&#34;&gt;&lt;span class=&#34;cl&#34;&gt;&lt;span class=&#34;s2&#34;&gt;N = 1024; a, b = Tensor.rand(N, N), Tensor.rand(N, N);
&lt;/span&gt;&lt;/span&gt;&lt;/span&gt;&lt;span class=&#34;line&#34;&gt;&lt;span class=&#34;cl&#34;&gt;&lt;span class=&#34;s2&#34;&gt;c = (a.reshape(N, 1, N) * b.T.reshape(1, N, N)).sum(axis=2);
&lt;/span&gt;&lt;/span&gt;&lt;/span&gt;&lt;span class=&#34;line&#34;&gt;&lt;span class=&#34;cl&#34;&gt;&lt;span class=&#34;s2&#34;&gt;print((c.numpy() - (a.numpy() @ b.numpy())).mean())&amp;#34;&lt;/span&gt;
&lt;/span&gt;&lt;/span&gt;&lt;/code&gt;&lt;/pre&gt;&lt;/td&gt;&lt;/tr&gt;&lt;/table&gt;
&lt;/div&gt;
&lt;/div&gt;&lt;p&gt;And we can change &lt;code&gt;DEBUG&lt;/code&gt; to &lt;code&gt;4&lt;/code&gt; to see the generated code.&lt;/p&gt;
&lt;h3 id=&#34;neural-networks&#34;&gt;Neural networks
&lt;/h3&gt;&lt;p&gt;As it turns out, 90% of what you need for neural networks are a decent autograd/tensor library.
Throw in an optimizer, a data loader, and some compute, and you have all you need.&lt;/p&gt;
&lt;div class=&#34;highlight&#34;&gt;&lt;div class=&#34;chroma&#34;&gt;
&lt;table class=&#34;lntable&#34;&gt;&lt;tr&gt;&lt;td class=&#34;lntd&#34;&gt;
&lt;pre tabindex=&#34;0&#34; class=&#34;chroma&#34;&gt;&lt;code&gt;&lt;span class=&#34;lnt&#34;&gt; 1
&lt;/span&gt;&lt;span class=&#34;lnt&#34;&gt; 2
&lt;/span&gt;&lt;span class=&#34;lnt&#34;&gt; 3
&lt;/span&gt;&lt;span class=&#34;lnt&#34;&gt; 4
&lt;/span&gt;&lt;span class=&#34;lnt&#34;&gt; 5
&lt;/span&gt;&lt;span class=&#34;lnt&#34;&gt; 6
&lt;/span&gt;&lt;span class=&#34;lnt&#34;&gt; 7
&lt;/span&gt;&lt;span class=&#34;lnt&#34;&gt; 8
&lt;/span&gt;&lt;span class=&#34;lnt&#34;&gt; 9
&lt;/span&gt;&lt;span class=&#34;lnt&#34;&gt;10
&lt;/span&gt;&lt;span class=&#34;lnt&#34;&gt;11
&lt;/span&gt;&lt;span class=&#34;lnt&#34;&gt;12
&lt;/span&gt;&lt;span class=&#34;lnt&#34;&gt;13
&lt;/span&gt;&lt;span class=&#34;lnt&#34;&gt;14
&lt;/span&gt;&lt;span class=&#34;lnt&#34;&gt;15
&lt;/span&gt;&lt;span class=&#34;lnt&#34;&gt;16
&lt;/span&gt;&lt;span class=&#34;lnt&#34;&gt;17
&lt;/span&gt;&lt;span class=&#34;lnt&#34;&gt;18
&lt;/span&gt;&lt;span class=&#34;lnt&#34;&gt;19
&lt;/span&gt;&lt;span class=&#34;lnt&#34;&gt;20
&lt;/span&gt;&lt;/code&gt;&lt;/pre&gt;&lt;/td&gt;
&lt;td class=&#34;lntd&#34;&gt;
&lt;pre tabindex=&#34;0&#34; class=&#34;chroma&#34;&gt;&lt;code class=&#34;language-python&#34; data-lang=&#34;python&#34;&gt;&lt;span class=&#34;line&#34;&gt;&lt;span class=&#34;cl&#34;&gt;&lt;span class=&#34;kn&#34;&gt;from&lt;/span&gt; &lt;span class=&#34;nn&#34;&gt;tinygrad&lt;/span&gt; &lt;span class=&#34;kn&#34;&gt;import&lt;/span&gt; &lt;span class=&#34;n&#34;&gt;Tensor&lt;/span&gt;&lt;span class=&#34;p&#34;&gt;,&lt;/span&gt; &lt;span class=&#34;n&#34;&gt;nn&lt;/span&gt;
&lt;/span&gt;&lt;/span&gt;&lt;span class=&#34;line&#34;&gt;&lt;span class=&#34;cl&#34;&gt;
&lt;/span&gt;&lt;/span&gt;&lt;span class=&#34;line&#34;&gt;&lt;span class=&#34;cl&#34;&gt;&lt;span class=&#34;k&#34;&gt;class&lt;/span&gt; &lt;span class=&#34;nc&#34;&gt;LinearNet&lt;/span&gt;&lt;span class=&#34;p&#34;&gt;:&lt;/span&gt;
&lt;/span&gt;&lt;/span&gt;&lt;span class=&#34;line&#34;&gt;&lt;span class=&#34;cl&#34;&gt;  &lt;span class=&#34;k&#34;&gt;def&lt;/span&gt; &lt;span class=&#34;fm&#34;&gt;__init__&lt;/span&gt;&lt;span class=&#34;p&#34;&gt;(&lt;/span&gt;&lt;span class=&#34;bp&#34;&gt;self&lt;/span&gt;&lt;span class=&#34;p&#34;&gt;):&lt;/span&gt;
&lt;/span&gt;&lt;/span&gt;&lt;span class=&#34;line&#34;&gt;&lt;span class=&#34;cl&#34;&gt;    &lt;span class=&#34;bp&#34;&gt;self&lt;/span&gt;&lt;span class=&#34;o&#34;&gt;.&lt;/span&gt;&lt;span class=&#34;n&#34;&gt;l1&lt;/span&gt; &lt;span class=&#34;o&#34;&gt;=&lt;/span&gt; &lt;span class=&#34;n&#34;&gt;Tensor&lt;/span&gt;&lt;span class=&#34;o&#34;&gt;.&lt;/span&gt;&lt;span class=&#34;n&#34;&gt;kaiming_uniform&lt;/span&gt;&lt;span class=&#34;p&#34;&gt;(&lt;/span&gt;&lt;span class=&#34;mi&#34;&gt;784&lt;/span&gt;&lt;span class=&#34;p&#34;&gt;,&lt;/span&gt; &lt;span class=&#34;mi&#34;&gt;128&lt;/span&gt;&lt;span class=&#34;p&#34;&gt;)&lt;/span&gt;
&lt;/span&gt;&lt;/span&gt;&lt;span class=&#34;line&#34;&gt;&lt;span class=&#34;cl&#34;&gt;    &lt;span class=&#34;bp&#34;&gt;self&lt;/span&gt;&lt;span class=&#34;o&#34;&gt;.&lt;/span&gt;&lt;span class=&#34;n&#34;&gt;l2&lt;/span&gt; &lt;span class=&#34;o&#34;&gt;=&lt;/span&gt; &lt;span class=&#34;n&#34;&gt;Tensor&lt;/span&gt;&lt;span class=&#34;o&#34;&gt;.&lt;/span&gt;&lt;span class=&#34;n&#34;&gt;kaiming_uniform&lt;/span&gt;&lt;span class=&#34;p&#34;&gt;(&lt;/span&gt;&lt;span class=&#34;mi&#34;&gt;128&lt;/span&gt;&lt;span class=&#34;p&#34;&gt;,&lt;/span&gt; &lt;span class=&#34;mi&#34;&gt;10&lt;/span&gt;&lt;span class=&#34;p&#34;&gt;)&lt;/span&gt;
&lt;/span&gt;&lt;/span&gt;&lt;span class=&#34;line&#34;&gt;&lt;span class=&#34;cl&#34;&gt;  &lt;span class=&#34;k&#34;&gt;def&lt;/span&gt; &lt;span class=&#34;fm&#34;&gt;__call__&lt;/span&gt;&lt;span class=&#34;p&#34;&gt;(&lt;/span&gt;&lt;span class=&#34;bp&#34;&gt;self&lt;/span&gt;&lt;span class=&#34;p&#34;&gt;,&lt;/span&gt; &lt;span class=&#34;n&#34;&gt;x&lt;/span&gt;&lt;span class=&#34;p&#34;&gt;:&lt;/span&gt;&lt;span class=&#34;n&#34;&gt;Tensor&lt;/span&gt;&lt;span class=&#34;p&#34;&gt;)&lt;/span&gt; &lt;span class=&#34;o&#34;&gt;-&amp;gt;&lt;/span&gt; &lt;span class=&#34;n&#34;&gt;Tensor&lt;/span&gt;&lt;span class=&#34;p&#34;&gt;:&lt;/span&gt;
&lt;/span&gt;&lt;/span&gt;&lt;span class=&#34;line&#34;&gt;&lt;span class=&#34;cl&#34;&gt;    &lt;span class=&#34;k&#34;&gt;return&lt;/span&gt; &lt;span class=&#34;n&#34;&gt;x&lt;/span&gt;&lt;span class=&#34;o&#34;&gt;.&lt;/span&gt;&lt;span class=&#34;n&#34;&gt;flatten&lt;/span&gt;&lt;span class=&#34;p&#34;&gt;(&lt;/span&gt;&lt;span class=&#34;mi&#34;&gt;1&lt;/span&gt;&lt;span class=&#34;p&#34;&gt;)&lt;/span&gt;&lt;span class=&#34;o&#34;&gt;.&lt;/span&gt;&lt;span class=&#34;n&#34;&gt;dot&lt;/span&gt;&lt;span class=&#34;p&#34;&gt;(&lt;/span&gt;&lt;span class=&#34;bp&#34;&gt;self&lt;/span&gt;&lt;span class=&#34;o&#34;&gt;.&lt;/span&gt;&lt;span class=&#34;n&#34;&gt;l1&lt;/span&gt;&lt;span class=&#34;p&#34;&gt;)&lt;/span&gt;&lt;span class=&#34;o&#34;&gt;.&lt;/span&gt;&lt;span class=&#34;n&#34;&gt;relu&lt;/span&gt;&lt;span class=&#34;p&#34;&gt;()&lt;/span&gt;&lt;span class=&#34;o&#34;&gt;.&lt;/span&gt;&lt;span class=&#34;n&#34;&gt;dot&lt;/span&gt;&lt;span class=&#34;p&#34;&gt;(&lt;/span&gt;&lt;span class=&#34;bp&#34;&gt;self&lt;/span&gt;&lt;span class=&#34;o&#34;&gt;.&lt;/span&gt;&lt;span class=&#34;n&#34;&gt;l2&lt;/span&gt;&lt;span class=&#34;p&#34;&gt;)&lt;/span&gt;
&lt;/span&gt;&lt;/span&gt;&lt;span class=&#34;line&#34;&gt;&lt;span class=&#34;cl&#34;&gt;
&lt;/span&gt;&lt;/span&gt;&lt;span class=&#34;line&#34;&gt;&lt;span class=&#34;cl&#34;&gt;&lt;span class=&#34;n&#34;&gt;model&lt;/span&gt; &lt;span class=&#34;o&#34;&gt;=&lt;/span&gt; &lt;span class=&#34;n&#34;&gt;LinearNet&lt;/span&gt;&lt;span class=&#34;p&#34;&gt;()&lt;/span&gt;
&lt;/span&gt;&lt;/span&gt;&lt;span class=&#34;line&#34;&gt;&lt;span class=&#34;cl&#34;&gt;&lt;span class=&#34;n&#34;&gt;optim&lt;/span&gt; &lt;span class=&#34;o&#34;&gt;=&lt;/span&gt; &lt;span class=&#34;n&#34;&gt;nn&lt;/span&gt;&lt;span class=&#34;o&#34;&gt;.&lt;/span&gt;&lt;span class=&#34;n&#34;&gt;optim&lt;/span&gt;&lt;span class=&#34;o&#34;&gt;.&lt;/span&gt;&lt;span class=&#34;n&#34;&gt;Adam&lt;/span&gt;&lt;span class=&#34;p&#34;&gt;([&lt;/span&gt;&lt;span class=&#34;n&#34;&gt;model&lt;/span&gt;&lt;span class=&#34;o&#34;&gt;.&lt;/span&gt;&lt;span class=&#34;n&#34;&gt;l1&lt;/span&gt;&lt;span class=&#34;p&#34;&gt;,&lt;/span&gt; &lt;span class=&#34;n&#34;&gt;model&lt;/span&gt;&lt;span class=&#34;o&#34;&gt;.&lt;/span&gt;&lt;span class=&#34;n&#34;&gt;l2&lt;/span&gt;&lt;span class=&#34;p&#34;&gt;],&lt;/span&gt; &lt;span class=&#34;n&#34;&gt;lr&lt;/span&gt;&lt;span class=&#34;o&#34;&gt;=&lt;/span&gt;&lt;span class=&#34;mf&#34;&gt;0.001&lt;/span&gt;&lt;span class=&#34;p&#34;&gt;)&lt;/span&gt;
&lt;/span&gt;&lt;/span&gt;&lt;span class=&#34;line&#34;&gt;&lt;span class=&#34;cl&#34;&gt;
&lt;/span&gt;&lt;/span&gt;&lt;span class=&#34;line&#34;&gt;&lt;span class=&#34;cl&#34;&gt;&lt;span class=&#34;n&#34;&gt;x&lt;/span&gt;&lt;span class=&#34;p&#34;&gt;,&lt;/span&gt; &lt;span class=&#34;n&#34;&gt;y&lt;/span&gt; &lt;span class=&#34;o&#34;&gt;=&lt;/span&gt; &lt;span class=&#34;n&#34;&gt;Tensor&lt;/span&gt;&lt;span class=&#34;o&#34;&gt;.&lt;/span&gt;&lt;span class=&#34;n&#34;&gt;rand&lt;/span&gt;&lt;span class=&#34;p&#34;&gt;(&lt;/span&gt;&lt;span class=&#34;mi&#34;&gt;4&lt;/span&gt;&lt;span class=&#34;p&#34;&gt;,&lt;/span&gt; &lt;span class=&#34;mi&#34;&gt;1&lt;/span&gt;&lt;span class=&#34;p&#34;&gt;,&lt;/span&gt; &lt;span class=&#34;mi&#34;&gt;28&lt;/span&gt;&lt;span class=&#34;p&#34;&gt;,&lt;/span&gt; &lt;span class=&#34;mi&#34;&gt;28&lt;/span&gt;&lt;span class=&#34;p&#34;&gt;),&lt;/span&gt; &lt;span class=&#34;n&#34;&gt;Tensor&lt;/span&gt;&lt;span class=&#34;p&#34;&gt;([&lt;/span&gt;&lt;span class=&#34;mi&#34;&gt;2&lt;/span&gt;&lt;span class=&#34;p&#34;&gt;,&lt;/span&gt;&lt;span class=&#34;mi&#34;&gt;4&lt;/span&gt;&lt;span class=&#34;p&#34;&gt;,&lt;/span&gt;&lt;span class=&#34;mi&#34;&gt;3&lt;/span&gt;&lt;span class=&#34;p&#34;&gt;,&lt;/span&gt;&lt;span class=&#34;mi&#34;&gt;7&lt;/span&gt;&lt;span class=&#34;p&#34;&gt;])&lt;/span&gt;  &lt;span class=&#34;c1&#34;&gt;# replace with real mnist dataloader&lt;/span&gt;
&lt;/span&gt;&lt;/span&gt;&lt;span class=&#34;line&#34;&gt;&lt;span class=&#34;cl&#34;&gt;
&lt;/span&gt;&lt;/span&gt;&lt;span class=&#34;line&#34;&gt;&lt;span class=&#34;cl&#34;&gt;&lt;span class=&#34;k&#34;&gt;with&lt;/span&gt; &lt;span class=&#34;n&#34;&gt;Tensor&lt;/span&gt;&lt;span class=&#34;o&#34;&gt;.&lt;/span&gt;&lt;span class=&#34;n&#34;&gt;train&lt;/span&gt;&lt;span class=&#34;p&#34;&gt;():&lt;/span&gt;
&lt;/span&gt;&lt;/span&gt;&lt;span class=&#34;line&#34;&gt;&lt;span class=&#34;cl&#34;&gt;  &lt;span class=&#34;k&#34;&gt;for&lt;/span&gt; &lt;span class=&#34;n&#34;&gt;i&lt;/span&gt; &lt;span class=&#34;ow&#34;&gt;in&lt;/span&gt; &lt;span class=&#34;nb&#34;&gt;range&lt;/span&gt;&lt;span class=&#34;p&#34;&gt;(&lt;/span&gt;&lt;span class=&#34;mi&#34;&gt;10&lt;/span&gt;&lt;span class=&#34;p&#34;&gt;):&lt;/span&gt;
&lt;/span&gt;&lt;/span&gt;&lt;span class=&#34;line&#34;&gt;&lt;span class=&#34;cl&#34;&gt;    &lt;span class=&#34;n&#34;&gt;optim&lt;/span&gt;&lt;span class=&#34;o&#34;&gt;.&lt;/span&gt;&lt;span class=&#34;n&#34;&gt;zero_grad&lt;/span&gt;&lt;span class=&#34;p&#34;&gt;()&lt;/span&gt;
&lt;/span&gt;&lt;/span&gt;&lt;span class=&#34;line&#34;&gt;&lt;span class=&#34;cl&#34;&gt;    &lt;span class=&#34;n&#34;&gt;loss&lt;/span&gt; &lt;span class=&#34;o&#34;&gt;=&lt;/span&gt; &lt;span class=&#34;n&#34;&gt;model&lt;/span&gt;&lt;span class=&#34;p&#34;&gt;(&lt;/span&gt;&lt;span class=&#34;n&#34;&gt;x&lt;/span&gt;&lt;span class=&#34;p&#34;&gt;)&lt;/span&gt;&lt;span class=&#34;o&#34;&gt;.&lt;/span&gt;&lt;span class=&#34;n&#34;&gt;sparse_categorical_crossentropy&lt;/span&gt;&lt;span class=&#34;p&#34;&gt;(&lt;/span&gt;&lt;span class=&#34;n&#34;&gt;y&lt;/span&gt;&lt;span class=&#34;p&#34;&gt;)&lt;/span&gt;&lt;span class=&#34;o&#34;&gt;.&lt;/span&gt;&lt;span class=&#34;n&#34;&gt;backward&lt;/span&gt;&lt;span class=&#34;p&#34;&gt;()&lt;/span&gt;
&lt;/span&gt;&lt;/span&gt;&lt;span class=&#34;line&#34;&gt;&lt;span class=&#34;cl&#34;&gt;    &lt;span class=&#34;n&#34;&gt;optim&lt;/span&gt;&lt;span class=&#34;o&#34;&gt;.&lt;/span&gt;&lt;span class=&#34;n&#34;&gt;step&lt;/span&gt;&lt;span class=&#34;p&#34;&gt;()&lt;/span&gt;
&lt;/span&gt;&lt;/span&gt;&lt;span class=&#34;line&#34;&gt;&lt;span class=&#34;cl&#34;&gt;    &lt;span class=&#34;nb&#34;&gt;print&lt;/span&gt;&lt;span class=&#34;p&#34;&gt;(&lt;/span&gt;&lt;span class=&#34;n&#34;&gt;i&lt;/span&gt;&lt;span class=&#34;p&#34;&gt;,&lt;/span&gt; &lt;span class=&#34;n&#34;&gt;loss&lt;/span&gt;&lt;span class=&#34;o&#34;&gt;.&lt;/span&gt;&lt;span class=&#34;n&#34;&gt;item&lt;/span&gt;&lt;span class=&#34;p&#34;&gt;())&lt;/span&gt;
&lt;/span&gt;&lt;/span&gt;&lt;/code&gt;&lt;/pre&gt;&lt;/td&gt;&lt;/tr&gt;&lt;/table&gt;
&lt;/div&gt;
&lt;/div&gt;&lt;p&gt;See &lt;a class=&#34;link&#34; href=&#34;examples/beautiful_mnist.py&#34; &gt;examples/beautiful_mnist.py&lt;/a&gt; for the full version that gets 98% in ~5 seconds&lt;/p&gt;
&lt;h2 id=&#34;accelerators&#34;&gt;Accelerators
&lt;/h2&gt;&lt;p&gt;tinygrad already supports numerous accelerators, including:&lt;/p&gt;
&lt;ul&gt;
&lt;li&gt;&lt;input checked=&#34;&#34; disabled=&#34;&#34; type=&#34;checkbox&#34;&gt; &lt;a class=&#34;link&#34; href=&#34;tinygrad/runtime/ops_gpu.py&#34; &gt;GPU (OpenCL)&lt;/a&gt;&lt;/li&gt;
&lt;li&gt;&lt;input checked=&#34;&#34; disabled=&#34;&#34; type=&#34;checkbox&#34;&gt; &lt;a class=&#34;link&#34; href=&#34;tinygrad/runtime/ops_cpu.py&#34; &gt;CPU (C Code)&lt;/a&gt;&lt;/li&gt;
&lt;li&gt;&lt;input checked=&#34;&#34; disabled=&#34;&#34; type=&#34;checkbox&#34;&gt; &lt;a class=&#34;link&#34; href=&#34;tinygrad/runtime/ops_llvm.py&#34; &gt;LLVM&lt;/a&gt;&lt;/li&gt;
&lt;li&gt;&lt;input checked=&#34;&#34; disabled=&#34;&#34; type=&#34;checkbox&#34;&gt; &lt;a class=&#34;link&#34; href=&#34;tinygrad/runtime/ops_metal.py&#34; &gt;METAL&lt;/a&gt;&lt;/li&gt;
&lt;li&gt;&lt;input checked=&#34;&#34; disabled=&#34;&#34; type=&#34;checkbox&#34;&gt; &lt;a class=&#34;link&#34; href=&#34;tinygrad/runtime/ops_cuda.py&#34; &gt;CUDA&lt;/a&gt;&lt;/li&gt;
&lt;li&gt;&lt;input checked=&#34;&#34; disabled=&#34;&#34; type=&#34;checkbox&#34;&gt; &lt;a class=&#34;link&#34; href=&#34;tinygrad/runtime/ops_amd.py&#34; &gt;AMD&lt;/a&gt;&lt;/li&gt;
&lt;li&gt;&lt;input checked=&#34;&#34; disabled=&#34;&#34; type=&#34;checkbox&#34;&gt; &lt;a class=&#34;link&#34; href=&#34;tinygrad/runtime/ops_nv.py&#34; &gt;NV&lt;/a&gt;&lt;/li&gt;
&lt;li&gt;&lt;input checked=&#34;&#34; disabled=&#34;&#34; type=&#34;checkbox&#34;&gt; &lt;a class=&#34;link&#34; href=&#34;tinygrad/runtime/ops_qcom.py&#34; &gt;QCOM&lt;/a&gt;&lt;/li&gt;
&lt;li&gt;&lt;input checked=&#34;&#34; disabled=&#34;&#34; type=&#34;checkbox&#34;&gt; &lt;a class=&#34;link&#34; href=&#34;tinygrad/runtime/ops_webgpu.py&#34; &gt;WEBGPU&lt;/a&gt;&lt;/li&gt;
&lt;/ul&gt;
&lt;p&gt;And it is easy to add more! Your accelerator of choice only needs to support a total of ~25 low level ops.&lt;/p&gt;
&lt;p&gt;To check default accelerator run: &lt;code&gt;python3 -c &amp;quot;from tinygrad import Device; print(Device.DEFAULT)&amp;quot;&lt;/code&gt;&lt;/p&gt;
&lt;h2 id=&#34;installation&#34;&gt;Installation
&lt;/h2&gt;&lt;p&gt;The current recommended way to install tinygrad is from source.&lt;/p&gt;
&lt;h3 id=&#34;from-source&#34;&gt;From source
&lt;/h3&gt;&lt;div class=&#34;highlight&#34;&gt;&lt;div class=&#34;chroma&#34;&gt;
&lt;table class=&#34;lntable&#34;&gt;&lt;tr&gt;&lt;td class=&#34;lntd&#34;&gt;
&lt;pre tabindex=&#34;0&#34; class=&#34;chroma&#34;&gt;&lt;code&gt;&lt;span class=&#34;lnt&#34;&gt;1
&lt;/span&gt;&lt;span class=&#34;lnt&#34;&gt;2
&lt;/span&gt;&lt;span class=&#34;lnt&#34;&gt;3
&lt;/span&gt;&lt;/code&gt;&lt;/pre&gt;&lt;/td&gt;
&lt;td class=&#34;lntd&#34;&gt;
&lt;pre tabindex=&#34;0&#34; class=&#34;chroma&#34;&gt;&lt;code class=&#34;language-sh&#34; data-lang=&#34;sh&#34;&gt;&lt;span class=&#34;line&#34;&gt;&lt;span class=&#34;cl&#34;&gt;git clone https://github.com/tinygrad/tinygrad.git
&lt;/span&gt;&lt;/span&gt;&lt;span class=&#34;line&#34;&gt;&lt;span class=&#34;cl&#34;&gt;&lt;span class=&#34;nb&#34;&gt;cd&lt;/span&gt; tinygrad
&lt;/span&gt;&lt;/span&gt;&lt;span class=&#34;line&#34;&gt;&lt;span class=&#34;cl&#34;&gt;python3 -m pip install -e .
&lt;/span&gt;&lt;/span&gt;&lt;/code&gt;&lt;/pre&gt;&lt;/td&gt;&lt;/tr&gt;&lt;/table&gt;
&lt;/div&gt;
&lt;/div&gt;&lt;h3 id=&#34;direct-master&#34;&gt;Direct (master)
&lt;/h3&gt;&lt;div class=&#34;highlight&#34;&gt;&lt;div class=&#34;chroma&#34;&gt;
&lt;table class=&#34;lntable&#34;&gt;&lt;tr&gt;&lt;td class=&#34;lntd&#34;&gt;
&lt;pre tabindex=&#34;0&#34; class=&#34;chroma&#34;&gt;&lt;code&gt;&lt;span class=&#34;lnt&#34;&gt;1
&lt;/span&gt;&lt;/code&gt;&lt;/pre&gt;&lt;/td&gt;
&lt;td class=&#34;lntd&#34;&gt;
&lt;pre tabindex=&#34;0&#34; class=&#34;chroma&#34;&gt;&lt;code class=&#34;language-sh&#34; data-lang=&#34;sh&#34;&gt;&lt;span class=&#34;line&#34;&gt;&lt;span class=&#34;cl&#34;&gt;python3 -m pip install git+https://github.com/tinygrad/tinygrad.git
&lt;/span&gt;&lt;/span&gt;&lt;/code&gt;&lt;/pre&gt;&lt;/td&gt;&lt;/tr&gt;&lt;/table&gt;
&lt;/div&gt;
&lt;/div&gt;&lt;h2 id=&#34;documentation&#34;&gt;Documentation
&lt;/h2&gt;&lt;p&gt;Documentation along with a quick start guide can be found on the &lt;a class=&#34;link&#34; href=&#34;https://docs.tinygrad.org/&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;docs website&lt;/a&gt; built from the &lt;a class=&#34;link&#34; href=&#34;https://producthunt.programnotes.cn/docs&#34; &gt;docs/&lt;/a&gt; directory.&lt;/p&gt;
&lt;h3 id=&#34;quick-example-comparing-to-pytorch&#34;&gt;Quick example comparing to PyTorch
&lt;/h3&gt;&lt;div class=&#34;highlight&#34;&gt;&lt;div class=&#34;chroma&#34;&gt;
&lt;table class=&#34;lntable&#34;&gt;&lt;tr&gt;&lt;td class=&#34;lntd&#34;&gt;
&lt;pre tabindex=&#34;0&#34; class=&#34;chroma&#34;&gt;&lt;code&gt;&lt;span class=&#34;lnt&#34;&gt;1
&lt;/span&gt;&lt;span class=&#34;lnt&#34;&gt;2
&lt;/span&gt;&lt;span class=&#34;lnt&#34;&gt;3
&lt;/span&gt;&lt;span class=&#34;lnt&#34;&gt;4
&lt;/span&gt;&lt;span class=&#34;lnt&#34;&gt;5
&lt;/span&gt;&lt;span class=&#34;lnt&#34;&gt;6
&lt;/span&gt;&lt;span class=&#34;lnt&#34;&gt;7
&lt;/span&gt;&lt;span class=&#34;lnt&#34;&gt;8
&lt;/span&gt;&lt;span class=&#34;lnt&#34;&gt;9
&lt;/span&gt;&lt;/code&gt;&lt;/pre&gt;&lt;/td&gt;
&lt;td class=&#34;lntd&#34;&gt;
&lt;pre tabindex=&#34;0&#34; class=&#34;chroma&#34;&gt;&lt;code class=&#34;language-python&#34; data-lang=&#34;python&#34;&gt;&lt;span class=&#34;line&#34;&gt;&lt;span class=&#34;cl&#34;&gt;&lt;span class=&#34;kn&#34;&gt;from&lt;/span&gt; &lt;span class=&#34;nn&#34;&gt;tinygrad&lt;/span&gt; &lt;span class=&#34;kn&#34;&gt;import&lt;/span&gt; &lt;span class=&#34;n&#34;&gt;Tensor&lt;/span&gt;
&lt;/span&gt;&lt;/span&gt;&lt;span class=&#34;line&#34;&gt;&lt;span class=&#34;cl&#34;&gt;
&lt;/span&gt;&lt;/span&gt;&lt;span class=&#34;line&#34;&gt;&lt;span class=&#34;cl&#34;&gt;&lt;span class=&#34;n&#34;&gt;x&lt;/span&gt; &lt;span class=&#34;o&#34;&gt;=&lt;/span&gt; &lt;span class=&#34;n&#34;&gt;Tensor&lt;/span&gt;&lt;span class=&#34;o&#34;&gt;.&lt;/span&gt;&lt;span class=&#34;n&#34;&gt;eye&lt;/span&gt;&lt;span class=&#34;p&#34;&gt;(&lt;/span&gt;&lt;span class=&#34;mi&#34;&gt;3&lt;/span&gt;&lt;span class=&#34;p&#34;&gt;,&lt;/span&gt; &lt;span class=&#34;n&#34;&gt;requires_grad&lt;/span&gt;&lt;span class=&#34;o&#34;&gt;=&lt;/span&gt;&lt;span class=&#34;kc&#34;&gt;True&lt;/span&gt;&lt;span class=&#34;p&#34;&gt;)&lt;/span&gt;
&lt;/span&gt;&lt;/span&gt;&lt;span class=&#34;line&#34;&gt;&lt;span class=&#34;cl&#34;&gt;&lt;span class=&#34;n&#34;&gt;y&lt;/span&gt; &lt;span class=&#34;o&#34;&gt;=&lt;/span&gt; &lt;span class=&#34;n&#34;&gt;Tensor&lt;/span&gt;&lt;span class=&#34;p&#34;&gt;([[&lt;/span&gt;&lt;span class=&#34;mf&#34;&gt;2.0&lt;/span&gt;&lt;span class=&#34;p&#34;&gt;,&lt;/span&gt;&lt;span class=&#34;mi&#34;&gt;0&lt;/span&gt;&lt;span class=&#34;p&#34;&gt;,&lt;/span&gt;&lt;span class=&#34;o&#34;&gt;-&lt;/span&gt;&lt;span class=&#34;mf&#34;&gt;2.0&lt;/span&gt;&lt;span class=&#34;p&#34;&gt;]],&lt;/span&gt; &lt;span class=&#34;n&#34;&gt;requires_grad&lt;/span&gt;&lt;span class=&#34;o&#34;&gt;=&lt;/span&gt;&lt;span class=&#34;kc&#34;&gt;True&lt;/span&gt;&lt;span class=&#34;p&#34;&gt;)&lt;/span&gt;
&lt;/span&gt;&lt;/span&gt;&lt;span class=&#34;line&#34;&gt;&lt;span class=&#34;cl&#34;&gt;&lt;span class=&#34;n&#34;&gt;z&lt;/span&gt; &lt;span class=&#34;o&#34;&gt;=&lt;/span&gt; &lt;span class=&#34;n&#34;&gt;y&lt;/span&gt;&lt;span class=&#34;o&#34;&gt;.&lt;/span&gt;&lt;span class=&#34;n&#34;&gt;matmul&lt;/span&gt;&lt;span class=&#34;p&#34;&gt;(&lt;/span&gt;&lt;span class=&#34;n&#34;&gt;x&lt;/span&gt;&lt;span class=&#34;p&#34;&gt;)&lt;/span&gt;&lt;span class=&#34;o&#34;&gt;.&lt;/span&gt;&lt;span class=&#34;n&#34;&gt;sum&lt;/span&gt;&lt;span class=&#34;p&#34;&gt;()&lt;/span&gt;
&lt;/span&gt;&lt;/span&gt;&lt;span class=&#34;line&#34;&gt;&lt;span class=&#34;cl&#34;&gt;&lt;span class=&#34;n&#34;&gt;z&lt;/span&gt;&lt;span class=&#34;o&#34;&gt;.&lt;/span&gt;&lt;span class=&#34;n&#34;&gt;backward&lt;/span&gt;&lt;span class=&#34;p&#34;&gt;()&lt;/span&gt;
&lt;/span&gt;&lt;/span&gt;&lt;span class=&#34;line&#34;&gt;&lt;span class=&#34;cl&#34;&gt;
&lt;/span&gt;&lt;/span&gt;&lt;span class=&#34;line&#34;&gt;&lt;span class=&#34;cl&#34;&gt;&lt;span class=&#34;nb&#34;&gt;print&lt;/span&gt;&lt;span class=&#34;p&#34;&gt;(&lt;/span&gt;&lt;span class=&#34;n&#34;&gt;x&lt;/span&gt;&lt;span class=&#34;o&#34;&gt;.&lt;/span&gt;&lt;span class=&#34;n&#34;&gt;grad&lt;/span&gt;&lt;span class=&#34;o&#34;&gt;.&lt;/span&gt;&lt;span class=&#34;n&#34;&gt;tolist&lt;/span&gt;&lt;span class=&#34;p&#34;&gt;())&lt;/span&gt;  &lt;span class=&#34;c1&#34;&gt;# dz/dx&lt;/span&gt;
&lt;/span&gt;&lt;/span&gt;&lt;span class=&#34;line&#34;&gt;&lt;span class=&#34;cl&#34;&gt;&lt;span class=&#34;nb&#34;&gt;print&lt;/span&gt;&lt;span class=&#34;p&#34;&gt;(&lt;/span&gt;&lt;span class=&#34;n&#34;&gt;y&lt;/span&gt;&lt;span class=&#34;o&#34;&gt;.&lt;/span&gt;&lt;span class=&#34;n&#34;&gt;grad&lt;/span&gt;&lt;span class=&#34;o&#34;&gt;.&lt;/span&gt;&lt;span class=&#34;n&#34;&gt;tolist&lt;/span&gt;&lt;span class=&#34;p&#34;&gt;())&lt;/span&gt;  &lt;span class=&#34;c1&#34;&gt;# dz/dy&lt;/span&gt;
&lt;/span&gt;&lt;/span&gt;&lt;/code&gt;&lt;/pre&gt;&lt;/td&gt;&lt;/tr&gt;&lt;/table&gt;
&lt;/div&gt;
&lt;/div&gt;&lt;p&gt;The same thing but in PyTorch:&lt;/p&gt;
&lt;div class=&#34;highlight&#34;&gt;&lt;div class=&#34;chroma&#34;&gt;
&lt;table class=&#34;lntable&#34;&gt;&lt;tr&gt;&lt;td class=&#34;lntd&#34;&gt;
&lt;pre tabindex=&#34;0&#34; class=&#34;chroma&#34;&gt;&lt;code&gt;&lt;span class=&#34;lnt&#34;&gt;1
&lt;/span&gt;&lt;span class=&#34;lnt&#34;&gt;2
&lt;/span&gt;&lt;span class=&#34;lnt&#34;&gt;3
&lt;/span&gt;&lt;span class=&#34;lnt&#34;&gt;4
&lt;/span&gt;&lt;span class=&#34;lnt&#34;&gt;5
&lt;/span&gt;&lt;span class=&#34;lnt&#34;&gt;6
&lt;/span&gt;&lt;span class=&#34;lnt&#34;&gt;7
&lt;/span&gt;&lt;span class=&#34;lnt&#34;&gt;8
&lt;/span&gt;&lt;span class=&#34;lnt&#34;&gt;9
&lt;/span&gt;&lt;/code&gt;&lt;/pre&gt;&lt;/td&gt;
&lt;td class=&#34;lntd&#34;&gt;
&lt;pre tabindex=&#34;0&#34; class=&#34;chroma&#34;&gt;&lt;code class=&#34;language-python&#34; data-lang=&#34;python&#34;&gt;&lt;span class=&#34;line&#34;&gt;&lt;span class=&#34;cl&#34;&gt;&lt;span class=&#34;kn&#34;&gt;import&lt;/span&gt; &lt;span class=&#34;nn&#34;&gt;torch&lt;/span&gt;
&lt;/span&gt;&lt;/span&gt;&lt;span class=&#34;line&#34;&gt;&lt;span class=&#34;cl&#34;&gt;
&lt;/span&gt;&lt;/span&gt;&lt;span class=&#34;line&#34;&gt;&lt;span class=&#34;cl&#34;&gt;&lt;span class=&#34;n&#34;&gt;x&lt;/span&gt; &lt;span class=&#34;o&#34;&gt;=&lt;/span&gt; &lt;span class=&#34;n&#34;&gt;torch&lt;/span&gt;&lt;span class=&#34;o&#34;&gt;.&lt;/span&gt;&lt;span class=&#34;n&#34;&gt;eye&lt;/span&gt;&lt;span class=&#34;p&#34;&gt;(&lt;/span&gt;&lt;span class=&#34;mi&#34;&gt;3&lt;/span&gt;&lt;span class=&#34;p&#34;&gt;,&lt;/span&gt; &lt;span class=&#34;n&#34;&gt;requires_grad&lt;/span&gt;&lt;span class=&#34;o&#34;&gt;=&lt;/span&gt;&lt;span class=&#34;kc&#34;&gt;True&lt;/span&gt;&lt;span class=&#34;p&#34;&gt;)&lt;/span&gt;
&lt;/span&gt;&lt;/span&gt;&lt;span class=&#34;line&#34;&gt;&lt;span class=&#34;cl&#34;&gt;&lt;span class=&#34;n&#34;&gt;y&lt;/span&gt; &lt;span class=&#34;o&#34;&gt;=&lt;/span&gt; &lt;span class=&#34;n&#34;&gt;torch&lt;/span&gt;&lt;span class=&#34;o&#34;&gt;.&lt;/span&gt;&lt;span class=&#34;n&#34;&gt;tensor&lt;/span&gt;&lt;span class=&#34;p&#34;&gt;([[&lt;/span&gt;&lt;span class=&#34;mf&#34;&gt;2.0&lt;/span&gt;&lt;span class=&#34;p&#34;&gt;,&lt;/span&gt;&lt;span class=&#34;mi&#34;&gt;0&lt;/span&gt;&lt;span class=&#34;p&#34;&gt;,&lt;/span&gt;&lt;span class=&#34;o&#34;&gt;-&lt;/span&gt;&lt;span class=&#34;mf&#34;&gt;2.0&lt;/span&gt;&lt;span class=&#34;p&#34;&gt;]],&lt;/span&gt; &lt;span class=&#34;n&#34;&gt;requires_grad&lt;/span&gt;&lt;span class=&#34;o&#34;&gt;=&lt;/span&gt;&lt;span class=&#34;kc&#34;&gt;True&lt;/span&gt;&lt;span class=&#34;p&#34;&gt;)&lt;/span&gt;
&lt;/span&gt;&lt;/span&gt;&lt;span class=&#34;line&#34;&gt;&lt;span class=&#34;cl&#34;&gt;&lt;span class=&#34;n&#34;&gt;z&lt;/span&gt; &lt;span class=&#34;o&#34;&gt;=&lt;/span&gt; &lt;span class=&#34;n&#34;&gt;y&lt;/span&gt;&lt;span class=&#34;o&#34;&gt;.&lt;/span&gt;&lt;span class=&#34;n&#34;&gt;matmul&lt;/span&gt;&lt;span class=&#34;p&#34;&gt;(&lt;/span&gt;&lt;span class=&#34;n&#34;&gt;x&lt;/span&gt;&lt;span class=&#34;p&#34;&gt;)&lt;/span&gt;&lt;span class=&#34;o&#34;&gt;.&lt;/span&gt;&lt;span class=&#34;n&#34;&gt;sum&lt;/span&gt;&lt;span class=&#34;p&#34;&gt;()&lt;/span&gt;
&lt;/span&gt;&lt;/span&gt;&lt;span class=&#34;line&#34;&gt;&lt;span class=&#34;cl&#34;&gt;&lt;span class=&#34;n&#34;&gt;z&lt;/span&gt;&lt;span class=&#34;o&#34;&gt;.&lt;/span&gt;&lt;span class=&#34;n&#34;&gt;backward&lt;/span&gt;&lt;span class=&#34;p&#34;&gt;()&lt;/span&gt;
&lt;/span&gt;&lt;/span&gt;&lt;span class=&#34;line&#34;&gt;&lt;span class=&#34;cl&#34;&gt;
&lt;/span&gt;&lt;/span&gt;&lt;span class=&#34;line&#34;&gt;&lt;span class=&#34;cl&#34;&gt;&lt;span class=&#34;nb&#34;&gt;print&lt;/span&gt;&lt;span class=&#34;p&#34;&gt;(&lt;/span&gt;&lt;span class=&#34;n&#34;&gt;x&lt;/span&gt;&lt;span class=&#34;o&#34;&gt;.&lt;/span&gt;&lt;span class=&#34;n&#34;&gt;grad&lt;/span&gt;&lt;span class=&#34;o&#34;&gt;.&lt;/span&gt;&lt;span class=&#34;n&#34;&gt;tolist&lt;/span&gt;&lt;span class=&#34;p&#34;&gt;())&lt;/span&gt;  &lt;span class=&#34;c1&#34;&gt;# dz/dx&lt;/span&gt;
&lt;/span&gt;&lt;/span&gt;&lt;span class=&#34;line&#34;&gt;&lt;span class=&#34;cl&#34;&gt;&lt;span class=&#34;nb&#34;&gt;print&lt;/span&gt;&lt;span class=&#34;p&#34;&gt;(&lt;/span&gt;&lt;span class=&#34;n&#34;&gt;y&lt;/span&gt;&lt;span class=&#34;o&#34;&gt;.&lt;/span&gt;&lt;span class=&#34;n&#34;&gt;grad&lt;/span&gt;&lt;span class=&#34;o&#34;&gt;.&lt;/span&gt;&lt;span class=&#34;n&#34;&gt;tolist&lt;/span&gt;&lt;span class=&#34;p&#34;&gt;())&lt;/span&gt;  &lt;span class=&#34;c1&#34;&gt;# dz/dy&lt;/span&gt;
&lt;/span&gt;&lt;/span&gt;&lt;/code&gt;&lt;/pre&gt;&lt;/td&gt;&lt;/tr&gt;&lt;/table&gt;
&lt;/div&gt;
&lt;/div&gt;&lt;h2 id=&#34;contributing&#34;&gt;Contributing
&lt;/h2&gt;&lt;p&gt;There has been a lot of interest in tinygrad lately. Following these guidelines will help your PR get accepted.&lt;/p&gt;
&lt;p&gt;We&amp;rsquo;ll start with what will get your PR closed with a pointer to this section:&lt;/p&gt;
&lt;ul&gt;
&lt;li&gt;No code golf! While low line count is a guiding light of this project, anything that remotely looks like code golf will be closed. The true goal is reducing complexity and increasing readability, and deleting &lt;code&gt;\n&lt;/code&gt;s does nothing to help with that.&lt;/li&gt;
&lt;li&gt;All docs and whitespace changes will be closed unless you are a well-known contributor. The people writing the docs should be those who know the codebase the absolute best. People who have not demonstrated that shouldn&amp;rsquo;t be messing with docs. Whitespace changes are both useless &lt;em&gt;and&lt;/em&gt; carry a risk of introducing bugs.&lt;/li&gt;
&lt;li&gt;Anything you claim is a &amp;ldquo;speedup&amp;rdquo; must be benchmarked. In general, the goal is simplicity, so even if your PR makes things marginally faster, you have to consider the tradeoff with maintainability and readability.&lt;/li&gt;
&lt;li&gt;In general, the code outside the core &lt;code&gt;tinygrad/&lt;/code&gt; folder is not well tested, so unless the current code there is broken, you shouldn&amp;rsquo;t be changing it.&lt;/li&gt;
&lt;li&gt;If your PR looks &amp;ldquo;complex&amp;rdquo;, is a big diff, or adds lots of lines, it won&amp;rsquo;t be reviewed or merged. Consider breaking it up into smaller PRs that are individually clear wins. A common pattern I see is prerequisite refactors before adding new functionality. If you can (cleanly) refactor to the point that the feature is a 3 line change, this is great, and something easy for us to review.&lt;/li&gt;
&lt;/ul&gt;
&lt;p&gt;Now, what we want:&lt;/p&gt;
&lt;ul&gt;
&lt;li&gt;Bug fixes (with a regression test) are great! This library isn&amp;rsquo;t 1.0 yet, so if you stumble upon a bug, fix it, write a test, and submit a PR, this is valuable work.&lt;/li&gt;
&lt;li&gt;Solving bounties! tinygrad &lt;a class=&#34;link&#34; href=&#34;https://docs.google.com/spreadsheets/d/1WKHbT-7KOgjEawq5h5Ic1qUWzpfAzuD_J06N1JwOCGs/edit?usp=sharing&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;offers cash bounties&lt;/a&gt; for certain improvements to the library. All new code should be high quality and well tested.&lt;/li&gt;
&lt;li&gt;Features. However, if you are adding a feature, consider the line tradeoff. If it&amp;rsquo;s 3 lines, there&amp;rsquo;s less of a bar of usefulness it has to meet over something that&amp;rsquo;s 30 or 300 lines. All features must have regression tests. In general with no other constraints, your feature&amp;rsquo;s API should match torch or numpy.&lt;/li&gt;
&lt;li&gt;Refactors that are clear wins. In general, if your refactor isn&amp;rsquo;t a clear win it will be closed. But some refactors are amazing! Think about readability in a deep core sense. A whitespace change or moving a few functions around is useless, but if you realize that two 100 line functions can actually use the same 110 line function with arguments while also improving readability, this is a big win. Refactors should pass &lt;a class=&#34;link&#34; href=&#34;#process-replay-tests&#34; &gt;process replay&lt;/a&gt;.&lt;/li&gt;
&lt;li&gt;Tests/fuzzers. If you can add tests that are non brittle, they are welcome. We have some fuzzers in here too, and there&amp;rsquo;s a plethora of bugs that can be found with them and by improving them. Finding bugs, even writing broken tests (that should pass) with &lt;code&gt;@unittest.expectedFailure&lt;/code&gt; is great. This is how we make progress.&lt;/li&gt;
&lt;li&gt;Dead code removal from core &lt;code&gt;tinygrad/&lt;/code&gt; folder. We don&amp;rsquo;t care about the code in extra, but removing dead code from the core library is great. Less for new people to read and be confused by.&lt;/li&gt;
&lt;/ul&gt;
&lt;h3 id=&#34;running-tests&#34;&gt;Running tests
&lt;/h3&gt;&lt;p&gt;You should install the pre-commit hooks with &lt;code&gt;pre-commit install&lt;/code&gt;. This will run the linter, mypy, and a subset of the tests on every commit.&lt;/p&gt;
&lt;p&gt;For more examples on how to run the full test suite please refer to the &lt;a class=&#34;link&#34; href=&#34;.github/workflows/test.yml&#34; &gt;CI workflow&lt;/a&gt;.&lt;/p&gt;
&lt;p&gt;Some examples of running tests locally:&lt;/p&gt;
&lt;div class=&#34;highlight&#34;&gt;&lt;div class=&#34;chroma&#34;&gt;
&lt;table class=&#34;lntable&#34;&gt;&lt;tr&gt;&lt;td class=&#34;lntd&#34;&gt;
&lt;pre tabindex=&#34;0&#34; class=&#34;chroma&#34;&gt;&lt;code&gt;&lt;span class=&#34;lnt&#34;&gt;1
&lt;/span&gt;&lt;span class=&#34;lnt&#34;&gt;2
&lt;/span&gt;&lt;span class=&#34;lnt&#34;&gt;3
&lt;/span&gt;&lt;/code&gt;&lt;/pre&gt;&lt;/td&gt;
&lt;td class=&#34;lntd&#34;&gt;
&lt;pre tabindex=&#34;0&#34; class=&#34;chroma&#34;&gt;&lt;code class=&#34;language-sh&#34; data-lang=&#34;sh&#34;&gt;&lt;span class=&#34;line&#34;&gt;&lt;span class=&#34;cl&#34;&gt;python3 -m pip install -e &lt;span class=&#34;s1&#34;&gt;&amp;#39;.[testing]&amp;#39;&lt;/span&gt;  &lt;span class=&#34;c1&#34;&gt;# install extra deps for testing&lt;/span&gt;
&lt;/span&gt;&lt;/span&gt;&lt;span class=&#34;line&#34;&gt;&lt;span class=&#34;cl&#34;&gt;python3 test/test_ops.py                &lt;span class=&#34;c1&#34;&gt;# just the ops tests&lt;/span&gt;
&lt;/span&gt;&lt;/span&gt;&lt;span class=&#34;line&#34;&gt;&lt;span class=&#34;cl&#34;&gt;python3 -m pytest test/                 &lt;span class=&#34;c1&#34;&gt;# whole test suite&lt;/span&gt;
&lt;/span&gt;&lt;/span&gt;&lt;/code&gt;&lt;/pre&gt;&lt;/td&gt;&lt;/tr&gt;&lt;/table&gt;
&lt;/div&gt;
&lt;/div&gt;&lt;h4 id=&#34;process-replay-tests&#34;&gt;Process replay tests
&lt;/h4&gt;&lt;p&gt;&lt;a class=&#34;link&#34; href=&#34;https://github.com/tinygrad/tinygrad/blob/master/test/external/process_replay/README.md&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;Process replay&lt;/a&gt; compares your PR&amp;rsquo;s generated kernels against master. If your PR is a refactor or speedup without any expected behavior change, it should include [pr] in the pull request title.&lt;/p&gt;
</description>
        </item>
        <item>
        <title>skypilot</title>
        <link>https://producthunt.programnotes.cn/en/p/skypilot/</link>
        <pubDate>Sun, 27 Apr 2025 15:27:09 +0800</pubDate>
        
        <guid>https://producthunt.programnotes.cn/en/p/skypilot/</guid>
        <description>&lt;img src="https://images.unsplash.com/photo-1696448022220-4d357587ddac?ixid=M3w0NjAwMjJ8MHwxfHJhbmRvbXx8fHx8fHx8fDE3NDU3Mzg2ODJ8&amp;ixlib=rb-4.0.3" alt="Featured image of post skypilot" /&gt;&lt;h1 id=&#34;skypilot-orgskypilot&#34;&gt;&lt;a class=&#34;link&#34; href=&#34;https://github.com/skypilot-org/skypilot&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;skypilot-org/skypilot&lt;/a&gt;
&lt;/h1&gt;&lt;p align=&#34;center&#34;&gt;
  &lt;picture&gt;
    &lt;source media=&#34;(prefers-color-scheme: dark)&#34; srcset=&#34;https://raw.githubusercontent.com/skypilot-org/skypilot/master/docs/source/images/skypilot-wide-dark-1k.png&#34;&gt;
    &lt;img alt=&#34;SkyPilot&#34; src=&#34;https://raw.githubusercontent.com/skypilot-org/skypilot/master/docs/source/images/skypilot-wide-light-1k.png&#34; width=55%&gt;
  &lt;/picture&gt;
&lt;/p&gt;
&lt;p align=&#34;center&#34;&gt;
  &lt;a href=&#34;https://docs.skypilot.co/&#34;&gt;
    &lt;img alt=&#34;Documentation&#34; src=&#34;https://img.shields.io/badge/docs-gray?logo=readthedocs&amp;logoColor=f5f5f5&#34;&gt;
  &lt;/a&gt;
  &lt;a href=&#34;https://github.com/skypilot-org/skypilot/releases&#34;&gt;
    &lt;img alt=&#34;GitHub Release&#34; src=&#34;https://img.shields.io/github/release/skypilot-org/skypilot.svg&#34;&gt;
  &lt;/a&gt;
  &lt;a href=&#34;http://slack.skypilot.co&#34;&gt;
    &lt;img alt=&#34;Join Slack&#34; src=&#34;https://img.shields.io/badge/SkyPilot-Join%20Slack-blue?logo=slack&#34;&gt;
  &lt;/a&gt;
  &lt;a href=&#34;https://github.com/skypilot-org/skypilot/releases&#34;&gt;
    &lt;img alt=&#34;Downloads&#34; src=&#34;https://img.shields.io/pypi/dm/skypilot&#34;&gt;
  &lt;/a&gt;
&lt;/p&gt;
&lt;h3 align=&#34;center&#34;&gt;
    Run AI on Any Infra — Unified, Faster, Cheaper
&lt;/h3&gt;
&lt;hr&gt;
&lt;p&gt;:fire: &lt;em&gt;News&lt;/em&gt; :fire:&lt;/p&gt;
&lt;ul&gt;
&lt;li&gt;[Mar 2025] Run and serve &lt;strong&gt;Google Gemma 3&lt;/strong&gt; using SkyPilot &lt;a class=&#34;link&#34; href=&#34;./llm/gemma3/&#34; &gt;&lt;strong&gt;example&lt;/strong&gt;&lt;/a&gt;&lt;/li&gt;
&lt;li&gt;[Feb 2025] Prepare and serve &lt;strong&gt;Retrieval Augmented Generation (RAG) with DeepSeek-R1&lt;/strong&gt;: &lt;a class=&#34;link&#34; href=&#34;https://blog.skypilot.co/deepseek-rag&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;&lt;strong&gt;blog post&lt;/strong&gt;&lt;/a&gt;, &lt;a class=&#34;link&#34; href=&#34;./llm/rag/&#34; &gt;&lt;strong&gt;example&lt;/strong&gt;&lt;/a&gt;&lt;/li&gt;
&lt;li&gt;[Feb 2025] Run and serve &lt;strong&gt;DeepSeek-R1 671B&lt;/strong&gt; using SkyPilot and SGLang with high throughput: &lt;a class=&#34;link&#34; href=&#34;./llm/deepseek-r1/&#34; &gt;&lt;strong&gt;example&lt;/strong&gt;&lt;/a&gt;&lt;/li&gt;
&lt;li&gt;[Feb 2025] Prepare and serve large-scale image search with &lt;strong&gt;vector databases&lt;/strong&gt;: &lt;a class=&#34;link&#34; href=&#34;https://blog.skypilot.co/large-scale-vector-database/&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;&lt;strong&gt;blog post&lt;/strong&gt;&lt;/a&gt;, &lt;a class=&#34;link&#34; href=&#34;./examples/vector_database/&#34; &gt;&lt;strong&gt;example&lt;/strong&gt;&lt;/a&gt;&lt;/li&gt;
&lt;li&gt;[Jan 2025] Launch and serve distilled models from &lt;strong&gt;&lt;a class=&#34;link&#34; href=&#34;https://github.com/deepseek-ai/DeepSeek-R1&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;DeepSeek-R1&lt;/a&gt;&lt;/strong&gt; and &lt;strong&gt;&lt;a class=&#34;link&#34; href=&#34;https://github.com/deepseek-ai/DeepSeek-Janus&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;Janus&lt;/a&gt;&lt;/strong&gt; on Kubernetes or any cloud: &lt;a class=&#34;link&#34; href=&#34;./llm/deepseek-r1-distilled/&#34; &gt;&lt;strong&gt;R1 example&lt;/strong&gt;&lt;/a&gt; and &lt;a class=&#34;link&#34; href=&#34;./llm/deepseek-janus/&#34; &gt;&lt;strong&gt;Janus example&lt;/strong&gt;&lt;/a&gt;&lt;/li&gt;
&lt;li&gt;[Oct 2024] :tada: &lt;strong&gt;SkyPilot crossed 1M+ downloads&lt;/strong&gt; :tada:: Thank you to our community! &lt;a class=&#34;link&#34; href=&#34;https://x.com/skypilot_org/status/1844770841718067638&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;&lt;strong&gt;Twitter/X&lt;/strong&gt;&lt;/a&gt;&lt;/li&gt;
&lt;li&gt;[Sep 2024] Point, launch and serve &lt;strong&gt;Llama 3.2&lt;/strong&gt; on Kubernetes or any cloud: &lt;a class=&#34;link&#34; href=&#34;./llm/llama-3_2/&#34; &gt;&lt;strong&gt;example&lt;/strong&gt;&lt;/a&gt;&lt;/li&gt;
&lt;li&gt;[Sep 2024] Run and deploy &lt;a class=&#34;link&#34; href=&#34;./llm/pixtral&#34; &gt;&lt;strong&gt;Pixtral&lt;/strong&gt;&lt;/a&gt;, the first open-source multimodal model from Mistral AI.&lt;/li&gt;
&lt;li&gt;[Jun 2024] Reproduce &lt;strong&gt;GPT&lt;/strong&gt; with &lt;a class=&#34;link&#34; href=&#34;https://github.com/karpathy/llm.c/discussions/481&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;llm.c&lt;/a&gt; on any cloud: &lt;a class=&#34;link&#34; href=&#34;./llm/gpt-2/&#34; &gt;&lt;strong&gt;guide&lt;/strong&gt;&lt;/a&gt;&lt;/li&gt;
&lt;li&gt;[Apr 2024] Serve &lt;a class=&#34;link&#34; href=&#34;https://qwenlm.github.io/blog/qwen1.5-110b/&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;&lt;strong&gt;Qwen-110B&lt;/strong&gt;&lt;/a&gt; on your infra: &lt;a class=&#34;link&#34; href=&#34;./llm/qwen/&#34; &gt;&lt;strong&gt;example&lt;/strong&gt;&lt;/a&gt;&lt;/li&gt;
&lt;li&gt;[Apr 2024] Host &lt;a class=&#34;link&#34; href=&#34;https://github.com/ollama/ollama&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;&lt;strong&gt;Ollama&lt;/strong&gt;&lt;/a&gt; on the cloud to deploy LLMs on CPUs and GPUs: &lt;a class=&#34;link&#34; href=&#34;./llm/ollama/&#34; &gt;&lt;strong&gt;example&lt;/strong&gt;&lt;/a&gt;&lt;/li&gt;
&lt;/ul&gt;
&lt;p&gt;&lt;strong&gt;LLM Finetuning Cookbooks&lt;/strong&gt;: Finetuning Llama 2 / Llama 3.1 in your own cloud environment, privately: Llama 2 &lt;a class=&#34;link&#34; href=&#34;./llm/vicuna-llama-2/&#34; &gt;&lt;strong&gt;example&lt;/strong&gt;&lt;/a&gt; and &lt;a class=&#34;link&#34; href=&#34;https://blog.skypilot.co/finetuning-llama2-operational-guide/&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;&lt;strong&gt;blog&lt;/strong&gt;&lt;/a&gt;; Llama 3.1 &lt;a class=&#34;link&#34; href=&#34;./llm/llama-3_1-finetuning/&#34; &gt;&lt;strong&gt;example&lt;/strong&gt;&lt;/a&gt; and &lt;a class=&#34;link&#34; href=&#34;https://blog.skypilot.co/finetune-llama-3_1-on-your-infra/&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;&lt;strong&gt;blog&lt;/strong&gt;&lt;/a&gt;&lt;/p&gt;
&lt;hr&gt;
&lt;p&gt;SkyPilot is an open-source framework for running AI and batch workloads on any infra.&lt;/p&gt;
&lt;p&gt;SkyPilot &lt;strong&gt;is easy to use for AI users&lt;/strong&gt;:&lt;/p&gt;
&lt;ul&gt;
&lt;li&gt;Quickly spin up compute on your own infra&lt;/li&gt;
&lt;li&gt;Environment and job as code — simple and portable&lt;/li&gt;
&lt;li&gt;Easy job management: queue, run, and auto-recover many jobs&lt;/li&gt;
&lt;/ul&gt;
&lt;p&gt;SkyPilot &lt;strong&gt;unifies multiple clusters, clouds, and hardware&lt;/strong&gt;:&lt;/p&gt;
&lt;ul&gt;
&lt;li&gt;One interface to use reserved GPUs, Kubernetes clusters, or 16+ clouds&lt;/li&gt;
&lt;li&gt;&lt;a class=&#34;link&#34; href=&#34;https://docs.skypilot.co/en/latest/examples/auto-failover.html&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;Flexible provisioning&lt;/a&gt; of GPUs, TPUs, CPUs, with auto-retry&lt;/li&gt;
&lt;li&gt;&lt;a class=&#34;link&#34; href=&#34;https://docs.skypilot.co/en/latest/reference/api-server/api-server.html&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;Team deployment&lt;/a&gt; and resource sharing&lt;/li&gt;
&lt;/ul&gt;
&lt;p&gt;SkyPilot &lt;strong&gt;cuts your cloud costs &amp;amp; maximizes GPU availability&lt;/strong&gt;:&lt;/p&gt;
&lt;ul&gt;
&lt;li&gt;Autostop: automatic cleanup of idle resources&lt;/li&gt;
&lt;li&gt;&lt;a class=&#34;link&#34; href=&#34;https://docs.skypilot.co/en/latest/examples/managed-jobs.html#running-on-spot-instances&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;Spot instance support&lt;/a&gt;: 3-6x cost savings, with preemption auto-recovery&lt;/li&gt;
&lt;li&gt;Intelligent scheduling: automatically run on the cheapest &amp;amp; most available infra&lt;/li&gt;
&lt;/ul&gt;
&lt;p&gt;SkyPilot supports your existing GPU, TPU, and CPU workloads, with no code changes.&lt;/p&gt;
&lt;p&gt;Install with pip:&lt;/p&gt;
&lt;div class=&#34;highlight&#34;&gt;&lt;div class=&#34;chroma&#34;&gt;
&lt;table class=&#34;lntable&#34;&gt;&lt;tr&gt;&lt;td class=&#34;lntd&#34;&gt;
&lt;pre tabindex=&#34;0&#34; class=&#34;chroma&#34;&gt;&lt;code&gt;&lt;span class=&#34;lnt&#34;&gt;1
&lt;/span&gt;&lt;span class=&#34;lnt&#34;&gt;2
&lt;/span&gt;&lt;/code&gt;&lt;/pre&gt;&lt;/td&gt;
&lt;td class=&#34;lntd&#34;&gt;
&lt;pre tabindex=&#34;0&#34; class=&#34;chroma&#34;&gt;&lt;code class=&#34;language-bash&#34; data-lang=&#34;bash&#34;&gt;&lt;span class=&#34;line&#34;&gt;&lt;span class=&#34;cl&#34;&gt;&lt;span class=&#34;c1&#34;&gt;# Choose your clouds:&lt;/span&gt;
&lt;/span&gt;&lt;/span&gt;&lt;span class=&#34;line&#34;&gt;&lt;span class=&#34;cl&#34;&gt;pip install -U &lt;span class=&#34;s2&#34;&gt;&amp;#34;skypilot[kubernetes,aws,gcp,azure,oci,lambda,runpod,fluidstack,paperspace,cudo,ibm,scp,nebius]&amp;#34;&lt;/span&gt;
&lt;/span&gt;&lt;/span&gt;&lt;/code&gt;&lt;/pre&gt;&lt;/td&gt;&lt;/tr&gt;&lt;/table&gt;
&lt;/div&gt;
&lt;/div&gt;&lt;p&gt;To get the latest features and fixes, use the nightly build or &lt;a class=&#34;link&#34; href=&#34;https://docs.skypilot.co/en/latest/getting-started/installation.html&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;install from source&lt;/a&gt;:&lt;/p&gt;
&lt;div class=&#34;highlight&#34;&gt;&lt;div class=&#34;chroma&#34;&gt;
&lt;table class=&#34;lntable&#34;&gt;&lt;tr&gt;&lt;td class=&#34;lntd&#34;&gt;
&lt;pre tabindex=&#34;0&#34; class=&#34;chroma&#34;&gt;&lt;code&gt;&lt;span class=&#34;lnt&#34;&gt;1
&lt;/span&gt;&lt;span class=&#34;lnt&#34;&gt;2
&lt;/span&gt;&lt;/code&gt;&lt;/pre&gt;&lt;/td&gt;
&lt;td class=&#34;lntd&#34;&gt;
&lt;pre tabindex=&#34;0&#34; class=&#34;chroma&#34;&gt;&lt;code class=&#34;language-bash&#34; data-lang=&#34;bash&#34;&gt;&lt;span class=&#34;line&#34;&gt;&lt;span class=&#34;cl&#34;&gt;&lt;span class=&#34;c1&#34;&gt;# Choose your clouds:&lt;/span&gt;
&lt;/span&gt;&lt;/span&gt;&lt;span class=&#34;line&#34;&gt;&lt;span class=&#34;cl&#34;&gt;pip install &lt;span class=&#34;s2&#34;&gt;&amp;#34;skypilot-nightly[kubernetes,aws,gcp,azure,oci,lambda,runpod,fluidstack,paperspace,cudo,ibm,scp,nebius]&amp;#34;&lt;/span&gt;
&lt;/span&gt;&lt;/span&gt;&lt;/code&gt;&lt;/pre&gt;&lt;/td&gt;&lt;/tr&gt;&lt;/table&gt;
&lt;/div&gt;
&lt;/div&gt;&lt;p align=&#34;center&#34;&gt;
  &lt;img src=&#34;docs/source/_static/intro.gif&#34; alt=&#34;SkyPilot&#34;&gt;
&lt;/p&gt;
&lt;p&gt;Current supported infra: Kubernetes, AWS, GCP, Azure, OCI, Lambda Cloud, Fluidstack,
RunPod, Cudo, Digital Ocean, Paperspace, Cloudflare, Samsung, IBM, Vast.ai,
VMware vSphere, Nebius.&lt;/p&gt;
&lt;p align=&#34;center&#34;&gt;
  &lt;picture&gt;
    &lt;source media=&#34;(prefers-color-scheme: dark)&#34; srcset=&#34;https://raw.githubusercontent.com/skypilot-org/skypilot/master/docs/source/images/cloud-logos-dark.png&#34;&gt;
    &lt;img alt=&#34;SkyPilot&#34; src=&#34;https://raw.githubusercontent.com/skypilot-org/skypilot/master/docs/source/images/cloud-logos-light.png&#34; width=85%&gt;
  &lt;/picture&gt;
&lt;/p&gt;
&lt;!-- source xcf file: https://drive.google.com/drive/folders/1S_acjRsAD3T14qMeEnf6FFrIwHu_Gs_f?usp=drive_link --&gt;
&lt;h2 id=&#34;getting-started&#34;&gt;Getting started
&lt;/h2&gt;&lt;p&gt;You can find our documentation &lt;a class=&#34;link&#34; href=&#34;https://docs.skypilot.co/&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;here&lt;/a&gt;.&lt;/p&gt;
&lt;ul&gt;
&lt;li&gt;&lt;a class=&#34;link&#34; href=&#34;https://docs.skypilot.co/en/latest/getting-started/installation.html&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;Installation&lt;/a&gt;&lt;/li&gt;
&lt;li&gt;&lt;a class=&#34;link&#34; href=&#34;https://docs.skypilot.co/en/latest/getting-started/quickstart.html&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;Quickstart&lt;/a&gt;&lt;/li&gt;
&lt;li&gt;&lt;a class=&#34;link&#34; href=&#34;https://docs.skypilot.co/en/latest/reference/cli.html&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;CLI reference&lt;/a&gt;&lt;/li&gt;
&lt;/ul&gt;
&lt;h2 id=&#34;skypilot-in-1-minute&#34;&gt;SkyPilot in 1 minute
&lt;/h2&gt;&lt;p&gt;A SkyPilot task specifies: resource requirements, data to be synced, setup commands, and the task commands.&lt;/p&gt;
&lt;p&gt;Once written in this &lt;a class=&#34;link&#34; href=&#34;https://docs.skypilot.co/en/latest/reference/yaml-spec.html&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;&lt;strong&gt;unified interface&lt;/strong&gt;&lt;/a&gt; (YAML or Python API), the task can be launched on any available cloud.  This avoids vendor lock-in, and allows easily moving jobs to a different provider.&lt;/p&gt;
&lt;p&gt;Paste the following into a file &lt;code&gt;my_task.yaml&lt;/code&gt;:&lt;/p&gt;
&lt;div class=&#34;highlight&#34;&gt;&lt;div class=&#34;chroma&#34;&gt;
&lt;table class=&#34;lntable&#34;&gt;&lt;tr&gt;&lt;td class=&#34;lntd&#34;&gt;
&lt;pre tabindex=&#34;0&#34; class=&#34;chroma&#34;&gt;&lt;code&gt;&lt;span class=&#34;lnt&#34;&gt; 1
&lt;/span&gt;&lt;span class=&#34;lnt&#34;&gt; 2
&lt;/span&gt;&lt;span class=&#34;lnt&#34;&gt; 3
&lt;/span&gt;&lt;span class=&#34;lnt&#34;&gt; 4
&lt;/span&gt;&lt;span class=&#34;lnt&#34;&gt; 5
&lt;/span&gt;&lt;span class=&#34;lnt&#34;&gt; 6
&lt;/span&gt;&lt;span class=&#34;lnt&#34;&gt; 7
&lt;/span&gt;&lt;span class=&#34;lnt&#34;&gt; 8
&lt;/span&gt;&lt;span class=&#34;lnt&#34;&gt; 9
&lt;/span&gt;&lt;span class=&#34;lnt&#34;&gt;10
&lt;/span&gt;&lt;span class=&#34;lnt&#34;&gt;11
&lt;/span&gt;&lt;span class=&#34;lnt&#34;&gt;12
&lt;/span&gt;&lt;span class=&#34;lnt&#34;&gt;13
&lt;/span&gt;&lt;span class=&#34;lnt&#34;&gt;14
&lt;/span&gt;&lt;span class=&#34;lnt&#34;&gt;15
&lt;/span&gt;&lt;span class=&#34;lnt&#34;&gt;16
&lt;/span&gt;&lt;span class=&#34;lnt&#34;&gt;17
&lt;/span&gt;&lt;span class=&#34;lnt&#34;&gt;18
&lt;/span&gt;&lt;span class=&#34;lnt&#34;&gt;19
&lt;/span&gt;&lt;/code&gt;&lt;/pre&gt;&lt;/td&gt;
&lt;td class=&#34;lntd&#34;&gt;
&lt;pre tabindex=&#34;0&#34; class=&#34;chroma&#34;&gt;&lt;code class=&#34;language-yaml&#34; data-lang=&#34;yaml&#34;&gt;&lt;span class=&#34;line&#34;&gt;&lt;span class=&#34;cl&#34;&gt;&lt;span class=&#34;nt&#34;&gt;resources&lt;/span&gt;&lt;span class=&#34;p&#34;&gt;:&lt;/span&gt;&lt;span class=&#34;w&#34;&gt;
&lt;/span&gt;&lt;/span&gt;&lt;/span&gt;&lt;span class=&#34;line&#34;&gt;&lt;span class=&#34;cl&#34;&gt;&lt;span class=&#34;w&#34;&gt;  &lt;/span&gt;&lt;span class=&#34;nt&#34;&gt;accelerators&lt;/span&gt;&lt;span class=&#34;p&#34;&gt;:&lt;/span&gt;&lt;span class=&#34;w&#34;&gt; &lt;/span&gt;&lt;span class=&#34;l&#34;&gt;A100:8 &lt;/span&gt;&lt;span class=&#34;w&#34;&gt; &lt;/span&gt;&lt;span class=&#34;c&#34;&gt;# 8x NVIDIA A100 GPU&lt;/span&gt;&lt;span class=&#34;w&#34;&gt;
&lt;/span&gt;&lt;/span&gt;&lt;/span&gt;&lt;span class=&#34;line&#34;&gt;&lt;span class=&#34;cl&#34;&gt;&lt;span class=&#34;w&#34;&gt;
&lt;/span&gt;&lt;/span&gt;&lt;/span&gt;&lt;span class=&#34;line&#34;&gt;&lt;span class=&#34;cl&#34;&gt;&lt;span class=&#34;nt&#34;&gt;num_nodes&lt;/span&gt;&lt;span class=&#34;p&#34;&gt;:&lt;/span&gt;&lt;span class=&#34;w&#34;&gt; &lt;/span&gt;&lt;span class=&#34;m&#34;&gt;1&lt;/span&gt;&lt;span class=&#34;w&#34;&gt;  &lt;/span&gt;&lt;span class=&#34;c&#34;&gt;# Number of VMs to launch&lt;/span&gt;&lt;span class=&#34;w&#34;&gt;
&lt;/span&gt;&lt;/span&gt;&lt;/span&gt;&lt;span class=&#34;line&#34;&gt;&lt;span class=&#34;cl&#34;&gt;&lt;span class=&#34;w&#34;&gt;
&lt;/span&gt;&lt;/span&gt;&lt;/span&gt;&lt;span class=&#34;line&#34;&gt;&lt;span class=&#34;cl&#34;&gt;&lt;span class=&#34;c&#34;&gt;# Working directory (optional) containing the project codebase.&lt;/span&gt;&lt;span class=&#34;w&#34;&gt;
&lt;/span&gt;&lt;/span&gt;&lt;/span&gt;&lt;span class=&#34;line&#34;&gt;&lt;span class=&#34;cl&#34;&gt;&lt;span class=&#34;c&#34;&gt;# Its contents are synced to ~/sky_workdir/ on the cluster.&lt;/span&gt;&lt;span class=&#34;w&#34;&gt;
&lt;/span&gt;&lt;/span&gt;&lt;/span&gt;&lt;span class=&#34;line&#34;&gt;&lt;span class=&#34;cl&#34;&gt;&lt;span class=&#34;nt&#34;&gt;workdir&lt;/span&gt;&lt;span class=&#34;p&#34;&gt;:&lt;/span&gt;&lt;span class=&#34;w&#34;&gt; &lt;/span&gt;&lt;span class=&#34;l&#34;&gt;~/torch_examples&lt;/span&gt;&lt;span class=&#34;w&#34;&gt;
&lt;/span&gt;&lt;/span&gt;&lt;/span&gt;&lt;span class=&#34;line&#34;&gt;&lt;span class=&#34;cl&#34;&gt;&lt;span class=&#34;w&#34;&gt;
&lt;/span&gt;&lt;/span&gt;&lt;/span&gt;&lt;span class=&#34;line&#34;&gt;&lt;span class=&#34;cl&#34;&gt;&lt;span class=&#34;c&#34;&gt;# Commands to be run before executing the job.&lt;/span&gt;&lt;span class=&#34;w&#34;&gt;
&lt;/span&gt;&lt;/span&gt;&lt;/span&gt;&lt;span class=&#34;line&#34;&gt;&lt;span class=&#34;cl&#34;&gt;&lt;span class=&#34;c&#34;&gt;# Typical use: pip install -r requirements.txt, git clone, etc.&lt;/span&gt;&lt;span class=&#34;w&#34;&gt;
&lt;/span&gt;&lt;/span&gt;&lt;/span&gt;&lt;span class=&#34;line&#34;&gt;&lt;span class=&#34;cl&#34;&gt;&lt;span class=&#34;nt&#34;&gt;setup&lt;/span&gt;&lt;span class=&#34;p&#34;&gt;:&lt;/span&gt;&lt;span class=&#34;w&#34;&gt; &lt;/span&gt;&lt;span class=&#34;p&#34;&gt;|&lt;/span&gt;&lt;span class=&#34;sd&#34;&gt;
&lt;/span&gt;&lt;/span&gt;&lt;/span&gt;&lt;span class=&#34;line&#34;&gt;&lt;span class=&#34;cl&#34;&gt;&lt;span class=&#34;sd&#34;&gt;  pip install &amp;#34;torch&amp;lt;2.2&amp;#34; torchvision --index-url https://download.pytorch.org/whl/cu121&lt;/span&gt;&lt;span class=&#34;w&#34;&gt;
&lt;/span&gt;&lt;/span&gt;&lt;/span&gt;&lt;span class=&#34;line&#34;&gt;&lt;span class=&#34;cl&#34;&gt;&lt;span class=&#34;w&#34;&gt;
&lt;/span&gt;&lt;/span&gt;&lt;/span&gt;&lt;span class=&#34;line&#34;&gt;&lt;span class=&#34;cl&#34;&gt;&lt;span class=&#34;c&#34;&gt;# Commands to run as a job.&lt;/span&gt;&lt;span class=&#34;w&#34;&gt;
&lt;/span&gt;&lt;/span&gt;&lt;/span&gt;&lt;span class=&#34;line&#34;&gt;&lt;span class=&#34;cl&#34;&gt;&lt;span class=&#34;c&#34;&gt;# Typical use: launch the main program.&lt;/span&gt;&lt;span class=&#34;w&#34;&gt;
&lt;/span&gt;&lt;/span&gt;&lt;/span&gt;&lt;span class=&#34;line&#34;&gt;&lt;span class=&#34;cl&#34;&gt;&lt;span class=&#34;nt&#34;&gt;run&lt;/span&gt;&lt;span class=&#34;p&#34;&gt;:&lt;/span&gt;&lt;span class=&#34;w&#34;&gt; &lt;/span&gt;&lt;span class=&#34;p&#34;&gt;|&lt;/span&gt;&lt;span class=&#34;sd&#34;&gt;
&lt;/span&gt;&lt;/span&gt;&lt;/span&gt;&lt;span class=&#34;line&#34;&gt;&lt;span class=&#34;cl&#34;&gt;&lt;span class=&#34;sd&#34;&gt;  cd mnist
&lt;/span&gt;&lt;/span&gt;&lt;/span&gt;&lt;span class=&#34;line&#34;&gt;&lt;span class=&#34;cl&#34;&gt;&lt;span class=&#34;sd&#34;&gt;  python main.py --epochs 1&lt;/span&gt;&lt;span class=&#34;w&#34;&gt;
&lt;/span&gt;&lt;/span&gt;&lt;/span&gt;&lt;/code&gt;&lt;/pre&gt;&lt;/td&gt;&lt;/tr&gt;&lt;/table&gt;
&lt;/div&gt;
&lt;/div&gt;&lt;p&gt;Prepare the workdir by cloning:&lt;/p&gt;
&lt;div class=&#34;highlight&#34;&gt;&lt;div class=&#34;chroma&#34;&gt;
&lt;table class=&#34;lntable&#34;&gt;&lt;tr&gt;&lt;td class=&#34;lntd&#34;&gt;
&lt;pre tabindex=&#34;0&#34; class=&#34;chroma&#34;&gt;&lt;code&gt;&lt;span class=&#34;lnt&#34;&gt;1
&lt;/span&gt;&lt;/code&gt;&lt;/pre&gt;&lt;/td&gt;
&lt;td class=&#34;lntd&#34;&gt;
&lt;pre tabindex=&#34;0&#34; class=&#34;chroma&#34;&gt;&lt;code class=&#34;language-bash&#34; data-lang=&#34;bash&#34;&gt;&lt;span class=&#34;line&#34;&gt;&lt;span class=&#34;cl&#34;&gt;git clone https://github.com/pytorch/examples.git ~/torch_examples
&lt;/span&gt;&lt;/span&gt;&lt;/code&gt;&lt;/pre&gt;&lt;/td&gt;&lt;/tr&gt;&lt;/table&gt;
&lt;/div&gt;
&lt;/div&gt;&lt;p&gt;Launch with &lt;code&gt;sky launch&lt;/code&gt; (note: &lt;a class=&#34;link&#34; href=&#34;https://docs.skypilot.co/en/latest/cloud-setup/quota.html&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;access to GPU instances&lt;/a&gt; is needed for this example):&lt;/p&gt;
&lt;div class=&#34;highlight&#34;&gt;&lt;div class=&#34;chroma&#34;&gt;
&lt;table class=&#34;lntable&#34;&gt;&lt;tr&gt;&lt;td class=&#34;lntd&#34;&gt;
&lt;pre tabindex=&#34;0&#34; class=&#34;chroma&#34;&gt;&lt;code&gt;&lt;span class=&#34;lnt&#34;&gt;1
&lt;/span&gt;&lt;/code&gt;&lt;/pre&gt;&lt;/td&gt;
&lt;td class=&#34;lntd&#34;&gt;
&lt;pre tabindex=&#34;0&#34; class=&#34;chroma&#34;&gt;&lt;code class=&#34;language-bash&#34; data-lang=&#34;bash&#34;&gt;&lt;span class=&#34;line&#34;&gt;&lt;span class=&#34;cl&#34;&gt;sky launch my_task.yaml
&lt;/span&gt;&lt;/span&gt;&lt;/code&gt;&lt;/pre&gt;&lt;/td&gt;&lt;/tr&gt;&lt;/table&gt;
&lt;/div&gt;
&lt;/div&gt;&lt;p&gt;SkyPilot then performs the heavy lifting for you, including:&lt;/p&gt;
&lt;ol&gt;
&lt;li&gt;Find the lowest priced VM instance type across different clouds&lt;/li&gt;
&lt;li&gt;Provision the VM, with auto-failover if the cloud returns capacity errors&lt;/li&gt;
&lt;li&gt;Sync the local &lt;code&gt;workdir&lt;/code&gt; to the VM&lt;/li&gt;
&lt;li&gt;Run the task&amp;rsquo;s &lt;code&gt;setup&lt;/code&gt; commands to prepare the VM for running the task&lt;/li&gt;
&lt;li&gt;Run the task&amp;rsquo;s &lt;code&gt;run&lt;/code&gt; commands&lt;/li&gt;
&lt;/ol&gt;
&lt;p&gt;See &lt;a class=&#34;link&#34; href=&#34;https://docs.skypilot.co/en/latest/getting-started/quickstart.html&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;Quickstart&lt;/a&gt; to get started with SkyPilot.&lt;/p&gt;
&lt;h2 id=&#34;runnable-examples&#34;&gt;Runnable examples
&lt;/h2&gt;&lt;p&gt;See &lt;a class=&#34;link&#34; href=&#34;https://docs.skypilot.co/en/docs-examples/examples/index.html&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;&lt;strong&gt;SkyPilot examples&lt;/strong&gt;&lt;/a&gt; that cover: development, training, serving, LLM models, AI apps, and common frameworks.&lt;/p&gt;
&lt;p&gt;Latest featured examples:&lt;/p&gt;
&lt;table&gt;
  &lt;thead&gt;
      &lt;tr&gt;
          &lt;th&gt;Task&lt;/th&gt;
          &lt;th&gt;Examples&lt;/th&gt;
      &lt;/tr&gt;
  &lt;/thead&gt;
  &lt;tbody&gt;
      &lt;tr&gt;
          &lt;td&gt;Training&lt;/td&gt;
          &lt;td&gt;&lt;a class=&#34;link&#34; href=&#34;https://docs.skypilot.co/en/latest/getting-started/tutorial.html&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;PyTorch&lt;/a&gt;, &lt;a class=&#34;link&#34; href=&#34;https://docs.skypilot.co/en/latest/examples/training/deepspeed.html&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;DeepSpeed&lt;/a&gt;, &lt;a class=&#34;link&#34; href=&#34;https://docs.skypilot.co/en/latest/examples/training/llama-3_1-finetuning.html&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;Finetune Llama 3&lt;/a&gt;, &lt;a class=&#34;link&#34; href=&#34;https://docs.skypilot.co/en/latest/examples/training/nemo.html&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;NeMo&lt;/a&gt;, &lt;a class=&#34;link&#34; href=&#34;https://docs.skypilot.co/en/latest/examples/training/ray.html&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;Ray&lt;/a&gt;, &lt;a class=&#34;link&#34; href=&#34;https://docs.skypilot.co/en/latest/examples/training/unsloth.html&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;Unsloth&lt;/a&gt;, &lt;a class=&#34;link&#34; href=&#34;https://docs.skypilot.co/en/latest/examples/training/tpu.html&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;Jax/TPU&lt;/a&gt;&lt;/td&gt;
      &lt;/tr&gt;
      &lt;tr&gt;
          &lt;td&gt;Serving&lt;/td&gt;
          &lt;td&gt;&lt;a class=&#34;link&#34; href=&#34;https://docs.skypilot.co/en/latest/examples/serving/vllm.html&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;vLLM&lt;/a&gt;, &lt;a class=&#34;link&#34; href=&#34;https://docs.skypilot.co/en/latest/examples/serving/sglang.html&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;SGLang&lt;/a&gt;, &lt;a class=&#34;link&#34; href=&#34;https://docs.skypilot.co/en/latest/examples/serving/ollama.html&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;Ollama&lt;/a&gt;&lt;/td&gt;
      &lt;/tr&gt;
      &lt;tr&gt;
          &lt;td&gt;Models&lt;/td&gt;
          &lt;td&gt;&lt;a class=&#34;link&#34; href=&#34;https://docs.skypilot.co/en/latest/examples/models/deepseek-r1.html&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;DeepSeek-R1&lt;/a&gt;, &lt;a class=&#34;link&#34; href=&#34;https://docs.skypilot.co/en/latest/examples/models/llama-3.html&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;Llama 3&lt;/a&gt;, &lt;a class=&#34;link&#34; href=&#34;https://docs.skypilot.co/en/latest/examples/models/codellama.html&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;CodeLlama&lt;/a&gt;, &lt;a class=&#34;link&#34; href=&#34;https://docs.skypilot.co/en/latest/examples/models/qwen.html&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;Qwen&lt;/a&gt;, &lt;a class=&#34;link&#34; href=&#34;https://docs.skypilot.co/en/latest/examples/models/mixtral.html&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;Mixtral&lt;/a&gt;&lt;/td&gt;
      &lt;/tr&gt;
      &lt;tr&gt;
          &lt;td&gt;AI apps&lt;/td&gt;
          &lt;td&gt;&lt;a class=&#34;link&#34; href=&#34;https://docs.skypilot.co/en/latest/examples/applications/rag.html&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;RAG&lt;/a&gt;, &lt;a class=&#34;link&#34; href=&#34;https://docs.skypilot.co/en/latest/examples/applications/vector_database.html&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;vector databases&lt;/a&gt; (ChromaDB, CLIP)&lt;/td&gt;
      &lt;/tr&gt;
      &lt;tr&gt;
          &lt;td&gt;Common frameworks&lt;/td&gt;
          &lt;td&gt;&lt;a class=&#34;link&#34; href=&#34;https://docs.skypilot.co/en/latest/examples/frameworks/airflow.html&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;Airflow&lt;/a&gt;, &lt;a class=&#34;link&#34; href=&#34;https://docs.skypilot.co/en/latest/examples/frameworks/jupyter.html&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;Jupyter&lt;/a&gt;&lt;/td&gt;
      &lt;/tr&gt;
  &lt;/tbody&gt;
&lt;/table&gt;
&lt;p&gt;Source files and more examples can be found in &lt;a class=&#34;link&#34; href=&#34;https://github.com/skypilot-org/skypilot/tree/master/llm&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;&lt;code&gt;llm/&lt;/code&gt;&lt;/a&gt; and &lt;a class=&#34;link&#34; href=&#34;https://github.com/skypilot-org/skypilot/tree/master/examples&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;&lt;code&gt;examples/&lt;/code&gt;&lt;/a&gt;.&lt;/p&gt;
&lt;h2 id=&#34;more-information&#34;&gt;More information
&lt;/h2&gt;&lt;p&gt;To learn more, see &lt;a class=&#34;link&#34; href=&#34;https://docs.skypilot.co/en/latest/overview.html&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;SkyPilot Overview&lt;/a&gt;, &lt;a class=&#34;link&#34; href=&#34;https://docs.skypilot.co/en/latest/&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;SkyPilot docs&lt;/a&gt;, and &lt;a class=&#34;link&#34; href=&#34;https://blog.skypilot.co/&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;SkyPilot blog&lt;/a&gt;.&lt;/p&gt;
&lt;p&gt;Case studies and integrations: &lt;a class=&#34;link&#34; href=&#34;https://blog.skypilot.co/community/&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;Community Spotlights&lt;/a&gt;&lt;/p&gt;
&lt;p&gt;Follow updates:&lt;/p&gt;
&lt;ul&gt;
&lt;li&gt;&lt;a class=&#34;link&#34; href=&#34;http://slack.skypilot.co&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;Slack&lt;/a&gt;&lt;/li&gt;
&lt;li&gt;&lt;a class=&#34;link&#34; href=&#34;https://twitter.com/skypilot_org&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;X / Twitter&lt;/a&gt;&lt;/li&gt;
&lt;li&gt;&lt;a class=&#34;link&#34; href=&#34;https://www.linkedin.com/company/skypilot-oss/&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;LinkedIn&lt;/a&gt;&lt;/li&gt;
&lt;li&gt;&lt;a class=&#34;link&#34; href=&#34;https://blog.skypilot.co/&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;SkyPilot Blog&lt;/a&gt; (&lt;a class=&#34;link&#34; href=&#34;https://blog.skypilot.co/introducing-skypilot/&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;Introductory blog post&lt;/a&gt;)&lt;/li&gt;
&lt;/ul&gt;
&lt;p&gt;Read the research:&lt;/p&gt;
&lt;ul&gt;
&lt;li&gt;&lt;a class=&#34;link&#34; href=&#34;https://www.usenix.org/system/files/nsdi23-yang-zongheng.pdf&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;SkyPilot paper&lt;/a&gt; and &lt;a class=&#34;link&#34; href=&#34;https://www.usenix.org/conference/nsdi23/presentation/yang-zongheng&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;talk&lt;/a&gt; (NSDI 2023)&lt;/li&gt;
&lt;li&gt;&lt;a class=&#34;link&#34; href=&#34;https://arxiv.org/abs/2205.07147&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;Sky Computing whitepaper&lt;/a&gt;&lt;/li&gt;
&lt;li&gt;&lt;a class=&#34;link&#34; href=&#34;https://sigops.org/s/conferences/hotos/2021/papers/hotos21-s02-stoica.pdf&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;Sky Computing vision paper&lt;/a&gt; (HotOS 2021)&lt;/li&gt;
&lt;li&gt;&lt;a class=&#34;link&#34; href=&#34;https://arxiv.org/pdf/2411.01438&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;SkyServe: AI serving across regions and clouds&lt;/a&gt; (EuroSys 2025)&lt;/li&gt;
&lt;li&gt;&lt;a class=&#34;link&#34; href=&#34;https://www.usenix.org/conference/nsdi24/presentation/wu-zhanghao&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;Managed jobs spot instance policy&lt;/a&gt; (NSDI 2024)&lt;/li&gt;
&lt;/ul&gt;
&lt;p&gt;SkyPilot was initially started at the &lt;a class=&#34;link&#34; href=&#34;https://sky.cs.berkeley.edu&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;Sky Computing Lab&lt;/a&gt; at UC Berkeley and has since gained many industry contributors. To read about the project&amp;rsquo;s origin and vision, see &lt;a class=&#34;link&#34; href=&#34;https://docs.skypilot.co/en/latest/sky-computing.html&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;Concept: Sky Computing&lt;/a&gt;.&lt;/p&gt;
&lt;h2 id=&#34;questions-and-feedback&#34;&gt;Questions and feedback
&lt;/h2&gt;&lt;p&gt;We are excited to hear your feedback:&lt;/p&gt;
&lt;ul&gt;
&lt;li&gt;For issues and feature requests, please &lt;a class=&#34;link&#34; href=&#34;https://github.com/skypilot-org/skypilot/issues/new&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;open a GitHub issue&lt;/a&gt;.&lt;/li&gt;
&lt;li&gt;For questions, please use &lt;a class=&#34;link&#34; href=&#34;https://github.com/skypilot-org/skypilot/discussions&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;GitHub Discussions&lt;/a&gt;.&lt;/li&gt;
&lt;/ul&gt;
&lt;p&gt;For general discussions, join us on the &lt;a class=&#34;link&#34; href=&#34;http://slack.skypilot.co&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;SkyPilot Slack&lt;/a&gt;.&lt;/p&gt;
&lt;h2 id=&#34;contributing&#34;&gt;Contributing
&lt;/h2&gt;&lt;p&gt;We welcome all contributions to the project! See &lt;a class=&#34;link&#34; href=&#34;CONTRIBUTING.md&#34; &gt;CONTRIBUTING&lt;/a&gt; for how to get involved.&lt;/p&gt;
</description>
        </item>
        <item>
        <title>tensorflow</title>
        <link>https://producthunt.programnotes.cn/en/p/tensorflow/</link>
        <pubDate>Fri, 25 Apr 2025 15:28:35 +0800</pubDate>
        
        <guid>https://producthunt.programnotes.cn/en/p/tensorflow/</guid>
        <description>&lt;img src="https://images.unsplash.com/photo-1639475377520-b256a5d204b1?ixid=M3w0NjAwMjJ8MHwxfHJhbmRvbXx8fHx8fHx8fDE3NDU1NjYwMzd8&amp;ixlib=rb-4.0.3" alt="Featured image of post tensorflow" /&gt;&lt;h1 id=&#34;tensorflowtensorflow&#34;&gt;&lt;a class=&#34;link&#34; href=&#34;https://github.com/tensorflow/tensorflow&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;tensorflow/tensorflow&lt;/a&gt;
&lt;/h1&gt;&lt;div align=&#34;center&#34;&gt;
  &lt;img src=&#34;https://www.tensorflow.org/images/tf_logo_horizontal.png&#34;&gt;
&lt;/div&gt;
&lt;p&gt;&lt;a class=&#34;link&#34; href=&#34;https://badge.fury.io/py/tensorflow&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;&lt;img src=&#34;https://img.shields.io/pypi/pyversions/tensorflow.svg&#34;
	
	
	
	loading=&#34;lazy&#34;
	
		alt=&#34;Python&#34;
	
	
&gt;&lt;/a&gt;
&lt;a class=&#34;link&#34; href=&#34;https://badge.fury.io/py/tensorflow&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;&lt;img src=&#34;https://badge.fury.io/py/tensorflow.svg&#34;
	
	
	
	loading=&#34;lazy&#34;
	
		alt=&#34;PyPI&#34;
	
	
&gt;&lt;/a&gt;
&lt;a class=&#34;link&#34; href=&#34;https://doi.org/10.5281/zenodo.4724125&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;&lt;img src=&#34;https://zenodo.org/badge/DOI/10.5281/zenodo.4724125.svg&#34;
	
	
	
	loading=&#34;lazy&#34;
	
		alt=&#34;DOI&#34;
	
	
&gt;&lt;/a&gt;
&lt;a class=&#34;link&#34; href=&#34;https://bestpractices.coreinfrastructure.org/projects/1486&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;&lt;img src=&#34;https://bestpractices.coreinfrastructure.org/projects/1486/badge&#34;
	
	
	
	loading=&#34;lazy&#34;
	
		alt=&#34;CII Best Practices&#34;
	
	
&gt;&lt;/a&gt;
&lt;a class=&#34;link&#34; href=&#34;https://securityscorecards.dev/viewer/?uri=github.com/tensorflow/tensorflow&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;&lt;img src=&#34;https://api.securityscorecards.dev/projects/github.com/tensorflow/tensorflow/badge&#34;
	
	
	
	loading=&#34;lazy&#34;
	
		alt=&#34;OpenSSF Scorecard&#34;
	
	
&gt;&lt;/a&gt;
&lt;a class=&#34;link&#34; href=&#34;https://bugs.chromium.org/p/oss-fuzz/issues/list?sort=-opened&amp;amp;can=1&amp;amp;q=proj:tensorflow&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;&lt;img src=&#34;https://oss-fuzz-build-logs.storage.googleapis.com/badges/tensorflow.svg&#34;
	
	
	
	loading=&#34;lazy&#34;
	
		alt=&#34;Fuzzing Status&#34;
	
	
&gt;&lt;/a&gt;
&lt;a class=&#34;link&#34; href=&#34;https://bugs.chromium.org/p/oss-fuzz/issues/list?sort=-opened&amp;amp;can=1&amp;amp;q=proj:tensorflow-py&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;&lt;img src=&#34;https://oss-fuzz-build-logs.storage.googleapis.com/badges/tensorflow-py.svg&#34;
	
	
	
	loading=&#34;lazy&#34;
	
		alt=&#34;Fuzzing Status&#34;
	
	
&gt;&lt;/a&gt;
&lt;a class=&#34;link&#34; href=&#34;https://ossrank.com/p/44&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;&lt;img src=&#34;https://shields.io/endpoint?url=https://ossrank.com/shield/44&#34;
	
	
	
	loading=&#34;lazy&#34;
	
		alt=&#34;OSSRank&#34;
	
	
&gt;&lt;/a&gt;
&lt;a class=&#34;link&#34; href=&#34;CODE_OF_CONDUCT.md&#34; &gt;&lt;img src=&#34;https://img.shields.io/badge/Contributor%20Covenant-v1.4%20adopted-ff69b4.svg&#34;
	
	
	
	loading=&#34;lazy&#34;
	
		alt=&#34;Contributor Covenant&#34;
	
	
&gt;&lt;/a&gt;
&lt;a class=&#34;link&#34; href=&#34;https://tensorflow.github.io/build#TF%20Official%20Continuous&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;&lt;img src=&#34;https://tensorflow.github.io/build/TF%20Official%20Continuous.svg&#34;
	
	
	
	loading=&#34;lazy&#34;
	
		alt=&#34;TF Official Continuous&#34;
	
	
&gt;&lt;/a&gt;
&lt;a class=&#34;link&#34; href=&#34;https://tensorflow.github.io/build#TF%20Official%20Nightly&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;&lt;img src=&#34;https://tensorflow.github.io/build/TF%20Official%20Nightly.svg&#34;
	
	
	
	loading=&#34;lazy&#34;
	
		alt=&#34;TF Official Nightly&#34;
	
	
&gt;&lt;/a&gt;&lt;/p&gt;
&lt;table&gt;
  &lt;thead&gt;
      &lt;tr&gt;
          &lt;th&gt;&lt;strong&gt;&lt;code&gt;Documentation&lt;/code&gt;&lt;/strong&gt;&lt;/th&gt;
      &lt;/tr&gt;
  &lt;/thead&gt;
  &lt;tbody&gt;
      &lt;tr&gt;
          &lt;td&gt;&lt;a class=&#34;link&#34; href=&#34;https://www.tensorflow.org/api_docs/&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;&lt;img src=&#34;https://img.shields.io/badge/api-reference-blue.svg&#34;
	
	
	
	loading=&#34;lazy&#34;
	
		alt=&#34;Documentation&#34;
	
	
&gt;&lt;/a&gt;&lt;/td&gt;
      &lt;/tr&gt;
  &lt;/tbody&gt;
&lt;/table&gt;
&lt;p&gt;&lt;a class=&#34;link&#34; href=&#34;https://www.tensorflow.org/&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;TensorFlow&lt;/a&gt; is an end-to-end open source platform
for machine learning. It has a comprehensive, flexible ecosystem of
&lt;a class=&#34;link&#34; href=&#34;https://www.tensorflow.org/resources/tools&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;tools&lt;/a&gt;,
&lt;a class=&#34;link&#34; href=&#34;https://www.tensorflow.org/resources/libraries-extensions&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;libraries&lt;/a&gt;, and
&lt;a class=&#34;link&#34; href=&#34;https://www.tensorflow.org/community&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;community&lt;/a&gt; resources that let
researchers push the state-of-the-art in ML and developers easily build and
deploy ML-powered applications.&lt;/p&gt;
&lt;p&gt;TensorFlow was originally developed by researchers and engineers working within
the Machine Intelligence team at Google Brain to conduct research in machine
learning and neural networks. However, the framework is versatile enough to be
used in other areas as well.&lt;/p&gt;
&lt;p&gt;TensorFlow provides stable &lt;a class=&#34;link&#34; href=&#34;https://www.tensorflow.org/api_docs/python&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;Python&lt;/a&gt;
and &lt;a class=&#34;link&#34; href=&#34;https://www.tensorflow.org/api_docs/cc&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;C++&lt;/a&gt; APIs, as well as a
non-guaranteed backward compatible API for
&lt;a class=&#34;link&#34; href=&#34;https://www.tensorflow.org/api_docs&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;other languages&lt;/a&gt;.&lt;/p&gt;
&lt;p&gt;Keep up-to-date with release announcements and security updates by subscribing
to
&lt;a class=&#34;link&#34; href=&#34;https://groups.google.com/a/tensorflow.org/forum/#!forum/announce&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;announce@tensorflow.org&lt;/a&gt;.
See all the &lt;a class=&#34;link&#34; href=&#34;https://www.tensorflow.org/community/forums&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;mailing lists&lt;/a&gt;.&lt;/p&gt;
&lt;h2 id=&#34;install&#34;&gt;Install
&lt;/h2&gt;&lt;p&gt;See the &lt;a class=&#34;link&#34; href=&#34;https://www.tensorflow.org/install&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;TensorFlow install guide&lt;/a&gt; for the
&lt;a class=&#34;link&#34; href=&#34;https://www.tensorflow.org/install/pip&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;pip package&lt;/a&gt;, to
&lt;a class=&#34;link&#34; href=&#34;https://www.tensorflow.org/install/gpu&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;enable GPU support&lt;/a&gt;, use a
&lt;a class=&#34;link&#34; href=&#34;https://www.tensorflow.org/install/docker&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;Docker container&lt;/a&gt;, and
&lt;a class=&#34;link&#34; href=&#34;https://www.tensorflow.org/install/source&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;build from source&lt;/a&gt;.&lt;/p&gt;
&lt;p&gt;To install the current release, which includes support for
&lt;a class=&#34;link&#34; href=&#34;https://www.tensorflow.org/install/gpu&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;CUDA-enabled GPU cards&lt;/a&gt; &lt;em&gt;(Ubuntu and
Windows)&lt;/em&gt;:&lt;/p&gt;
&lt;div class=&#34;highlight&#34;&gt;&lt;div class=&#34;chroma&#34;&gt;
&lt;table class=&#34;lntable&#34;&gt;&lt;tr&gt;&lt;td class=&#34;lntd&#34;&gt;
&lt;pre tabindex=&#34;0&#34; class=&#34;chroma&#34;&gt;&lt;code&gt;&lt;span class=&#34;lnt&#34;&gt;1
&lt;/span&gt;&lt;/code&gt;&lt;/pre&gt;&lt;/td&gt;
&lt;td class=&#34;lntd&#34;&gt;
&lt;pre tabindex=&#34;0&#34; class=&#34;chroma&#34;&gt;&lt;code class=&#34;language-fallback&#34; data-lang=&#34;fallback&#34;&gt;&lt;span class=&#34;line&#34;&gt;&lt;span class=&#34;cl&#34;&gt;$ pip install tensorflow
&lt;/span&gt;&lt;/span&gt;&lt;/code&gt;&lt;/pre&gt;&lt;/td&gt;&lt;/tr&gt;&lt;/table&gt;
&lt;/div&gt;
&lt;/div&gt;&lt;p&gt;Other devices (DirectX and MacOS-metal) are supported using
&lt;a class=&#34;link&#34; href=&#34;https://www.tensorflow.org/install/gpu_plugins#available_devices&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;Device plugins&lt;/a&gt;.&lt;/p&gt;
&lt;p&gt;A smaller CPU-only package is also available:&lt;/p&gt;
&lt;div class=&#34;highlight&#34;&gt;&lt;div class=&#34;chroma&#34;&gt;
&lt;table class=&#34;lntable&#34;&gt;&lt;tr&gt;&lt;td class=&#34;lntd&#34;&gt;
&lt;pre tabindex=&#34;0&#34; class=&#34;chroma&#34;&gt;&lt;code&gt;&lt;span class=&#34;lnt&#34;&gt;1
&lt;/span&gt;&lt;/code&gt;&lt;/pre&gt;&lt;/td&gt;
&lt;td class=&#34;lntd&#34;&gt;
&lt;pre tabindex=&#34;0&#34; class=&#34;chroma&#34;&gt;&lt;code class=&#34;language-fallback&#34; data-lang=&#34;fallback&#34;&gt;&lt;span class=&#34;line&#34;&gt;&lt;span class=&#34;cl&#34;&gt;$ pip install tensorflow-cpu
&lt;/span&gt;&lt;/span&gt;&lt;/code&gt;&lt;/pre&gt;&lt;/td&gt;&lt;/tr&gt;&lt;/table&gt;
&lt;/div&gt;
&lt;/div&gt;&lt;p&gt;To update TensorFlow to the latest version, add &lt;code&gt;--upgrade&lt;/code&gt; flag to the above
commands.&lt;/p&gt;
&lt;p&gt;&lt;em&gt;Nightly binaries are available for testing using the
&lt;a class=&#34;link&#34; href=&#34;https://pypi.python.org/pypi/tf-nightly&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;tf-nightly&lt;/a&gt; and
&lt;a class=&#34;link&#34; href=&#34;https://pypi.python.org/pypi/tf-nightly-cpu&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;tf-nightly-cpu&lt;/a&gt; packages on PyPI.&lt;/em&gt;&lt;/p&gt;
&lt;h4 id=&#34;try-your-first-tensorflow-program&#34;&gt;&lt;em&gt;Try your first TensorFlow program&lt;/em&gt;
&lt;/h4&gt;&lt;div class=&#34;highlight&#34;&gt;&lt;div class=&#34;chroma&#34;&gt;
&lt;table class=&#34;lntable&#34;&gt;&lt;tr&gt;&lt;td class=&#34;lntd&#34;&gt;
&lt;pre tabindex=&#34;0&#34; class=&#34;chroma&#34;&gt;&lt;code&gt;&lt;span class=&#34;lnt&#34;&gt;1
&lt;/span&gt;&lt;/code&gt;&lt;/pre&gt;&lt;/td&gt;
&lt;td class=&#34;lntd&#34;&gt;
&lt;pre tabindex=&#34;0&#34; class=&#34;chroma&#34;&gt;&lt;code class=&#34;language-shell&#34; data-lang=&#34;shell&#34;&gt;&lt;span class=&#34;line&#34;&gt;&lt;span class=&#34;cl&#34;&gt;$ python
&lt;/span&gt;&lt;/span&gt;&lt;/code&gt;&lt;/pre&gt;&lt;/td&gt;&lt;/tr&gt;&lt;/table&gt;
&lt;/div&gt;
&lt;/div&gt;&lt;div class=&#34;highlight&#34;&gt;&lt;div class=&#34;chroma&#34;&gt;
&lt;table class=&#34;lntable&#34;&gt;&lt;tr&gt;&lt;td class=&#34;lntd&#34;&gt;
&lt;pre tabindex=&#34;0&#34; class=&#34;chroma&#34;&gt;&lt;code&gt;&lt;span class=&#34;lnt&#34;&gt;1
&lt;/span&gt;&lt;span class=&#34;lnt&#34;&gt;2
&lt;/span&gt;&lt;span class=&#34;lnt&#34;&gt;3
&lt;/span&gt;&lt;span class=&#34;lnt&#34;&gt;4
&lt;/span&gt;&lt;span class=&#34;lnt&#34;&gt;5
&lt;/span&gt;&lt;span class=&#34;lnt&#34;&gt;6
&lt;/span&gt;&lt;/code&gt;&lt;/pre&gt;&lt;/td&gt;
&lt;td class=&#34;lntd&#34;&gt;
&lt;pre tabindex=&#34;0&#34; class=&#34;chroma&#34;&gt;&lt;code class=&#34;language-python&#34; data-lang=&#34;python&#34;&gt;&lt;span class=&#34;line&#34;&gt;&lt;span class=&#34;cl&#34;&gt;&lt;span class=&#34;o&#34;&gt;&amp;gt;&amp;gt;&amp;gt;&lt;/span&gt; &lt;span class=&#34;kn&#34;&gt;import&lt;/span&gt; &lt;span class=&#34;nn&#34;&gt;tensorflow&lt;/span&gt; &lt;span class=&#34;k&#34;&gt;as&lt;/span&gt; &lt;span class=&#34;nn&#34;&gt;tf&lt;/span&gt;
&lt;/span&gt;&lt;/span&gt;&lt;span class=&#34;line&#34;&gt;&lt;span class=&#34;cl&#34;&gt;&lt;span class=&#34;o&#34;&gt;&amp;gt;&amp;gt;&amp;gt;&lt;/span&gt; &lt;span class=&#34;n&#34;&gt;tf&lt;/span&gt;&lt;span class=&#34;o&#34;&gt;.&lt;/span&gt;&lt;span class=&#34;n&#34;&gt;add&lt;/span&gt;&lt;span class=&#34;p&#34;&gt;(&lt;/span&gt;&lt;span class=&#34;mi&#34;&gt;1&lt;/span&gt;&lt;span class=&#34;p&#34;&gt;,&lt;/span&gt; &lt;span class=&#34;mi&#34;&gt;2&lt;/span&gt;&lt;span class=&#34;p&#34;&gt;)&lt;/span&gt;&lt;span class=&#34;o&#34;&gt;.&lt;/span&gt;&lt;span class=&#34;n&#34;&gt;numpy&lt;/span&gt;&lt;span class=&#34;p&#34;&gt;()&lt;/span&gt;
&lt;/span&gt;&lt;/span&gt;&lt;span class=&#34;line&#34;&gt;&lt;span class=&#34;cl&#34;&gt;&lt;span class=&#34;mi&#34;&gt;3&lt;/span&gt;
&lt;/span&gt;&lt;/span&gt;&lt;span class=&#34;line&#34;&gt;&lt;span class=&#34;cl&#34;&gt;&lt;span class=&#34;o&#34;&gt;&amp;gt;&amp;gt;&amp;gt;&lt;/span&gt; &lt;span class=&#34;n&#34;&gt;hello&lt;/span&gt; &lt;span class=&#34;o&#34;&gt;=&lt;/span&gt; &lt;span class=&#34;n&#34;&gt;tf&lt;/span&gt;&lt;span class=&#34;o&#34;&gt;.&lt;/span&gt;&lt;span class=&#34;n&#34;&gt;constant&lt;/span&gt;&lt;span class=&#34;p&#34;&gt;(&lt;/span&gt;&lt;span class=&#34;s1&#34;&gt;&amp;#39;Hello, TensorFlow!&amp;#39;&lt;/span&gt;&lt;span class=&#34;p&#34;&gt;)&lt;/span&gt;
&lt;/span&gt;&lt;/span&gt;&lt;span class=&#34;line&#34;&gt;&lt;span class=&#34;cl&#34;&gt;&lt;span class=&#34;o&#34;&gt;&amp;gt;&amp;gt;&amp;gt;&lt;/span&gt; &lt;span class=&#34;n&#34;&gt;hello&lt;/span&gt;&lt;span class=&#34;o&#34;&gt;.&lt;/span&gt;&lt;span class=&#34;n&#34;&gt;numpy&lt;/span&gt;&lt;span class=&#34;p&#34;&gt;()&lt;/span&gt;
&lt;/span&gt;&lt;/span&gt;&lt;span class=&#34;line&#34;&gt;&lt;span class=&#34;cl&#34;&gt;&lt;span class=&#34;sa&#34;&gt;b&lt;/span&gt;&lt;span class=&#34;s1&#34;&gt;&amp;#39;Hello, TensorFlow!&amp;#39;&lt;/span&gt;
&lt;/span&gt;&lt;/span&gt;&lt;/code&gt;&lt;/pre&gt;&lt;/td&gt;&lt;/tr&gt;&lt;/table&gt;
&lt;/div&gt;
&lt;/div&gt;&lt;p&gt;For more examples, see the
&lt;a class=&#34;link&#34; href=&#34;https://www.tensorflow.org/tutorials/&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;TensorFlow tutorials&lt;/a&gt;.&lt;/p&gt;
&lt;h2 id=&#34;contribution-guidelines&#34;&gt;Contribution guidelines
&lt;/h2&gt;&lt;p&gt;&lt;strong&gt;If you want to contribute to TensorFlow, be sure to review the
&lt;a class=&#34;link&#34; href=&#34;CONTRIBUTING.md&#34; &gt;contribution guidelines&lt;/a&gt;. This project adheres to TensorFlow&amp;rsquo;s
&lt;a class=&#34;link&#34; href=&#34;CODE_OF_CONDUCT.md&#34; &gt;code of conduct&lt;/a&gt;. By participating, you are expected to
uphold this code.&lt;/strong&gt;&lt;/p&gt;
&lt;p&gt;&lt;strong&gt;We use &lt;a class=&#34;link&#34; href=&#34;https://github.com/tensorflow/tensorflow/issues&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;GitHub issues&lt;/a&gt; for
tracking requests and bugs, please see
&lt;a class=&#34;link&#34; href=&#34;https://discuss.tensorflow.org/&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;TensorFlow Forum&lt;/a&gt; for general questions and
discussion, and please direct specific questions to
&lt;a class=&#34;link&#34; href=&#34;https://stackoverflow.com/questions/tagged/tensorflow&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;Stack Overflow&lt;/a&gt;.&lt;/strong&gt;&lt;/p&gt;
&lt;p&gt;The TensorFlow project strives to abide by generally accepted best practices in
open-source software development.&lt;/p&gt;
&lt;h2 id=&#34;patching-guidelines&#34;&gt;Patching guidelines
&lt;/h2&gt;&lt;p&gt;Follow these steps to patch a specific version of TensorFlow, for example, to
apply fixes to bugs or security vulnerabilities:&lt;/p&gt;
&lt;ul&gt;
&lt;li&gt;Clone the TensorFlow repo and switch to the corresponding branch for your
desired TensorFlow version, for example, branch &lt;code&gt;r2.8&lt;/code&gt; for version 2.8.&lt;/li&gt;
&lt;li&gt;Apply (that is, cherry-pick) the desired changes and resolve any code
conflicts.&lt;/li&gt;
&lt;li&gt;Run TensorFlow tests and ensure they pass.&lt;/li&gt;
&lt;li&gt;&lt;a class=&#34;link&#34; href=&#34;https://www.tensorflow.org/install/source&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;Build&lt;/a&gt; the TensorFlow pip
package from source.&lt;/li&gt;
&lt;/ul&gt;
&lt;h2 id=&#34;continuous-build-status&#34;&gt;Continuous build status
&lt;/h2&gt;&lt;p&gt;You can find more community-supported platforms and configurations in the
&lt;a class=&#34;link&#34; href=&#34;https://github.com/tensorflow/build#community-supported-tensorflow-builds&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;TensorFlow SIG Build community builds table&lt;/a&gt;.&lt;/p&gt;
&lt;h3 id=&#34;official-builds&#34;&gt;Official Builds
&lt;/h3&gt;&lt;table&gt;
  &lt;thead&gt;
      &lt;tr&gt;
          &lt;th&gt;Build Type&lt;/th&gt;
          &lt;th&gt;Status&lt;/th&gt;
          &lt;th&gt;Artifacts&lt;/th&gt;
      &lt;/tr&gt;
  &lt;/thead&gt;
  &lt;tbody&gt;
      &lt;tr&gt;
          &lt;td&gt;&lt;strong&gt;Linux CPU&lt;/strong&gt;&lt;/td&gt;
          &lt;td&gt;&lt;a class=&#34;link&#34; href=&#34;https://storage.googleapis.com/tensorflow-kokoro-build-badges/ubuntu-cc.html&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;&lt;img src=&#34;https://storage.googleapis.com/tensorflow-kokoro-build-badges/ubuntu-cc.svg&#34;
	
	
	
	loading=&#34;lazy&#34;
	
		alt=&#34;Status&#34;
	
	
&gt;&lt;/a&gt;&lt;/td&gt;
          &lt;td&gt;&lt;a class=&#34;link&#34; href=&#34;https://pypi.org/project/tf-nightly/&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;PyPI&lt;/a&gt;&lt;/td&gt;
      &lt;/tr&gt;
      &lt;tr&gt;
          &lt;td&gt;&lt;strong&gt;Linux GPU&lt;/strong&gt;&lt;/td&gt;
          &lt;td&gt;&lt;a class=&#34;link&#34; href=&#34;https://storage.googleapis.com/tensorflow-kokoro-build-badges/ubuntu-gpu-py3.html&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;&lt;img src=&#34;https://storage.googleapis.com/tensorflow-kokoro-build-badges/ubuntu-gpu-py3.svg&#34;
	
	
	
	loading=&#34;lazy&#34;
	
		alt=&#34;Status&#34;
	
	
&gt;&lt;/a&gt;&lt;/td&gt;
          &lt;td&gt;&lt;a class=&#34;link&#34; href=&#34;https://pypi.org/project/tf-nightly-gpu/&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;PyPI&lt;/a&gt;&lt;/td&gt;
      &lt;/tr&gt;
      &lt;tr&gt;
          &lt;td&gt;&lt;strong&gt;Linux XLA&lt;/strong&gt;&lt;/td&gt;
          &lt;td&gt;&lt;a class=&#34;link&#34; href=&#34;https://storage.googleapis.com/tensorflow-kokoro-build-badges/ubuntu-xla.html&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;&lt;img src=&#34;https://storage.googleapis.com/tensorflow-kokoro-build-badges/ubuntu-xla.svg&#34;
	
	
	
	loading=&#34;lazy&#34;
	
		alt=&#34;Status&#34;
	
	
&gt;&lt;/a&gt;&lt;/td&gt;
          &lt;td&gt;TBA&lt;/td&gt;
      &lt;/tr&gt;
      &lt;tr&gt;
          &lt;td&gt;&lt;strong&gt;macOS&lt;/strong&gt;&lt;/td&gt;
          &lt;td&gt;&lt;a class=&#34;link&#34; href=&#34;https://storage.googleapis.com/tensorflow-kokoro-build-badges/macos-py2-cc.html&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;&lt;img src=&#34;https://storage.googleapis.com/tensorflow-kokoro-build-badges/macos-py2-cc.svg&#34;
	
	
	
	loading=&#34;lazy&#34;
	
		alt=&#34;Status&#34;
	
	
&gt;&lt;/a&gt;&lt;/td&gt;
          &lt;td&gt;&lt;a class=&#34;link&#34; href=&#34;https://pypi.org/project/tf-nightly/&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;PyPI&lt;/a&gt;&lt;/td&gt;
      &lt;/tr&gt;
      &lt;tr&gt;
          &lt;td&gt;&lt;strong&gt;Windows CPU&lt;/strong&gt;&lt;/td&gt;
          &lt;td&gt;&lt;a class=&#34;link&#34; href=&#34;https://storage.googleapis.com/tensorflow-kokoro-build-badges/windows-cpu.html&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;&lt;img src=&#34;https://storage.googleapis.com/tensorflow-kokoro-build-badges/windows-cpu.svg&#34;
	
	
	
	loading=&#34;lazy&#34;
	
		alt=&#34;Status&#34;
	
	
&gt;&lt;/a&gt;&lt;/td&gt;
          &lt;td&gt;&lt;a class=&#34;link&#34; href=&#34;https://pypi.org/project/tf-nightly/&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;PyPI&lt;/a&gt;&lt;/td&gt;
      &lt;/tr&gt;
      &lt;tr&gt;
          &lt;td&gt;&lt;strong&gt;Windows GPU&lt;/strong&gt;&lt;/td&gt;
          &lt;td&gt;&lt;a class=&#34;link&#34; href=&#34;https://storage.googleapis.com/tensorflow-kokoro-build-badges/windows-gpu.html&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;&lt;img src=&#34;https://storage.googleapis.com/tensorflow-kokoro-build-badges/windows-gpu.svg&#34;
	
	
	
	loading=&#34;lazy&#34;
	
		alt=&#34;Status&#34;
	
	
&gt;&lt;/a&gt;&lt;/td&gt;
          &lt;td&gt;&lt;a class=&#34;link&#34; href=&#34;https://pypi.org/project/tf-nightly-gpu/&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;PyPI&lt;/a&gt;&lt;/td&gt;
      &lt;/tr&gt;
      &lt;tr&gt;
          &lt;td&gt;&lt;strong&gt;Android&lt;/strong&gt;&lt;/td&gt;
          &lt;td&gt;&lt;a class=&#34;link&#34; href=&#34;https://storage.googleapis.com/tensorflow-kokoro-build-badges/android.html&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;&lt;img src=&#34;https://storage.googleapis.com/tensorflow-kokoro-build-badges/android.svg&#34;
	
	
	
	loading=&#34;lazy&#34;
	
		alt=&#34;Status&#34;
	
	
&gt;&lt;/a&gt;&lt;/td&gt;
          &lt;td&gt;&lt;a class=&#34;link&#34; href=&#34;https://bintray.com/google/tensorflow/tensorflow/_latestVersion&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;Download&lt;/a&gt;&lt;/td&gt;
      &lt;/tr&gt;
      &lt;tr&gt;
          &lt;td&gt;&lt;strong&gt;Raspberry Pi 0 and 1&lt;/strong&gt;&lt;/td&gt;
          &lt;td&gt;&lt;a class=&#34;link&#34; href=&#34;https://storage.googleapis.com/tensorflow-kokoro-build-badges/rpi01-py3.html&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;&lt;img src=&#34;https://storage.googleapis.com/tensorflow-kokoro-build-badges/rpi01-py3.svg&#34;
	
	
	
	loading=&#34;lazy&#34;
	
		alt=&#34;Status&#34;
	
	
&gt;&lt;/a&gt;&lt;/td&gt;
          &lt;td&gt;&lt;a class=&#34;link&#34; href=&#34;https://storage.googleapis.com/tensorflow-nightly/tensorflow-1.10.0-cp34-none-linux_armv6l.whl&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;Py3&lt;/a&gt;&lt;/td&gt;
      &lt;/tr&gt;
      &lt;tr&gt;
          &lt;td&gt;&lt;strong&gt;Raspberry Pi 2 and 3&lt;/strong&gt;&lt;/td&gt;
          &lt;td&gt;&lt;a class=&#34;link&#34; href=&#34;https://storage.googleapis.com/tensorflow-kokoro-build-badges/rpi23-py3.html&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;&lt;img src=&#34;https://storage.googleapis.com/tensorflow-kokoro-build-badges/rpi23-py3.svg&#34;
	
	
	
	loading=&#34;lazy&#34;
	
		alt=&#34;Status&#34;
	
	
&gt;&lt;/a&gt;&lt;/td&gt;
          &lt;td&gt;&lt;a class=&#34;link&#34; href=&#34;https://storage.googleapis.com/tensorflow-nightly/tensorflow-1.10.0-cp34-none-linux_armv7l.whl&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;Py3&lt;/a&gt;&lt;/td&gt;
      &lt;/tr&gt;
      &lt;tr&gt;
          &lt;td&gt;&lt;strong&gt;Libtensorflow macOS CPU&lt;/strong&gt;&lt;/td&gt;
          &lt;td&gt;Status Temporarily Unavailable&lt;/td&gt;
          &lt;td&gt;&lt;a class=&#34;link&#34; href=&#34;https://storage.googleapis.com/libtensorflow-nightly/prod/tensorflow/release/macos/latest/macos_cpu_libtensorflow_binaries.tar.gz&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;Nightly Binary&lt;/a&gt; &lt;a class=&#34;link&#34; href=&#34;https://storage.googleapis.com/tensorflow/&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;Official GCS&lt;/a&gt;&lt;/td&gt;
      &lt;/tr&gt;
      &lt;tr&gt;
          &lt;td&gt;&lt;strong&gt;Libtensorflow Linux CPU&lt;/strong&gt;&lt;/td&gt;
          &lt;td&gt;Status Temporarily Unavailable&lt;/td&gt;
          &lt;td&gt;&lt;a class=&#34;link&#34; href=&#34;https://storage.googleapis.com/libtensorflow-nightly/prod/tensorflow/release/ubuntu_16/latest/cpu/ubuntu_cpu_libtensorflow_binaries.tar.gz&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;Nightly Binary&lt;/a&gt; &lt;a class=&#34;link&#34; href=&#34;https://storage.googleapis.com/tensorflow/&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;Official GCS&lt;/a&gt;&lt;/td&gt;
      &lt;/tr&gt;
      &lt;tr&gt;
          &lt;td&gt;&lt;strong&gt;Libtensorflow Linux GPU&lt;/strong&gt;&lt;/td&gt;
          &lt;td&gt;Status Temporarily Unavailable&lt;/td&gt;
          &lt;td&gt;&lt;a class=&#34;link&#34; href=&#34;https://storage.googleapis.com/libtensorflow-nightly/prod/tensorflow/release/ubuntu_16/latest/gpu/ubuntu_gpu_libtensorflow_binaries.tar.gz&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;Nightly Binary&lt;/a&gt; &lt;a class=&#34;link&#34; href=&#34;https://storage.googleapis.com/tensorflow/&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;Official GCS&lt;/a&gt;&lt;/td&gt;
      &lt;/tr&gt;
      &lt;tr&gt;
          &lt;td&gt;&lt;strong&gt;Libtensorflow Windows CPU&lt;/strong&gt;&lt;/td&gt;
          &lt;td&gt;Status Temporarily Unavailable&lt;/td&gt;
          &lt;td&gt;&lt;a class=&#34;link&#34; href=&#34;https://storage.googleapis.com/libtensorflow-nightly/prod/tensorflow/release/windows/latest/cpu/windows_cpu_libtensorflow_binaries.tar.gz&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;Nightly Binary&lt;/a&gt; &lt;a class=&#34;link&#34; href=&#34;https://storage.googleapis.com/tensorflow/&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;Official GCS&lt;/a&gt;&lt;/td&gt;
      &lt;/tr&gt;
      &lt;tr&gt;
          &lt;td&gt;&lt;strong&gt;Libtensorflow Windows GPU&lt;/strong&gt;&lt;/td&gt;
          &lt;td&gt;Status Temporarily Unavailable&lt;/td&gt;
          &lt;td&gt;&lt;a class=&#34;link&#34; href=&#34;https://storage.googleapis.com/libtensorflow-nightly/prod/tensorflow/release/windows/latest/gpu/windows_gpu_libtensorflow_binaries.tar.gz&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;Nightly Binary&lt;/a&gt; &lt;a class=&#34;link&#34; href=&#34;https://storage.googleapis.com/tensorflow/&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;Official GCS&lt;/a&gt;&lt;/td&gt;
      &lt;/tr&gt;
  &lt;/tbody&gt;
&lt;/table&gt;
&lt;h2 id=&#34;resources&#34;&gt;Resources
&lt;/h2&gt;&lt;ul&gt;
&lt;li&gt;&lt;a class=&#34;link&#34; href=&#34;https://www.tensorflow.org&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;TensorFlow.org&lt;/a&gt;&lt;/li&gt;
&lt;li&gt;&lt;a class=&#34;link&#34; href=&#34;https://www.tensorflow.org/tutorials/&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;TensorFlow Tutorials&lt;/a&gt;&lt;/li&gt;
&lt;li&gt;&lt;a class=&#34;link&#34; href=&#34;https://github.com/tensorflow/models/tree/master/official&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;TensorFlow Official Models&lt;/a&gt;&lt;/li&gt;
&lt;li&gt;&lt;a class=&#34;link&#34; href=&#34;https://github.com/tensorflow/examples&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;TensorFlow Examples&lt;/a&gt;&lt;/li&gt;
&lt;li&gt;&lt;a class=&#34;link&#34; href=&#34;https://codelabs.developers.google.com/?cat=TensorFlow&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;TensorFlow Codelabs&lt;/a&gt;&lt;/li&gt;
&lt;li&gt;&lt;a class=&#34;link&#34; href=&#34;https://blog.tensorflow.org&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;TensorFlow Blog&lt;/a&gt;&lt;/li&gt;
&lt;li&gt;&lt;a class=&#34;link&#34; href=&#34;https://www.tensorflow.org/resources/learn-ml&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;Learn ML with TensorFlow&lt;/a&gt;&lt;/li&gt;
&lt;li&gt;&lt;a class=&#34;link&#34; href=&#34;https://twitter.com/tensorflow&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;TensorFlow Twitter&lt;/a&gt;&lt;/li&gt;
&lt;li&gt;&lt;a class=&#34;link&#34; href=&#34;https://www.youtube.com/channel/UC0rqucBdTuFTjJiefW5t-IQ&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;TensorFlow YouTube&lt;/a&gt;&lt;/li&gt;
&lt;li&gt;&lt;a class=&#34;link&#34; href=&#34;https://www.tensorflow.org/model_optimization/guide/roadmap&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;TensorFlow model optimization roadmap&lt;/a&gt;&lt;/li&gt;
&lt;li&gt;&lt;a class=&#34;link&#34; href=&#34;https://www.tensorflow.org/about/bib&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;TensorFlow White Papers&lt;/a&gt;&lt;/li&gt;
&lt;li&gt;&lt;a class=&#34;link&#34; href=&#34;https://github.com/tensorflow/tensorboard&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;TensorBoard Visualization Toolkit&lt;/a&gt;&lt;/li&gt;
&lt;li&gt;&lt;a class=&#34;link&#34; href=&#34;https://cs.opensource.google/tensorflow/tensorflow&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;TensorFlow Code Search&lt;/a&gt;&lt;/li&gt;
&lt;/ul&gt;
&lt;p&gt;Learn more about the
&lt;a class=&#34;link&#34; href=&#34;https://www.tensorflow.org/community&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;TensorFlow community&lt;/a&gt; and how to
&lt;a class=&#34;link&#34; href=&#34;https://www.tensorflow.org/community/contribute&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;contribute&lt;/a&gt;.&lt;/p&gt;
&lt;h2 id=&#34;courses&#34;&gt;Courses
&lt;/h2&gt;&lt;ul&gt;
&lt;li&gt;&lt;a class=&#34;link&#34; href=&#34;https://www.coursera.org/search?query=TensorFlow&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;Coursera&lt;/a&gt;&lt;/li&gt;
&lt;li&gt;&lt;a class=&#34;link&#34; href=&#34;https://www.udacity.com/courses/all?search=TensorFlow&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;Udacity&lt;/a&gt;&lt;/li&gt;
&lt;li&gt;&lt;a class=&#34;link&#34; href=&#34;https://www.edx.org/search?q=TensorFlow&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;edX&lt;/a&gt;&lt;/li&gt;
&lt;/ul&gt;
&lt;h2 id=&#34;license&#34;&gt;License
&lt;/h2&gt;&lt;p&gt;&lt;a class=&#34;link&#34; href=&#34;LICENSE&#34; &gt;Apache License 2.0&lt;/a&gt;&lt;/p&gt;
</description>
        </item>
        <item>
        <title>cuda-python</title>
        <link>https://producthunt.programnotes.cn/en/p/cuda-python/</link>
        <pubDate>Fri, 11 Apr 2025 15:27:47 +0800</pubDate>
        
        <guid>https://producthunt.programnotes.cn/en/p/cuda-python/</guid>
        <description>&lt;img src="https://images.unsplash.com/photo-1538558940285-e76825003c99?ixid=M3w0NjAwMjJ8MHwxfHJhbmRvbXx8fHx8fHx8fDE3NDQzNTY0MTZ8&amp;ixlib=rb-4.0.3" alt="Featured image of post cuda-python" /&gt;&lt;h1 id=&#34;nvidiacuda-python&#34;&gt;&lt;a class=&#34;link&#34; href=&#34;https://github.com/NVIDIA/cuda-python&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;NVIDIA/cuda-python&lt;/a&gt;
&lt;/h1&gt;&lt;h1 id=&#34;cuda-python&#34;&gt;cuda-python
&lt;/h1&gt;&lt;p&gt;CUDA Python is the home for accessing NVIDIA’s CUDA platform from Python. It consists of multiple components:&lt;/p&gt;
&lt;ul&gt;
&lt;li&gt;&lt;a class=&#34;link&#34; href=&#34;https://nvidia.github.io/cuda-python/cuda-core/latest&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;cuda.core&lt;/a&gt;: Pythonic access to CUDA Runtime and other core functionalities&lt;/li&gt;
&lt;li&gt;&lt;a class=&#34;link&#34; href=&#34;https://nvidia.github.io/cuda-python/cuda-bindings/latest&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;cuda.bindings&lt;/a&gt;: Low-level Python bindings to CUDA C APIs&lt;/li&gt;
&lt;li&gt;&lt;a class=&#34;link&#34; href=&#34;https://nvidia.github.io/cccl/cuda_cooperative/&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;cuda.cooperative&lt;/a&gt;: A Python package providing CCCL&amp;rsquo;s reusable block-wide and warp-wide &lt;em&gt;device&lt;/em&gt; primitives for use within Numba CUDA kernels&lt;/li&gt;
&lt;li&gt;&lt;a class=&#34;link&#34; href=&#34;https://nvidia.github.io/cccl/cuda_parallel/&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;cuda.parallel&lt;/a&gt;: A Python package for easy access to CCCL&amp;rsquo;s highly efficient and customizable parallel algorithms, like &lt;code&gt;sort&lt;/code&gt;, &lt;code&gt;scan&lt;/code&gt;, &lt;code&gt;reduce&lt;/code&gt;, &lt;code&gt;transform&lt;/code&gt;, etc, that are callable on the &lt;em&gt;host&lt;/em&gt;&lt;/li&gt;
&lt;li&gt;&lt;a class=&#34;link&#34; href=&#34;https://nvidia.github.io/numba-cuda/&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;numba.cuda&lt;/a&gt;: Numba&amp;rsquo;s target for CUDA GPU programming by directly compiling a restricted subset of Python code into CUDA kernels and device functions following the CUDA execution model.&lt;/li&gt;
&lt;/ul&gt;
&lt;p&gt;For access to NVIDIA CPU &amp;amp; GPU Math Libraries, please refer to &lt;a class=&#34;link&#34; href=&#34;https://docs.nvidia.com/cuda/nvmath-python/latest&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;nvmath-python&lt;/a&gt;.&lt;/p&gt;
&lt;p&gt;CUDA Python is currently undergoing an overhaul to improve existing and bring up new components. All of the previously available functionalities from the &lt;code&gt;cuda-python&lt;/code&gt; package will continue to be available, please refer to the &lt;a class=&#34;link&#34; href=&#34;https://nvidia.github.io/cuda-python/cuda-bindings/latest&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;cuda.bindings&lt;/a&gt; documentation for installation guide and further detail.&lt;/p&gt;
&lt;h2 id=&#34;cuda-python-as-a-metapackage&#34;&gt;cuda-python as a metapackage
&lt;/h2&gt;&lt;p&gt;&lt;code&gt;cuda-python&lt;/code&gt; is being re-structured to become a metapackage that contains a collection of subpackages. Each subpackage is versioned independently, allowing installation of each component as needed.&lt;/p&gt;
&lt;h3 id=&#34;subpackage-cudacore&#34;&gt;Subpackage: &lt;code&gt;cuda.core&lt;/code&gt;
&lt;/h3&gt;&lt;p&gt;The &lt;code&gt;cuda.core&lt;/code&gt; package offers idiomatic, pythonic access to CUDA Runtime and other functionalities.&lt;/p&gt;
&lt;p&gt;The goals are to&lt;/p&gt;
&lt;ol&gt;
&lt;li&gt;Provide &lt;strong&gt;idiomatic (&amp;ldquo;pythonic&amp;rdquo;)&lt;/strong&gt; access to CUDA Driver, Runtime, and JIT compiler toolchain&lt;/li&gt;
&lt;li&gt;Focus on &lt;strong&gt;developer productivity&lt;/strong&gt; by ensuring end-to-end CUDA development can be performed quickly and entirely in Python&lt;/li&gt;
&lt;li&gt;&lt;strong&gt;Avoid homegrown&lt;/strong&gt; Python abstractions for CUDA for new Python GPU libraries starting from scratch&lt;/li&gt;
&lt;li&gt;&lt;strong&gt;Ease&lt;/strong&gt; developer &lt;strong&gt;burden of maintaining&lt;/strong&gt; and catching up with latest CUDA features&lt;/li&gt;
&lt;li&gt;&lt;strong&gt;Flatten the learning curve&lt;/strong&gt; for current and future generations of CUDA developers&lt;/li&gt;
&lt;/ol&gt;
&lt;h3 id=&#34;subpackage-cudabindings&#34;&gt;Subpackage: &lt;code&gt;cuda.bindings&lt;/code&gt;
&lt;/h3&gt;&lt;p&gt;The &lt;code&gt;cuda.bindings&lt;/code&gt; package is a standard set of low-level interfaces, providing full coverage of and access to the CUDA host APIs from Python.&lt;/p&gt;
&lt;p&gt;The list of available interfaces are:&lt;/p&gt;
&lt;ul&gt;
&lt;li&gt;CUDA Driver&lt;/li&gt;
&lt;li&gt;CUDA Runtime&lt;/li&gt;
&lt;li&gt;NVRTC&lt;/li&gt;
&lt;li&gt;nvJitLink&lt;/li&gt;
&lt;li&gt;NVVM&lt;/li&gt;
&lt;/ul&gt;
</description>
        </item>
        
    </channel>
</rss>
