<?xml version="1.0" encoding="utf-8" standalone="yes"?>
<rss version="2.0" xmlns:atom="http://www.w3.org/2005/Atom">
    <channel>
        <title>Transformer on Producthunt daily</title>
        <link>https://producthunt.programnotes.cn/en/tags/transformer/</link>
        <description>Recent content in Transformer on Producthunt daily</description>
        <generator>Hugo -- gohugo.io</generator>
        <language>en</language>
        <lastBuildDate>Wed, 15 Oct 2025 15:29:58 +0800</lastBuildDate><atom:link href="https://producthunt.programnotes.cn/en/tags/transformer/index.xml" rel="self" type="application/rss+xml" /><item>
        <title>happy-llm</title>
        <link>https://producthunt.programnotes.cn/en/p/happy-llm/</link>
        <pubDate>Wed, 15 Oct 2025 15:29:58 +0800</pubDate>
        
        <guid>https://producthunt.programnotes.cn/en/p/happy-llm/</guid>
        <description>&lt;img src="https://images.unsplash.com/photo-1662912711212-b07ab6c1fc45?ixid=M3w0NjAwMjJ8MHwxfHJhbmRvbXx8fHx8fHx8fDE3NjA1MTMzNTN8&amp;ixlib=rb-4.1.0" alt="Featured image of post happy-llm" /&gt;&lt;h1 id=&#34;datawhalechinahappy-llm&#34;&gt;&lt;a class=&#34;link&#34; href=&#34;https://github.com/datawhalechina/happy-llm&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;datawhalechina/happy-llm&lt;/a&gt;
&lt;/h1&gt;&lt;div align=&#39;center&#39;&gt;
    &lt;img src=&#34;./images/head.jpg&#34; alt=&#34;Happy-LLM&#34; width=&#34;100%&#34;&gt;
    &lt;h1&gt;Happy-LLM&lt;/h1&gt;
&lt;/div&gt;
&lt;div align=&#34;center&#34;&gt;
  &lt;img src=&#34;https://img.shields.io/github/stars/datawhalechina/happy-llm?style=flat&amp;logo=github&#34; alt=&#34;GitHub stars&#34;/&gt;
  &lt;img src=&#34;https://img.shields.io/github/forks/datawhalechina/happy-llm?style=flat&amp;logo=github&#34; alt=&#34;GitHub forks&#34;/&gt;
  &lt;img src=&#34;https://img.shields.io/badge/language-Chinese-brightgreen?style=flat&#34; alt=&#34;Language&#34;/&gt;
  &lt;a href=&#34;https://github.com/datawhalechina/happy-llm&#34;&gt;&lt;img src=&#34;https://img.shields.io/badge/GitHub-Project-blue?style=flat&amp;logo=github&#34; alt=&#34;GitHub Project&#34;&gt;&lt;/a&gt;
  &lt;a href=&#34;https://swanlab.cn/@kmno4/Happy-LLM/overview&#34;&gt;&lt;img src=&#34;https://raw.githubusercontent.com/SwanHubX/assets/main/badge1.svg&#34; alt=&#34;SwanLab&#34;&gt;&lt;/a&gt;
&lt;/div&gt;
&lt;div align=&#34;center&#34;&gt;
  &lt;a href=&#34;https://trendshift.io/repositories/14175&#34; target=&#34;_blank&#34;&gt;&lt;img src=&#34;https://trendshift.io/api/badge/repositories/14175&#34; alt=&#34;datawhalechina%2Fhappy-llm | Trendshift&#34; style=&#34;width: 250px; height: 55px;&#34; width=&#34;250&#34; height=&#34;55&#34;/&gt;&lt;/a&gt;
&lt;/div&gt;
&lt;div align=&#34;center&#34;&gt;
&lt;p&gt;&lt;a class=&#34;link&#34; href=&#34;./README.md&#34; &gt;Chinese&lt;/a&gt; | &lt;a class=&#34;link&#34; href=&#34;./README_en.md&#34; &gt;English&lt;/a&gt;&lt;/p&gt;
&lt;/div&gt;
&lt;div align=&#34;center&#34;&gt;
  &lt;p&gt;&lt;a href=&#34;https://datawhalechina.github.io/happy-llm/&#34;&gt;📚 Read Online&lt;/a&gt;&lt;/p&gt;
  &lt;h3&gt;📚 A From-Scratch Tutorial on LLM Principles and Practice&lt;/h3&gt;
  &lt;p&gt;&lt;em&gt;Deeply understand the core principles of LLMs and build your first large model by hand&lt;/em&gt;&lt;/p&gt;
&lt;/div&gt;
&lt;hr&gt;
&lt;h2 id=&#34;-项目介绍&#34;&gt;🎯 Project Introduction
&lt;/h2&gt;&lt;blockquote&gt;
&lt;p&gt;  &lt;em&gt;After finishing the Datawhale open-source project &lt;a class=&#34;link&#34; href=&#34;https://github.com/datawhalechina/self-llm&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;self-llm: A Practical Guide to Open-Source LLMs&lt;/a&gt;, many readers wanted more and hoped to dig deeper into the principles and training process of large language models. So we (Datawhale) decided to launch the Happy-LLM project, aiming to help everyone understand LLM principles and training in depth.&lt;/em&gt;&lt;/p&gt;
&lt;/blockquote&gt;
&lt;p&gt;  This project is a &lt;strong&gt;systematic LLM learning tutorial&lt;/strong&gt;. Starting from the basic research methods of NLP, it follows the ideas and principles behind LLMs layer by layer, dissecting their architectural foundations and training process for the reader. Along the way, we use the most mainstream code frameworks in the LLM field to walk through building and training an LLM by hand, aiming not just to hand you a fish but to teach you how to fish. We hope this book is where you step into the vast world of LLMs and begin exploring their endless possibilities.&lt;/p&gt;
&lt;h3 id=&#34;-你将收获什么&#34;&gt;✨ What Will You Gain?
&lt;/h3&gt;&lt;ul&gt;
&lt;li&gt;📚 &lt;strong&gt;Datawhale open source, free of charge&lt;/strong&gt; - learn all of the project content completely free&lt;/li&gt;
&lt;li&gt;🔍 &lt;strong&gt;Deeply understand&lt;/strong&gt; the Transformer architecture and the attention mechanism&lt;/li&gt;
&lt;li&gt;📚 &lt;strong&gt;Master&lt;/strong&gt; the basic principles of pretrained language models&lt;/li&gt;
&lt;li&gt;🧠 &lt;strong&gt;Understand&lt;/strong&gt; the basic structure of today&amp;rsquo;s large models&lt;/li&gt;
&lt;li&gt;🏗️ &lt;strong&gt;Hands-on implementation&lt;/strong&gt; of a complete LLaMA2 model (a small taste follows this list)&lt;/li&gt;
&lt;li&gt;⚙️ &lt;strong&gt;Master training&lt;/strong&gt; - the full pipeline from pretraining to fine-tuning&lt;/li&gt;
&lt;li&gt;🚀 &lt;strong&gt;Apply in practice&lt;/strong&gt; cutting-edge techniques such as RAG and Agents&lt;/li&gt;
&lt;/ul&gt;
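&lt;p&gt;  As a small taste of what Chapter 5 builds, here is a minimal, hypothetical sketch of RMSNorm, the normalization layer LLaMA2 uses instead of LayerNorm (PyTorch assumed; the names are illustrative, not the project&amp;rsquo;s actual code):&lt;/p&gt;
&lt;pre&gt;&lt;code class=&#34;language-python&#34;&gt;import torch
import torch.nn as nn

class RMSNorm(nn.Module):
    # LLaMA2-style RMSNorm: rescale by the root mean square of the
    # features, with a learned per-feature gain and no mean subtraction.
    def __init__(self, dim: int, eps: float = 1e-6):
        super().__init__()
        self.eps = eps
        self.weight = nn.Parameter(torch.ones(dim))

    def forward(self, x: torch.Tensor) -&gt; torch.Tensor:
        rms = torch.sqrt(x.pow(2).mean(-1, keepdim=True) + self.eps)
        return self.weight * (x / rms)

x = torch.randn(2, 8, 16)      # (batch, seq_len, dim)
print(RMSNorm(16)(x).shape)    # torch.Size([2, 8, 16])
&lt;/code&gt;&lt;/pre&gt;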
&lt;h2 id=&#34;-内容导航&#34;&gt;📖 Contents
&lt;/h2&gt;&lt;table&gt;
  &lt;thead&gt;
      &lt;tr&gt;
          &lt;th&gt;Chapter&lt;/th&gt;
          &lt;th&gt;Key Content&lt;/th&gt;
          &lt;th&gt;Status&lt;/th&gt;
      &lt;/tr&gt;
  &lt;/thead&gt;
  &lt;tbody&gt;
      &lt;tr&gt;
          &lt;td&gt;&lt;a class=&#34;link&#34; href=&#34;./docs/%e5%89%8d%e8%a8%80.md&#34; &gt;Preface&lt;/a&gt;&lt;/td&gt;
          &lt;td&gt;Origin and background of the project, with advice for readers&lt;/td&gt;
          &lt;td&gt;✅&lt;/td&gt;
      &lt;/tr&gt;
      &lt;tr&gt;
          &lt;td&gt;&lt;a class=&#34;link&#34; href=&#34;./docs/chapter1/%e7%ac%ac%e4%b8%80%e7%ab%a0%20NLP%e5%9f%ba%e7%a1%80%e6%a6%82%e5%bf%b5.md&#34; &gt;Chapter 1: NLP Fundamentals&lt;/a&gt;&lt;/td&gt;
          &lt;td&gt;What NLP is, its history, task categories, and the evolution of text representation&lt;/td&gt;
          &lt;td&gt;✅&lt;/td&gt;
      &lt;/tr&gt;
      &lt;tr&gt;
          &lt;td&gt;&lt;a class=&#34;link&#34; href=&#34;./docs/chapter2/%e7%ac%ac%e4%ba%8c%e7%ab%a0%20Transformer%e6%9e%b6%e6%9e%84.md&#34; &gt;Chapter 2: Transformer Architecture&lt;/a&gt;&lt;/td&gt;
          &lt;td&gt;Attention mechanism, Encoder-Decoder, building a Transformer step by step&lt;/td&gt;
          &lt;td&gt;✅&lt;/td&gt;
      &lt;/tr&gt;
      &lt;tr&gt;
          &lt;td&gt;&lt;a class=&#34;link&#34; href=&#34;./docs/chapter3/%e7%ac%ac%e4%b8%89%e7%ab%a0%20%e9%a2%84%e8%ae%ad%e7%bb%83%e8%af%ad%e8%a8%80%e6%a8%a1%e5%9e%8b.md&#34; &gt;Chapter 3: Pretrained Language Models&lt;/a&gt;&lt;/td&gt;
          &lt;td&gt;Comparing Encoder-Only, Encoder-Decoder, and Decoder-Only models&lt;/td&gt;
          &lt;td&gt;✅&lt;/td&gt;
      &lt;/tr&gt;
      &lt;tr&gt;
          &lt;td&gt;&lt;a class=&#34;link&#34; href=&#34;./docs/chapter4/%e7%ac%ac%e5%9b%9b%e7%ab%a0%20%e5%a4%a7%e8%af%ad%e8%a8%80%e6%a8%a1%e5%9e%8b.md&#34; &gt;Chapter 4: Large Language Models&lt;/a&gt;&lt;/td&gt;
          &lt;td&gt;What defines an LLM, training strategies, analysis of emergent abilities&lt;/td&gt;
          &lt;td&gt;✅&lt;/td&gt;
      &lt;/tr&gt;
      &lt;tr&gt;
          &lt;td&gt;&lt;a class=&#34;link&#34; href=&#34;./docs/chapter5/%e7%ac%ac%e4%ba%94%e7%ab%a0%20%e5%8a%a8%e6%89%8b%e6%90%ad%e5%bb%ba%e5%a4%a7%e6%a8%a1%e5%9e%8b.md&#34; &gt;Chapter 5: Building a Large Model by Hand&lt;/a&gt;&lt;/td&gt;
          &lt;td&gt;Implementing LLaMA2, training a tokenizer, pretraining a small LLM&lt;/td&gt;
          &lt;td&gt;✅&lt;/td&gt;
      &lt;/tr&gt;
      &lt;tr&gt;
          &lt;td&gt;&lt;a class=&#34;link&#34; href=&#34;./docs/chapter6/%e7%ac%ac%e5%85%ad%e7%ab%a0%20%e5%a4%a7%e6%a8%a1%e5%9e%8b%e8%ae%ad%e7%bb%83%e6%b5%81%e7%a8%8b%e5%ae%9e%e8%b7%b5.md&#34; &gt;Chapter 6: LLM Training in Practice&lt;/a&gt;&lt;/td&gt;
          &lt;td&gt;Pretraining, supervised fine-tuning, efficient fine-tuning with LoRA/QLoRA&lt;/td&gt;
          &lt;td&gt;🚧&lt;/td&gt;
      &lt;/tr&gt;
      &lt;tr&gt;
          &lt;td&gt;&lt;a class=&#34;link&#34; href=&#34;./docs/chapter7/%e7%ac%ac%e4%b8%83%e7%ab%a0%20%e5%a4%a7%e6%a8%a1%e5%9e%8b%e5%ba%94%e7%94%a8.md&#34; &gt;Chapter 7: LLM Applications&lt;/a&gt;&lt;/td&gt;
          &lt;td&gt;Model evaluation, retrieval-augmented generation (RAG), agents&lt;/td&gt;
          &lt;td&gt;✅&lt;/td&gt;
      &lt;/tr&gt;
      &lt;tr&gt;
          &lt;td&gt;&lt;a class=&#34;link&#34; href=&#34;./Extra-Chapter/&#34; &gt;Extra Chapter LLM Blog&lt;/a&gt;&lt;/td&gt;
          &lt;td&gt;Outstanding LLM study notes and blog posts; PRs welcome!&lt;/td&gt;
          &lt;td&gt;🚧&lt;/td&gt;
      &lt;/tr&gt;
  &lt;/tbody&gt;
&lt;/table&gt;
&lt;h3 id=&#34;extra-chapter-llm-blog&#34;&gt;Extra Chapter LLM Blog
&lt;/h3&gt;&lt;ul&gt;
&lt;li&gt;
&lt;p&gt;&lt;a class=&#34;link&#34; href=&#34;./Extra-Chapter/why-fine-tune-small-large-language-models/readme.md&#34; &gt;With large models this capable, what is the point of fine-tuning a 0.6B small model?&lt;/a&gt; @&lt;a class=&#34;link&#34; href=&#34;https://github.com/KMnO4-zx&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;不要葱姜蒜&lt;/a&gt; 2025-7-11&lt;/p&gt;
&lt;/li&gt;
&lt;li&gt;
&lt;p&gt;&lt;a class=&#34;link&#34; href=&#34;./Extra-Chapter/transformer-architecture/&#34; &gt;A Walkthrough of the Overall Module Design of the Transformer&lt;/a&gt; @&lt;a class=&#34;link&#34; href=&#34;https://github.com/ditingdapeng&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;ditingdapeng&lt;/a&gt; 2025-7-14&lt;/p&gt;
&lt;/li&gt;
&lt;li&gt;
&lt;p&gt;&lt;a class=&#34;link&#34; href=&#34;./Extra-Chapter/text-data-processing/readme.md&#34; &gt;Text Data Processing in Detail&lt;/a&gt; @&lt;a class=&#34;link&#34; href=&#34;https://github.com/xinala-781&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;蔡鋆捷&lt;/a&gt; 2025-7-14&lt;/p&gt;
&lt;/li&gt;
&lt;li&gt;
&lt;p&gt;&lt;a class=&#34;link&#34; href=&#34;./Extra-Chapter/vlm-concatenation-finetune/README.md&#34; &gt;Qwen3-&amp;ldquo;VL&amp;rdquo;: the &amp;ldquo;concatenate-and-fine-tune&amp;rdquo; route to a tiny Chinese multimodal model&lt;/a&gt; @&lt;a class=&#34;link&#34; href=&#34;https://github.com/ShaohonChen&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;ShaohonChen&lt;/a&gt; 2025-7-30&lt;/p&gt;
&lt;/li&gt;
&lt;li&gt;
&lt;p&gt;&lt;a class=&#34;link&#34; href=&#34;./Extra-Chapter/s1-vllm-thinking-budget/readme.md&#34; &gt;S1: Thinking Budget with vLLM&lt;/a&gt; @&lt;a class=&#34;link&#34; href=&#34;https://github.com/kmno4-zx&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;kmno4-zx&lt;/a&gt; 2025-8-03&lt;/p&gt;
&lt;/li&gt;
&lt;li&gt;
&lt;p&gt;&lt;a class=&#34;link&#34; href=&#34;./Extra-Chapter/CDDRS/readme.md&#34; &gt;CDDRS: A RAG retrieval method enhanced with fine-grained semantic guidance&lt;/a&gt; @&lt;a class=&#34;link&#34; href=&#34;https://github.com/Hongru0306&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;Hongru0306&lt;/a&gt; 2025-8-21&lt;/p&gt;
&lt;/li&gt;
&lt;/ul&gt;
&lt;blockquote&gt;
&lt;p&gt;  &lt;em&gt;If you have your own insights, understanding, or practice from studying Happy-LLM or LLM-related topics, you are welcome to open a PR against the &lt;a class=&#34;link&#34; href=&#34;./Extra-Chapter/&#34; &gt;Extra Chapter LLM Blog&lt;/a&gt;. Please follow its &lt;a class=&#34;link&#34; href=&#34;./Extra-Chapter/Readme.md&#34; &gt;PR guidelines&lt;/a&gt;; based on the quality and value of a PR, we will decide whether to merge it or fold it into the main Happy-LLM text.&lt;/em&gt;&lt;/p&gt;
&lt;/blockquote&gt;
&lt;h3 id=&#34;模型下载&#34;&gt;Model Downloads
&lt;/h3&gt;&lt;table&gt;
  &lt;thead&gt;
      &lt;tr&gt;
          &lt;th&gt;Model&lt;/th&gt;
          &lt;th&gt;Download&lt;/th&gt;
      &lt;/tr&gt;
  &lt;/thead&gt;
  &lt;tbody&gt;
      &lt;tr&gt;
          &lt;td&gt;Happy-LLM-Chapter5-Base-215M&lt;/td&gt;
          &lt;td&gt;&lt;a class=&#34;link&#34; href=&#34;https://www.modelscope.cn/models/kmno4zx/happy-llm-215M-base&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;🤖 ModelScope&lt;/a&gt;&lt;/td&gt;
      &lt;/tr&gt;
      &lt;tr&gt;
          &lt;td&gt;Happy-LLM-Chapter5-SFT-215M&lt;/td&gt;
          &lt;td&gt;&lt;a class=&#34;link&#34; href=&#34;https://www.modelscope.cn/models/kmno4zx/happy-llm-215M-sft&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;🤖 ModelScope&lt;/a&gt;&lt;/td&gt;
      &lt;/tr&gt;
  &lt;/tbody&gt;
&lt;/table&gt;
&lt;blockquote&gt;
&lt;p&gt;&lt;em&gt;Try it online in the ModelScope Studio: &lt;a class=&#34;link&#34; href=&#34;https://www.modelscope.cn/studios/kmno4zx/happy_llm_215M_sft&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;🤖 ModelScope Studio&lt;/a&gt;&lt;/em&gt;&lt;/p&gt;
&lt;/blockquote&gt;
&lt;h3 id=&#34;pdf-版本下载&#34;&gt;PDF Download
&lt;/h3&gt;&lt;p&gt;  &lt;em&gt;&lt;strong&gt;The Happy-LLM PDF tutorial is fully open source and free. To stop marketing accounts from adding their own watermarks and selling it to LLM beginners, we pre-embed an unobtrusive Datawhale open-source watermark in the PDF. Thank you for your understanding!&lt;/strong&gt;&lt;/em&gt;&lt;/p&gt;
&lt;blockquote&gt;
&lt;p&gt;&lt;em&gt;Happy-LLM PDF : &lt;a class=&#34;link&#34; href=&#34;https://github.com/datawhalechina/happy-llm/releases/tag/v1.0.1&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;https://github.com/datawhalechina/happy-llm/releases/tag/v1.0.1&lt;/a&gt;&lt;/em&gt;&lt;/p&gt;
&lt;/blockquote&gt;
&lt;h2 id=&#34;-如何学习&#34;&gt;💡 How to Learn
&lt;/h2&gt;&lt;p&gt;  This project is aimed at university students, researchers, and LLM enthusiasts. Before starting, you should have some programming experience, and in particular a working knowledge of Python. Familiarity with deep learning and with the basic concepts and terminology of NLP will make the material easier to follow.&lt;/p&gt;
&lt;p&gt;  The project has two parts: fundamentals and hands-on practice. Chapters 1 through 4 cover the fundamentals, introducing the principles of LLMs step by step. Chapter 1 briefly surveys the basic tasks and history of NLP as a reference for readers from other fields; Chapter 2 introduces the Transformer, the architectural basis of LLMs, with both theory and code, as their most important theoretical foundation; Chapter 3 surveys classic pretrained language models across the Encoder-Only, Encoder-Decoder, and Decoder-Only architectures, and also covers the architectures and ideas of some current mainstream LLMs; Chapter 4 then turns to LLMs proper, detailing their characteristics, capabilities, and overall training process. Chapters 5 through 7 are the hands-on part, taking you step by step into the low-level details of LLMs. Chapter 5 guides you in building an LLM from scratch on top of PyTorch and implementing the full pipeline of pretraining and supervised fine-tuning; Chapter 6 introduces Transformers, the LLM training framework most widely used in industry, and shows how to train LLMs quickly and efficiently with it; Chapter 7 covers LLM-based applications, rounding out the picture with LLM evaluation, Retrieval-Augmented Generation (RAG), and the ideas and a simple implementation of agents. You can read the chapters selectively according to your own interests and needs.&lt;/p&gt;
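&lt;p&gt;  To make the pretraining and SFT loop of Chapters 5 and 6 concrete, here is a minimal, hypothetical next-token training step in PyTorch (the model and data are toy stand-ins, not the book&amp;rsquo;s code; SFT optimizes the same cross-entropy, just with the loss masked to the response tokens):&lt;/p&gt;
&lt;pre&gt;&lt;code class=&#34;language-python&#34;&gt;import torch
import torch.nn.functional as F

vocab_size, dim, B, T = 100, 32, 4, 16
# stand-in model: embedding plus linear head; a real LLM stacks
# Transformer blocks in between
emb = torch.nn.Embedding(vocab_size, dim)
head = torch.nn.Linear(dim, vocab_size)
opt = torch.optim.AdamW(list(emb.parameters()) + list(head.parameters()), lr=3e-4)

tokens = torch.randint(0, vocab_size, (B, T + 1))  # toy token batch
x, y = tokens[:, :-1], tokens[:, 1:]               # inputs and next-token targets

logits = head(emb(x))                              # (B, T, vocab_size)
loss = F.cross_entropy(logits.reshape(-1, vocab_size), y.reshape(-1))
opt.zero_grad()
loss.backward()
opt.step()
print(loss.item())                                 # about ln(100) at first
&lt;/code&gt;&lt;/pre&gt;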
&lt;p&gt;  As you read, we suggest pairing theory with practice. LLMs are a fast-moving, practice-driven field: reproduce the code in this book, and take part in LLM projects and competitions to truly join the wave of LLM development. We encourage you to follow Datawhale and other LLM open-source communities; whenever you hit a problem, you can ask in this project&amp;rsquo;s issue tracker.&lt;/p&gt;
&lt;p&gt;  Finally, we welcome every reader to join the ranks of LLM developers after finishing this project. As a Chinese AI open-source community, we hope to gather co-creators to enrich the open-source LLM world and build more comprehensive and distinctive LLM tutorials. Sparks gather into a sea. We hope to be a ladder between LLMs and the general public, embracing the broader LLM world in a spirit of free and open collaboration.&lt;/p&gt;
&lt;h2 id=&#34;-如何贡献&#34;&gt;🤝 How to Contribute
&lt;/h2&gt;&lt;p&gt;We welcome contributions of all kinds!&lt;/p&gt;
&lt;ul&gt;
&lt;li&gt;🐛 &lt;strong&gt;Report bugs&lt;/strong&gt; - open an Issue when you find a problem&lt;/li&gt;
&lt;li&gt;💡 &lt;strong&gt;Suggest features&lt;/strong&gt; - tell us your ideas&lt;/li&gt;
&lt;li&gt;📝 &lt;strong&gt;Improve content&lt;/strong&gt; - help polish the tutorial text&lt;/li&gt;
&lt;li&gt;🔧 &lt;strong&gt;Improve code&lt;/strong&gt; - submit a Pull Request&lt;/li&gt;
&lt;/ul&gt;
&lt;h2 id=&#34;-致谢&#34;&gt;🙏 Acknowledgements
&lt;/h2&gt;&lt;h3 id=&#34;核心贡献者&#34;&gt;Core Contributors
&lt;/h3&gt;&lt;ul&gt;
&lt;li&gt;&lt;a class=&#34;link&#34; href=&#34;https://github.com/KMnO4-zx&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;宋志学 - Project Lead&lt;/a&gt; (Datawhale member, China University of Mining and Technology, Beijing)&lt;/li&gt;
&lt;li&gt;&lt;a class=&#34;link&#34; href=&#34;https://github.com/logan-zou&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;邹雨衡 - Project Lead&lt;/a&gt; (Datawhale member, University of International Business and Economics)&lt;/li&gt;
&lt;li&gt;&lt;a class=&#34;link&#34; href=&#34;https://xinzhongzhu.github.io/&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;朱信忠 - Advisor&lt;/a&gt; (Datawhale Chief Scientist; Professor, Hangzhou Institute of Artificial Intelligence, Zhejiang Normal University)&lt;/li&gt;
&lt;/ul&gt;
&lt;h3 id=&#34;extra-chapter-贡献者&#34;&gt;Extra-Chapter Contributors
&lt;/h3&gt;&lt;ul&gt;
&lt;li&gt;&lt;a class=&#34;link&#34; href=&#34;https://github.com/ditingdapeng&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;ditingdapeng&lt;/a&gt; (content contributor, cloud-native infrastructure engineer)&lt;/li&gt;
&lt;li&gt;&lt;a class=&#34;link&#34; href=&#34;https://github.com/xinala-781&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;蔡鋆捷&lt;/a&gt; (content contributor, Fuzhou University)&lt;/li&gt;
&lt;li&gt;&lt;a class=&#34;link&#34; href=&#34;https://github.com/ShaohonChen&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;ShaohonChen&lt;/a&gt; (researcher, Emotion Machine lab; master&amp;rsquo;s student, Xidian University)&lt;/li&gt;
&lt;li&gt;&lt;a class=&#34;link&#34; href=&#34;https://github.com/Hongru0306&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;肖鸿儒, 庄健琨&lt;/a&gt; (content contributors, Tongji University)&lt;/li&gt;
&lt;/ul&gt;
&lt;h3 id=&#34;特别感谢&#34;&gt;Special Thanks
&lt;/h3&gt;&lt;ul&gt;
&lt;li&gt;Thanks to &lt;a class=&#34;link&#34; href=&#34;https://github.com/Sm1les&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;@Sm1les&lt;/a&gt; for their help and support of this project&lt;/li&gt;
&lt;li&gt;Thanks to all the developers who have contributed to this project ❤️&lt;/li&gt;
&lt;/ul&gt;
&lt;div align=center style=&#34;margin-top: 30px;&#34;&gt;
  &lt;a href=&#34;https://github.com/datawhalechina/happy-llm/graphs/contributors&#34;&gt;
    &lt;img src=&#34;https://contrib.rocks/image?repo=datawhalechina/happy-llm&#34; /&gt;
  &lt;/a&gt;
&lt;/div&gt;
&lt;h2 id=&#34;star-history&#34;&gt;Star History
&lt;/h2&gt;&lt;div align=&#39;center&#39;&gt;
    &lt;img src=&#34;./images/star-history-2025710.png&#34; alt=&#34;Datawhale&#34; width=&#34;90%&#34;&gt;
&lt;/div&gt;
&lt;div align=&#34;center&#34;&gt;
  &lt;p&gt;⭐ If this project helps you, please give us a Star!&lt;/p&gt;
&lt;/div&gt;
&lt;h2 id=&#34;关于-datawhale&#34;&gt;About Datawhale
&lt;/h2&gt;&lt;div align=&#39;center&#39;&gt;
    &lt;img src=&#34;./images/datawhale.png&#34; alt=&#34;Datawhale&#34; width=&#34;30%&#34;&gt;
    &lt;p&gt;Scan the QR code to follow the Datawhale official WeChat account for more high-quality open-source content&lt;/p&gt;
&lt;/div&gt;
&lt;hr&gt;
&lt;h2 id=&#34;-开源协议&#34;&gt;📜 License
&lt;/h2&gt;&lt;p&gt;This work is licensed under a &lt;a class=&#34;link&#34; href=&#34;http://creativecommons.org/licenses/by-nc-sa/4.0/&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;Creative Commons Attribution-NonCommercial-ShareAlike 4.0 International License&lt;/a&gt;.&lt;/p&gt;
</description>
        </item>
        <item>
        <title>nn-zero-to-hero</title>
        <link>https://producthunt.programnotes.cn/en/p/nn-zero-to-hero/</link>
        <pubDate>Fri, 29 Aug 2025 15:27:57 +0800</pubDate>
        
        <guid>https://producthunt.programnotes.cn/en/p/nn-zero-to-hero/</guid>
        <description>&lt;img src="https://images.unsplash.com/photo-1637160691421-e66ac6194e2b?ixid=M3w0NjAwMjJ8MHwxfHJhbmRvbXx8fHx8fHx8fDE3NTY0NTI0MTZ8&amp;ixlib=rb-4.1.0" alt="Featured image of post nn-zero-to-hero" /&gt;&lt;h1 id=&#34;karpathynn-zero-to-hero&#34;&gt;&lt;a class=&#34;link&#34; href=&#34;https://github.com/karpathy/nn-zero-to-hero&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;karpathy/nn-zero-to-hero&lt;/a&gt;
&lt;/h1&gt;&lt;h2 id=&#34;neural-networks-zero-to-hero&#34;&gt;Neural Networks: Zero to Hero
&lt;/h2&gt;&lt;p&gt;A course on neural networks that starts all the way at the basics. The course is a series of YouTube videos where we code and train neural networks together. The Jupyter notebooks we build in the videos are then captured here inside the &lt;a class=&#34;link&#34; href=&#34;lectures/&#34; &gt;lectures&lt;/a&gt; directory. Every lecture also has a set of exercises included in the video description. (This may grow into something more respectable).&lt;/p&gt;
&lt;hr&gt;
&lt;p&gt;&lt;strong&gt;Lecture 1: The spelled-out intro to neural networks and backpropagation: building micrograd&lt;/strong&gt;&lt;/p&gt;
&lt;p&gt;Backpropagation and training of neural networks. Assumes basic knowledge of Python and a vague recollection of calculus from high school.&lt;/p&gt;
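&lt;p&gt;For flavor, here is a minimal sketch of the scalar autograd idea the lecture spells out (a toy reimplementation, not the micrograd source):&lt;/p&gt;
&lt;pre&gt;&lt;code class=&#34;language-python&#34;&gt;class Value:
    # a scalar that remembers how it was computed, so we can backprop
    def __init__(self, data, children=()):
        self.data, self.grad = data, 0.0
        self._backward = lambda: None
        self._prev = set(children)

    def __add__(self, other):
        out = Value(self.data + other.data, (self, other))
        def _backward():                 # d(out)/d(self) = d(out)/d(other) = 1
            self.grad += out.grad
            other.grad += out.grad
        out._backward = _backward
        return out

    def __mul__(self, other):
        out = Value(self.data * other.data, (self, other))
        def _backward():                 # d(out)/d(self) = other.data
            self.grad += other.data * out.grad
            other.grad += self.data * out.grad
        out._backward = _backward
        return out

    def backward(self):
        # topological sort, then apply the chain rule node by node
        topo, seen = [], set()
        def build(v):
            if v not in seen:
                seen.add(v)
                for c in v._prev:
                    build(c)
                topo.append(v)
        build(self)
        self.grad = 1.0
        for v in reversed(topo):
            v._backward()

a, b = Value(2.0), Value(-3.0)
loss = a * b + a
loss.backward()
print(a.grad, b.grad)   # -2.0 2.0
&lt;/code&gt;&lt;/pre&gt;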
&lt;ul&gt;
&lt;li&gt;&lt;a class=&#34;link&#34; href=&#34;https://www.youtube.com/watch?v=VMj-3S1tku0&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;YouTube video lecture&lt;/a&gt;&lt;/li&gt;
&lt;li&gt;&lt;a class=&#34;link&#34; href=&#34;lectures/micrograd&#34; &gt;Jupyter notebook files&lt;/a&gt;&lt;/li&gt;
&lt;li&gt;&lt;a class=&#34;link&#34; href=&#34;https://github.com/karpathy/micrograd&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;micrograd Github repo&lt;/a&gt;&lt;/li&gt;
&lt;/ul&gt;
&lt;hr&gt;
&lt;p&gt;&lt;strong&gt;Lecture 2: The spelled-out intro to language modeling: building makemore&lt;/strong&gt;&lt;/p&gt;
&lt;p&gt;We implement a bigram character-level language model, which we will further complexify in followup videos into a modern Transformer language model, like GPT. In this video, the focus is on (1) introducing torch.Tensor and its subtleties and use in efficiently evaluating neural networks and (2) the overall framework of language modeling that includes model training, sampling, and the evaluation of a loss (e.g. the negative log likelihood for classification).&lt;/p&gt;
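&lt;p&gt;A minimal sketch of the bigram counting idea at the heart of this lecture (toy words stand in for the names dataset makemore actually uses):&lt;/p&gt;
&lt;pre&gt;&lt;code class=&#34;language-python&#34;&gt;import torch

words = [&#39;emma&#39;, &#39;olivia&#39;, &#39;ava&#39;]          # toy stand-in for the names dataset
chars = sorted(set(&#39;.&#39; + &#39;&#39;.join(words)))  # &#39;.&#39; marks the start/end of a word
stoi = {c: i for i, c in enumerate(chars)}

# count how often each character follows each other character
N = torch.zeros(len(chars), len(chars))
for w in words:
    for c1, c2 in zip(&#39;.&#39; + w, w + &#39;.&#39;):
        N[stoi[c1], stoi[c2]] += 1

P = (N + 1) / (N + 1).sum(1, keepdim=True)  # smoothed row-wise probabilities

# average negative log likelihood of the data under the bigram model
nll, count = 0.0, 0
for w in words:
    for c1, c2 in zip(&#39;.&#39; + w, w + &#39;.&#39;):
        nll -= torch.log(P[stoi[c1], stoi[c2]])
        count += 1
print((nll / count).item())
&lt;/code&gt;&lt;/pre&gt;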
&lt;ul&gt;
&lt;li&gt;&lt;a class=&#34;link&#34; href=&#34;https://www.youtube.com/watch?v=PaCmpygFfXo&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;YouTube video lecture&lt;/a&gt;&lt;/li&gt;
&lt;li&gt;&lt;a class=&#34;link&#34; href=&#34;lectures/makemore/makemore_part1_bigrams.ipynb&#34; &gt;Jupyter notebook files&lt;/a&gt;&lt;/li&gt;
&lt;li&gt;&lt;a class=&#34;link&#34; href=&#34;https://github.com/karpathy/makemore&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;makemore Github repo&lt;/a&gt;&lt;/li&gt;
&lt;/ul&gt;
&lt;hr&gt;
&lt;p&gt;&lt;strong&gt;Lecture 3: Building makemore Part 2: MLP&lt;/strong&gt;&lt;/p&gt;
&lt;p&gt;We implement a multilayer perceptron (MLP) character-level language model. In this video we also introduce many basics of machine learning (e.g. model training, learning rate tuning, hyperparameters, evaluation, train/dev/test splits, under/overfitting, etc.).&lt;/p&gt;
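&lt;p&gt;As a tiny illustration of one of those basics, a hypothetical train/dev/test split in PyTorch (80/10/10 fractions, as used throughout the series):&lt;/p&gt;
&lt;pre&gt;&lt;code class=&#34;language-python&#34;&gt;import torch

data = torch.arange(1000)            # stand-in for a dataset of examples
idx = torch.randperm(len(data))      # shuffle before splitting
n1, n2 = int(0.8 * len(data)), int(0.9 * len(data))

train = data[idx[:n1]]               # 80%: fit the parameters
dev = data[idx[n1:n2]]               # 10%: tune hyperparameters
test = data[idx[n2:]]                # 10%: final evaluation only
print(len(train), len(dev), len(test))   # 800 100 100
&lt;/code&gt;&lt;/pre&gt;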
&lt;ul&gt;
&lt;li&gt;&lt;a class=&#34;link&#34; href=&#34;https://youtu.be/TCH_1BHY58I&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;YouTube video lecture&lt;/a&gt;&lt;/li&gt;
&lt;li&gt;&lt;a class=&#34;link&#34; href=&#34;lectures/makemore/makemore_part2_mlp.ipynb&#34; &gt;Jupyter notebook files&lt;/a&gt;&lt;/li&gt;
&lt;li&gt;&lt;a class=&#34;link&#34; href=&#34;https://github.com/karpathy/makemore&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;makemore Github repo&lt;/a&gt;&lt;/li&gt;
&lt;/ul&gt;
&lt;hr&gt;
&lt;p&gt;&lt;strong&gt;Lecture 4: Building makemore Part 3: Activations &amp;amp; Gradients, BatchNorm&lt;/strong&gt;&lt;/p&gt;
&lt;p&gt;We dive into some of the internals of MLPs with multiple layers and scrutinize the statistics of the forward pass activations, backward pass gradients, and some of the pitfalls when they are improperly scaled. We also look at the typical diagnostic tools and visualizations you&amp;rsquo;d want to use to understand the health of your deep network. We learn why training deep neural nets can be fragile and introduce the first modern innovation that made doing so much easier: Batch Normalization. Residual connections and the Adam optimizer remain notable todos for a later video.&lt;/p&gt;
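&lt;p&gt;A minimal sketch of the batch-normalization forward pass the lecture builds up to (training-time statistics only; running averages and the exact nn.BatchNorm1d bookkeeping are left out):&lt;/p&gt;
&lt;pre&gt;&lt;code class=&#34;language-python&#34;&gt;import torch

x = torch.randn(32, 100)                    # (batch, features) pre-activations
gain = torch.ones(1, 100)                   # learnable scale (gamma)
bias = torch.zeros(1, 100)                  # learnable shift (beta)

mean = x.mean(0, keepdim=True)              # per-feature batch mean
var = x.var(0, keepdim=True)                # per-feature batch variance
xhat = (x - mean) / torch.sqrt(var + 1e-5)  # normalize to roughly unit gaussian
out = gain * xhat + bias                    # then scale and shift

print(out.mean(0).abs().max().item())       # ~0: every feature is centered
&lt;/code&gt;&lt;/pre&gt;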
&lt;ul&gt;
&lt;li&gt;&lt;a class=&#34;link&#34; href=&#34;https://youtu.be/P6sfmUTpUmc&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;YouTube video lecture&lt;/a&gt;&lt;/li&gt;
&lt;li&gt;&lt;a class=&#34;link&#34; href=&#34;lectures/makemore/makemore_part3_bn.ipynb&#34; &gt;Jupyter notebook files&lt;/a&gt;&lt;/li&gt;
&lt;li&gt;&lt;a class=&#34;link&#34; href=&#34;https://github.com/karpathy/makemore&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;makemore Github repo&lt;/a&gt;&lt;/li&gt;
&lt;/ul&gt;
&lt;hr&gt;
&lt;p&gt;&lt;strong&gt;Lecture 5: Building makemore Part 4: Becoming a Backprop Ninja&lt;/strong&gt;&lt;/p&gt;
&lt;p&gt;We take the 2-layer MLP (with BatchNorm) from the previous video and backpropagate through it manually without using PyTorch autograd&amp;rsquo;s loss.backward(). That is, we backprop through the cross entropy loss, 2nd linear layer, tanh, batchnorm, 1st linear layer, and the embedding table. Along the way, we get an intuitive understanding about how gradients flow backwards through the compute graph and on the level of efficient Tensors, not just individual scalars like in micrograd. This helps build competence and intuition around how neural nets are optimized and sets you up to more confidently innovate on and debug modern neural networks.&lt;/p&gt;
&lt;p&gt;I recommend you work through the exercise yourself, but work with the video in tandem: whenever you are stuck, unpause the video and see me give away the answer. This video is not intended to be simply watched. The exercise is &lt;a class=&#34;link&#34; href=&#34;https://colab.research.google.com/drive/1WV2oi2fh9XXyldh02wupFQX0wh5ZC-z-?usp=sharing&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;here as a Google Colab&lt;/a&gt;. Good luck :)&lt;/p&gt;
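&lt;p&gt;In the spirit of the exercise, a small sketch that backprops through cross entropy by hand and checks it against autograd (the softmax-minus-one-hot gradient derived in the video):&lt;/p&gt;
&lt;pre&gt;&lt;code class=&#34;language-python&#34;&gt;import torch
import torch.nn.functional as F

n, classes = 32, 27
logits = torch.randn(n, classes, requires_grad=True)
targets = torch.randint(0, classes, (n,))

loss = F.cross_entropy(logits, targets)
loss.backward()

# manual gradient: softmax(logits) minus one-hot(targets), averaged over n
with torch.no_grad():
    dlogits = F.softmax(logits, dim=1)
    dlogits[range(n), targets] -= 1
    dlogits /= n

print(torch.allclose(dlogits, logits.grad, atol=1e-6))  # True
&lt;/code&gt;&lt;/pre&gt;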
&lt;ul&gt;
&lt;li&gt;&lt;a class=&#34;link&#34; href=&#34;https://youtu.be/q8SA3rM6ckI&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;YouTube video lecture&lt;/a&gt;&lt;/li&gt;
&lt;li&gt;&lt;a class=&#34;link&#34; href=&#34;lectures/makemore/makemore_part4_backprop.ipynb&#34; &gt;Jupyter notebook files&lt;/a&gt;&lt;/li&gt;
&lt;li&gt;&lt;a class=&#34;link&#34; href=&#34;https://github.com/karpathy/makemore&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;makemore Github repo&lt;/a&gt;&lt;/li&gt;
&lt;/ul&gt;
&lt;hr&gt;
&lt;p&gt;&lt;strong&gt;Lecture 6: Building makemore Part 5: Building WaveNet&lt;/strong&gt;&lt;/p&gt;
&lt;p&gt;We take the 2-layer MLP from the previous video and make it deeper with a tree-like structure, arriving at a convolutional neural network architecture similar to the WaveNet (2016) from DeepMind. In the WaveNet paper, the same hierarchical architecture is implemented more efficiently using causal dilated convolutions (not yet covered). Along the way we get a better sense of torch.nn, what it is and how it works under the hood, and what a typical deep learning development process looks like (a lot of reading of documentation, keeping track of multidimensional tensor shapes, moving between jupyter notebooks and repository code, &amp;hellip;).&lt;/p&gt;
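&lt;p&gt;A minimal sketch of the tree-like grouping trick: fuse consecutive positions so each deeper layer sees a wider context (this mirrors the FlattenConsecutive idea from the lecture, simplified):&lt;/p&gt;
&lt;pre&gt;&lt;code class=&#34;language-python&#34;&gt;import torch

B, T, C = 4, 8, 10             # batch, time, channels
x = torch.randn(B, T, C)

def group_consecutive(x, n):
    # (B, T, C) -&gt; (B, T//n, C*n): each output position concatenates
    # n consecutive input positions, shrinking the time axis
    B, T, C = x.shape
    return x.view(B, T // n, C * n)

h = group_consecutive(x, 2)    # one level of the tree
print(h.shape)                 # torch.Size([4, 4, 20])
&lt;/code&gt;&lt;/pre&gt;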
&lt;ul&gt;
&lt;li&gt;&lt;a class=&#34;link&#34; href=&#34;https://youtu.be/t3YJ5hKiMQ0&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;YouTube video lecture&lt;/a&gt;&lt;/li&gt;
&lt;li&gt;&lt;a class=&#34;link&#34; href=&#34;lectures/makemore/makemore_part5_cnn1.ipynb&#34; &gt;Jupyter notebook files&lt;/a&gt;&lt;/li&gt;
&lt;/ul&gt;
&lt;hr&gt;
&lt;p&gt;&lt;strong&gt;Lecture 7: Let&amp;rsquo;s build GPT: from scratch, in code, spelled out.&lt;/strong&gt;&lt;/p&gt;
&lt;p&gt;We build a Generatively Pretrained Transformer (GPT), following the paper &amp;ldquo;Attention is All You Need&amp;rdquo; and OpenAI&amp;rsquo;s GPT-2 / GPT-3. We talk about connections to ChatGPT, which has taken the world by storm. We watch GitHub Copilot, itself a GPT, help us write a GPT (meta :D!). I recommend people watch the earlier makemore videos to get comfortable with the autoregressive language modeling framework and basics of tensors and PyTorch nn, which we take for granted in this video.&lt;/p&gt;
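&lt;p&gt;A minimal single head of the causal self-attention at the core of that GPT (a sketch with toy dimensions; the full lecture adds multi-head attention, MLPs, residuals, and LayerNorm):&lt;/p&gt;
&lt;pre&gt;&lt;code class=&#34;language-python&#34;&gt;import torch
import torch.nn.functional as F

B, T, C, head_size = 4, 8, 32, 16
x = torch.randn(B, T, C)

key = torch.nn.Linear(C, head_size, bias=False)
query = torch.nn.Linear(C, head_size, bias=False)
value = torch.nn.Linear(C, head_size, bias=False)

k, q, v = key(x), query(x), value(x)              # (B, T, head_size)
wei = q @ k.transpose(-2, -1) * head_size**-0.5   # scaled affinities (B, T, T)
tril = torch.tril(torch.ones(T, T))
wei = wei.masked_fill(tril == 0, float(&#39;-inf&#39;))   # causal: no peeking ahead
wei = F.softmax(wei, dim=-1)                      # attention weights
out = wei @ v                                     # (B, T, head_size)
print(out.shape)
&lt;/code&gt;&lt;/pre&gt;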
&lt;ul&gt;
&lt;li&gt;&lt;a class=&#34;link&#34; href=&#34;https://www.youtube.com/watch?v=kCc8FmEb1nY&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;YouTube video lecture&lt;/a&gt;. For all other links see the video description.&lt;/li&gt;
&lt;/ul&gt;
&lt;hr&gt;
&lt;p&gt;&lt;strong&gt;Lecture 8: Let&amp;rsquo;s build the GPT Tokenizer&lt;/strong&gt;&lt;/p&gt;
&lt;p&gt;The Tokenizer is a necessary and pervasive component of Large Language Models (LLMs), where it translates between strings and tokens (text chunks). Tokenizers are a completely separate stage of the LLM pipeline: they have their own training sets, training algorithms (Byte Pair Encoding), and after training implement two fundamental functions: encode() from strings to tokens, and decode() back from tokens to strings. In this lecture we build from scratch the Tokenizer used in the GPT series from OpenAI. In the process, we will see that a lot of weird behaviors and problems of LLMs actually trace back to tokenization. We&amp;rsquo;ll go through a number of these issues, discuss why tokenization is at fault, and why someone out there ideally finds a way to delete this stage entirely.&lt;/p&gt;
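&lt;p&gt;A minimal sketch of one Byte Pair Encoding training step: find the most frequent adjacent pair of tokens and merge it into a new token (minbpe repeats this in a loop up to the desired vocabulary size):&lt;/p&gt;
&lt;pre&gt;&lt;code class=&#34;language-python&#34;&gt;ids = list(&#39;aaabdaaabac&#39;.encode(&#39;utf-8&#39;))    # start from raw bytes

def most_frequent_pair(ids):
    counts = {}
    for pair in zip(ids, ids[1:]):
        counts[pair] = counts.get(pair, 0) + 1
    return max(counts, key=counts.get)

def merge(ids, pair, new_id):
    out, i = [], 0
    while i &lt; len(ids):
        if i + 1 &lt; len(ids) and (ids[i], ids[i + 1]) == pair:
            out.append(new_id)               # replace the pair with the new token
            i += 2
        else:
            out.append(ids[i])
            i += 1
    return out

pair = most_frequent_pair(ids)               # (97, 97), i.e. &#39;aa&#39;
ids = merge(ids, pair, 256)                  # new token ids start after bytes 0..255
print(pair, ids)
&lt;/code&gt;&lt;/pre&gt;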
&lt;ul&gt;
&lt;li&gt;&lt;a class=&#34;link&#34; href=&#34;https://www.youtube.com/watch?v=zduSFxRajkE&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;YouTube video lecture&lt;/a&gt;&lt;/li&gt;
&lt;li&gt;&lt;a class=&#34;link&#34; href=&#34;https://github.com/karpathy/minbpe&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;minBPE code&lt;/a&gt;&lt;/li&gt;
&lt;li&gt;&lt;a class=&#34;link&#34; href=&#34;https://colab.research.google.com/drive/1y0KnCFZvGVf_odSfcNAws6kcDD7HsI0L?usp=sharing&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;Google Colab&lt;/a&gt;&lt;/li&gt;
&lt;/ul&gt;
&lt;hr&gt;
&lt;p&gt;Ongoing&amp;hellip;&lt;/p&gt;
&lt;p&gt;&lt;strong&gt;License&lt;/strong&gt;&lt;/p&gt;
&lt;p&gt;MIT&lt;/p&gt;
</description>
        </item>
        <item>
        <title>Hands-On-Large-Language-Models</title>
        <link>https://producthunt.programnotes.cn/en/p/hands-on-large-language-models/</link>
        <pubDate>Wed, 27 Aug 2025 15:29:45 +0800</pubDate>
        
        <guid>https://producthunt.programnotes.cn/en/p/hands-on-large-language-models/</guid>
        <description>&lt;img src="https://images.unsplash.com/photo-1733939910552-7752db0c03d0?ixid=M3w0NjAwMjJ8MHwxfHJhbmRvbXx8fHx8fHx8fDE3NTYyNzk2MTd8&amp;ixlib=rb-4.1.0" alt="Featured image of post Hands-On-Large-Language-Models" /&gt;&lt;h1 id=&#34;handsonllmhands-on-large-language-models&#34;&gt;&lt;a class=&#34;link&#34; href=&#34;https://github.com/HandsOnLLM/Hands-On-Large-Language-Models&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;HandsOnLLM/Hands-On-Large-Language-Models&lt;/a&gt;
&lt;/h1&gt;
&lt;p&gt;&lt;a href=&#34;https://www.linkedin.com/in/jalammar/&#34;&gt;&lt;img src=&#34;https://img.shields.io/badge/Follow%20Jay-blue.svg?logo=linkedin&#34;&gt;&lt;/a&gt;
&lt;a href=&#34;https://www.linkedin.com/in/mgrootendorst/&#34;&gt;&lt;img src=&#34;https://img.shields.io/badge/Follow%20Maarten-blue.svg?logo=linkedin&#34;&gt;&lt;/a&gt;
&lt;a href=&#34;https://www.deeplearning.ai/short-courses/how-transformer-llms-work/?utm_campaign=handsonllm-launch&amp;utm_medium=partner&#34;&gt;&lt;img src=&#34;https://img.shields.io/badge/DeepLearning.AI%20Course-NEW!-&amp;labelColor=black&amp;color=red.svg?logo=data:image/svg%2bxml;base64,PHN2ZyB4bWxucz0iaHR0cDovL3d3dy53My5vcmcvMjAwMC9zdmciIHZpZXdCb3g9IjAuMDAwMzY1MjgxIC0wLjAwMDE0MDE0MiAzMy4yOSAzMy4xNSI+Cgk8cGF0aCBkPSJNMTYuNjQzIDMzLjE0NWMtMy4yOTIgMC02LjUxLS45NzItOS4yNDYtMi43OTNhMTYuNTg4IDE2LjU4OCAwIDAxLTYuMTMtNy40MzhBMTYuNTA3IDE2LjUwNyAwIDAxLjMyIDEzLjM0YTE2LjU1IDE2LjU1IDAgMDE0LjU1NS04LjQ4NUExNi42NjUgMTYuNjY1IDAgMDExMy4zOTYuMzE4YTE2LjcxIDE2LjcxIDAgMDE5LjYxNi45NDQgMTYuNjI4IDE2LjYyOCAwIDAxNy40NyA2LjEwMyAxNi41MjIgMTYuNTIyIDAgMDEyLjgwNCA5LjIwN2MwIDQuMzk2LTEuNzUzIDguNjEtNC44NzQgMTEuNzE5YTE2LjY4IDE2LjY4IDAgMDEtMTEuNzY5IDQuODU0em0uMTI1LTYuNjI4YzYuOTA2IDAgMTIuNTE3LTUuNjk4IDEyLjUxNy0xMi43MyAwLTcuMDMtNS42MS0xMi43MjUtMTIuNTE3LTEyLjcyNS02LjkwNiAwLTEyLjUxNyA1LjY5OC0xMi41MTcgMTIuNzI1IDAgNy4wMjcgNS42MTEgMTIuNzMgMTIuNTE3IDEyLjczem0tLjEyNS0yLjkxOGMtNi4yODkgMC0xMS4zODYtNC45MjUtMTEuMzg2LTExLjAwMkM1LjI1NyA2LjUyIDEwLjM2IDEuNTkgMTYuNjQzIDEuNTljNi4yODQgMCAxMS4zODYgNC45MyAxMS4zODYgMTEuMDA3cy01LjA5NyAxMS4wMDItMTEuMzg2IDExLjAwMnptLS4yNDItNC41MDhjNC43NyAwIDguNjMzLTMuNjc5IDguNjMzLTguMjE4IDAtNC41MzgtMy44ODUtOC4yMjEtOC42MzMtOC4yMjEtNC43NDcgMC04LjYzMiAzLjY3OS04LjYzMiA4LjIyMSAwIDQuNTQzIDMuODg1IDguMjE4IDguNjMyIDguMjE4eiIgZmlsbD0iI0ZENEE2MSIvPgo8L3N2Zz4=&#34;&gt;&lt;/a&gt;&lt;/p&gt;
&lt;p&gt;Welcome! In this repository you will find the code for all examples throughout the book &lt;a class=&#34;link&#34; href=&#34;https://www.amazon.com/Hands-Large-Language-Models-Understanding/dp/1098150961&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;Hands-On Large Language Models&lt;/a&gt; written by &lt;a class=&#34;link&#34; href=&#34;https://www.linkedin.com/in/jalammar/&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;Jay Alammar&lt;/a&gt; and &lt;a class=&#34;link&#34; href=&#34;https://www.linkedin.com/in/mgrootendorst/&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;Maarten Grootendorst&lt;/a&gt; which we playfully dubbed: &lt;br&gt;&lt;/p&gt;
&lt;p align=&#34;center&#34;&gt;&lt;b&gt;&lt;i&gt;&#34;The Illustrated LLM Book&#34;&lt;/i&gt;&lt;/b&gt;&lt;/p&gt;
&lt;p&gt;Through the visually educational nature of this book and with &lt;strong&gt;almost 300 custom-made figures&lt;/strong&gt;, learn the practical tools and concepts you need to use Large Language Models today!&lt;/p&gt;
&lt;p&gt;&lt;a href=&#34;https://www.amazon.com/Hands-Large-Language-Models-Understanding/dp/1098150961&#34;&gt;&lt;img src=&#34;images/book_cover.png&#34; width=&#34;50%&#34; &gt;&lt;/a&gt;&lt;/p&gt;
&lt;br&gt;
&lt;p&gt;The book is available on:&lt;/p&gt;
&lt;ul&gt;
&lt;li&gt;&lt;a class=&#34;link&#34; href=&#34;https://www.amazon.com/Hands-Large-Language-Models-Understanding/dp/1098150961&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;Amazon&lt;/a&gt;&lt;/li&gt;
&lt;li&gt;&lt;a class=&#34;link&#34; href=&#34;https://www.shroffpublishers.com/books/computer-science/large-language-models/9789355425522/&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;Shroff Publishers (India)&lt;/a&gt;&lt;/li&gt;
&lt;li&gt;&lt;a class=&#34;link&#34; href=&#34;https://www.oreilly.com/library/view/hands-on-large-language/9781098150952/&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;O&amp;rsquo;Reilly&lt;/a&gt;&lt;/li&gt;
&lt;li&gt;&lt;a class=&#34;link&#34; href=&#34;https://www.amazon.com/Hands-Large-Language-Models-Alammar-ebook/dp/B0DGZ46G88/ref=tmm_kin_swatch_0?_encoding=UTF8&amp;amp;qid=&amp;amp;sr=&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;Kindle&lt;/a&gt;&lt;/li&gt;
&lt;li&gt;&lt;a class=&#34;link&#34; href=&#34;https://www.barnesandnoble.com/w/hands-on-large-language-models-jay-alammar/1145185960&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;Barnes and Noble&lt;/a&gt;&lt;/li&gt;
&lt;li&gt;&lt;a class=&#34;link&#34; href=&#34;https://www.goodreads.com/book/show/210408850-hands-on-large-language-models&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;Goodreads&lt;/a&gt;&lt;/li&gt;
&lt;/ul&gt;
&lt;h2 id=&#34;table-of-contents&#34;&gt;Table of Contents
&lt;/h2&gt;&lt;p&gt;We recommend running all examples in Google Colab for the easiest setup. Google Colab lets you use a T4 GPU with 16 GB of VRAM for free. All examples were mainly built and tested in Google Colab, so it should be the most stable platform; however, any other cloud provider should also work.&lt;/p&gt;
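&lt;p&gt;For instance, a quick sanity check you can run in a fresh Colab cell to confirm a GPU runtime is attached (assumes PyTorch, which Colab preinstalls):&lt;/p&gt;
&lt;pre&gt;&lt;code class=&#34;language-python&#34;&gt;import torch

# confirm a CUDA device is visible before running the notebooks
print(torch.cuda.is_available())          # True on a GPU runtime
if torch.cuda.is_available():
    print(torch.cuda.get_device_name(0))  # e.g. Tesla T4
&lt;/code&gt;&lt;/pre&gt;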
&lt;table&gt;
  &lt;thead&gt;
      &lt;tr&gt;
          &lt;th&gt;Chapter&lt;/th&gt;
          &lt;th&gt;Notebook&lt;/th&gt;
      &lt;/tr&gt;
  &lt;/thead&gt;
  &lt;tbody&gt;
      &lt;tr&gt;
          &lt;td&gt;Chapter 1: Introduction to Language Models&lt;/td&gt;
          &lt;td&gt;&lt;a class=&#34;link&#34; href=&#34;https://colab.research.google.com/github/HandsOnLLM/Hands-On-Large-Language-Models/blob/main/chapter01/Chapter%201%20-%20Introduction%20to%20Language%20Models.ipynb&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;&gt;&lt;img src=&#34;https://colab.research.google.com/assets/colab-badge.svg&#34; loading=&#34;lazy&#34; alt=&#34;Open In Colab&#34;&gt;&lt;/a&gt;&lt;/td&gt;
      &lt;/tr&gt;
      &lt;tr&gt;
          &lt;td&gt;Chapter 2: Tokens and Embeddings&lt;/td&gt;
          &lt;td&gt;&lt;a class=&#34;link&#34; href=&#34;https://colab.research.google.com/github/HandsOnLLM/Hands-On-Large-Language-Models/blob/main/chapter02/Chapter%202%20-%20Tokens%20and%20Token%20Embeddings.ipynb&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;&gt;&lt;img src=&#34;https://colab.research.google.com/assets/colab-badge.svg&#34; loading=&#34;lazy&#34; alt=&#34;Open In Colab&#34;&gt;&lt;/a&gt;&lt;/td&gt;
      &lt;/tr&gt;
      &lt;tr&gt;
          &lt;td&gt;Chapter 3: Looking Inside Transformer LLMs&lt;/td&gt;
          &lt;td&gt;&lt;a class=&#34;link&#34; href=&#34;https://colab.research.google.com/github/HandsOnLLM/Hands-On-Large-Language-Models/blob/main/chapter03/Chapter%203%20-%20Looking%20Inside%20LLMs.ipynb&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;&gt;&lt;img src=&#34;https://colab.research.google.com/assets/colab-badge.svg&#34; loading=&#34;lazy&#34; alt=&#34;Open In Colab&#34;&gt;&lt;/a&gt;&lt;/td&gt;
      &lt;/tr&gt;
      &lt;tr&gt;
          &lt;td&gt;Chapter 4: Text Classification&lt;/td&gt;
          &lt;td&gt;&lt;a class=&#34;link&#34; href=&#34;https://colab.research.google.com/github/HandsOnLLM/Hands-On-Large-Language-Models/blob/main/chapter04/Chapter%204%20-%20Text%20Classification.ipynb&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;&gt;&lt;img src=&#34;https://colab.research.google.com/assets/colab-badge.svg&#34; loading=&#34;lazy&#34; alt=&#34;Open In Colab&#34;&gt;&lt;/a&gt;&lt;/td&gt;
      &lt;/tr&gt;
      &lt;tr&gt;
          &lt;td&gt;Chapter 5: Text Clustering and Topic Modeling&lt;/td&gt;
          &lt;td&gt;&lt;a class=&#34;link&#34; href=&#34;https://colab.research.google.com/github/HandsOnLLM/Hands-On-Large-Language-Models/blob/main/chapter05/Chapter%205%20-%20Text%20Clustering%20and%20Topic%20Modeling.ipynb&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;&gt;&lt;img src=&#34;https://colab.research.google.com/assets/colab-badge.svg&#34; loading=&#34;lazy&#34; alt=&#34;Open In Colab&#34;&gt;&lt;/a&gt;&lt;/td&gt;
      &lt;/tr&gt;
      &lt;tr&gt;
          &lt;td&gt;Chapter 6: Prompt Engineering&lt;/td&gt;
          &lt;td&gt;&lt;a class=&#34;link&#34; href=&#34;https://colab.research.google.com/github/HandsOnLLM/Hands-On-Large-Language-Models/blob/main/chapter06/Chapter%206%20-%20Prompt%20Engineering.ipynb&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;&gt;&lt;img src=&#34;https://colab.research.google.com/assets/colab-badge.svg&#34; loading=&#34;lazy&#34; alt=&#34;Open In Colab&#34;&gt;&lt;/a&gt;&lt;/td&gt;
      &lt;/tr&gt;
      &lt;tr&gt;
          &lt;td&gt;Chapter 7: Advanced Text Generation Techniques and Tools&lt;/td&gt;
          &lt;td&gt;&lt;a class=&#34;link&#34; href=&#34;https://colab.research.google.com/github/HandsOnLLM/Hands-On-Large-Language-Models/blob/main/chapter07/Chapter%207%20-%20Advanced%20Text%20Generation%20Techniques%20and%20Tools.ipynb&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;&gt;&lt;img src=&#34;https://colab.research.google.com/assets/colab-badge.svg&#34; loading=&#34;lazy&#34; alt=&#34;Open In Colab&#34;&gt;&lt;/a&gt;&lt;/td&gt;
      &lt;/tr&gt;
      &lt;tr&gt;
          &lt;td&gt;Chapter 8: Semantic Search and Retrieval-Augmented Generation&lt;/td&gt;
          &lt;td&gt;&lt;a class=&#34;link&#34; href=&#34;https://colab.research.google.com/github/HandsOnLLM/Hands-On-Large-Language-Models/blob/main/chapter08/Chapter%208%20-%20Semantic%20Search.ipynb&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;&gt;&lt;img src=&#34;https://colab.research.google.com/assets/colab-badge.svg&#34; loading=&#34;lazy&#34; alt=&#34;Open In Colab&#34;&gt;&lt;/a&gt;&lt;/td&gt;
      &lt;/tr&gt;
      &lt;tr&gt;
          &lt;td&gt;Chapter 9: Multimodal Large Language Models&lt;/td&gt;
          &lt;td&gt;&lt;a class=&#34;link&#34; href=&#34;https://colab.research.google.com/github/HandsOnLLM/Hands-On-Large-Language-Models/blob/main/chapter09/Chapter%209%20-%20Multimodal%20Large%20Language%20Models.ipynb&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;&gt;&lt;img src=&#34;https://colab.research.google.com/assets/colab-badge.svg&#34; loading=&#34;lazy&#34; alt=&#34;Open In Colab&#34;&gt;&lt;/a&gt;&lt;/td&gt;
      &lt;/tr&gt;
      &lt;tr&gt;
          &lt;td&gt;Chapter 10: Creating Text Embedding Models&lt;/td&gt;
          &lt;td&gt;&lt;a class=&#34;link&#34; href=&#34;https://colab.research.google.com/github/HandsOnLLM/Hands-On-Large-Language-Models/blob/main/chapter10/Chapter%2010%20-%20Creating%20Text%20Embedding%20Models.ipynb&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;&gt;&lt;img src=&#34;https://colab.research.google.com/assets/colab-badge.svg&#34; loading=&#34;lazy&#34; alt=&#34;Open In Colab&#34;&gt;&lt;/a&gt;&lt;/td&gt;
      &lt;/tr&gt;
      &lt;tr&gt;
          &lt;td&gt;Chapter 11: Fine-tuning Representation Models for Classification&lt;/td&gt;
          &lt;td&gt;&lt;a class=&#34;link&#34; href=&#34;https://colab.research.google.com/github/HandsOnLLM/Hands-On-Large-Language-Models/blob/main/chapter11/Chapter%2011%20-%20Fine-Tuning%20BERT.ipynb&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;&gt;&lt;img src=&#34;https://colab.research.google.com/assets/colab-badge.svg&#34; loading=&#34;lazy&#34; alt=&#34;Open In Colab&#34;&gt;&lt;/a&gt;&lt;/td&gt;
      &lt;/tr&gt;
      &lt;tr&gt;
          &lt;td&gt;Chapter 12: Fine-tuning Generation Models&lt;/td&gt;
          &lt;td&gt;&lt;a class=&#34;link&#34; href=&#34;https://colab.research.google.com/github/HandsOnLLM/Hands-On-Large-Language-Models/blob/main/chapter12/Chapter%2012%20-%20Fine-tuning%20Generation%20Models.ipynb&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;&gt;&lt;img src=&#34;https://colab.research.google.com/assets/colab-badge.svg&#34; loading=&#34;lazy&#34; alt=&#34;Open In Colab&#34;&gt;&lt;/a&gt;&lt;/td&gt;
      &lt;/tr&gt;
  &lt;/tbody&gt;
&lt;/table&gt;
&lt;blockquote&gt;
&lt;p&gt;[!TIP]
You can check the &lt;a class=&#34;link&#34; href=&#34;.setup/&#34; &gt;setup&lt;/a&gt; folder for a quick-start guide to install all packages locally, and the &lt;a class=&#34;link&#34; href=&#34;.setup/conda/&#34; &gt;conda&lt;/a&gt; folder for a complete guide on how to set up your environment, including conda and PyTorch installation.
Note that, depending on your OS, Python version, and dependencies, your results might differ slightly. However, they should still be similar to the examples in the book.&lt;/p&gt;
&lt;/blockquote&gt;
&lt;h2 id=&#34;reviews&#34;&gt;Reviews
&lt;/h2&gt;&lt;blockquote&gt;
&lt;p&gt;&amp;ldquo;&lt;em&gt;Jay and Maarten have continued their tradition of providing beautifully illustrated and insightful descriptions of complex topics in their new book. Bolstered with working code, timelines, and references to key papers, their book is a valuable resource for anyone looking to understand the main techniques behind how Large Language Models are built.&lt;/em&gt;&amp;rdquo;&lt;/p&gt;
&lt;p&gt;&lt;strong&gt;Andrew Ng&lt;/strong&gt; - founder of &lt;a class=&#34;link&#34; href=&#34;https://www.deeplearning.ai/&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;DeepLearning.AI&lt;/a&gt;&lt;/p&gt;
&lt;/blockquote&gt;
&lt;hr&gt;
&lt;blockquote&gt;
&lt;p&gt;&amp;ldquo;&lt;em&gt;This is an exceptional guide to the world of language models and their practical applications in industry. Its highly-visual coverage of generative, representational, and retrieval applications of language models empowers readers to quickly understand, use, and refine LLMs. Highly recommended!&lt;/em&gt;&amp;rdquo;&lt;/p&gt;
&lt;p&gt;&lt;strong&gt;Nils Reimers&lt;/strong&gt; - Director of Machine Learning at Cohere | creator of &lt;a class=&#34;link&#34; href=&#34;https://github.com/UKPLab/sentence-transformers&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;sentence-transformers&lt;/a&gt;&lt;/p&gt;
&lt;/blockquote&gt;
&lt;hr&gt;
&lt;blockquote&gt;
&lt;p&gt;&amp;ldquo;&lt;em&gt;I can’t think of another book that is more important to read right now. On every single page, I learned something that is critical to success in this era of language models.&lt;/em&gt;&amp;rdquo;&lt;/p&gt;
&lt;p&gt;&lt;strong&gt;Josh Starmer&lt;/strong&gt; - &lt;a class=&#34;link&#34; href=&#34;https://www.youtube.com/channel/UCtYLUTtgS3k1Fg4y5tAhLbw&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;StatQuest&lt;/a&gt;&lt;/p&gt;
&lt;/blockquote&gt;
&lt;hr&gt;
&lt;blockquote&gt;
&lt;p&gt;&amp;ldquo;&lt;em&gt;If you’re looking to get up to speed in everything regarding LLMs, look no further! In this wonderful book, Jay and Maarten will take you from zero to expert in the history and latest advances in large language models. With very intuitive explanations, great real-life examples, clear illustrations, and comprehensive code labs, this book lifts the curtain on the complexities of transformer models, tokenizers, semantic search, RAG, and many other cutting-edge technologies. A must read for anyone interested in the latest AI technology!&lt;/em&gt;&amp;rdquo;&lt;/p&gt;
&lt;p&gt;&lt;strong&gt;Luis Serrano, PhD&lt;/strong&gt; - Founder and CEO of &lt;a class=&#34;link&#34; href=&#34;https://www.youtube.com/@SerranoAcademy&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;Serrano Academy&lt;/a&gt;&lt;/p&gt;
&lt;/blockquote&gt;
&lt;hr&gt;
&lt;blockquote&gt;
&lt;p&gt;&amp;ldquo;&lt;em&gt;Hands-On Large Language Models brings clarity and practical examples to cut through the hype of AI. It provides a wealth of great diagrams and visual aids to supplement the clear explanations. The worked examples and code make concrete what other books leave abstract. The book starts with simple introductory beginnings, and steadily builds in scope. By the final chapters, you will be fine-tuning and building your own large language models with confidence.&lt;/em&gt;&amp;rdquo;&lt;/p&gt;
&lt;p&gt;&lt;strong&gt;Leland McInnes&lt;/strong&gt; - Researcher at the Tutte Institute for Mathematics and Computing | creator of &lt;a class=&#34;link&#34; href=&#34;https://github.com/lmcinnes/umap&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;UMAP&lt;/a&gt; and &lt;a class=&#34;link&#34; href=&#34;https://github.com/scikit-learn-contrib/hdbscan&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;HDBSCAN&lt;/a&gt;&lt;/p&gt;
&lt;/blockquote&gt;
&lt;hr&gt;
&lt;h2 id=&#34;bonus-content&#34;&gt;&lt;a class=&#34;link&#34; href=&#34;bonus/&#34; &gt;Bonus content!&lt;/a&gt;
&lt;/h2&gt;&lt;p&gt;We attempted to put as much information into the book as possible without it being overwhelming. However, even with a 400-page book there is still much to discover!&lt;/p&gt;
&lt;p&gt;We continue to create more guides that complement the book and go more in-depth into new and &lt;a class=&#34;link&#34; href=&#34;bonus/&#34; &gt;exciting topics&lt;/a&gt;:&lt;/p&gt;
&lt;table&gt;
  &lt;thead&gt;
      &lt;tr&gt;
          &lt;th style=&#34;text-align: center&#34;&gt;&lt;a class=&#34;link&#34; href=&#34;https://newsletter.maartengrootendorst.com/p/a-visual-guide-to-mamba-and-state&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;&gt;A Visual Guide to Mamba&lt;/a&gt;&lt;/th&gt;
          &lt;th style=&#34;text-align: center&#34;&gt;&lt;a class=&#34;link&#34; href=&#34;https://newsletter.maartengrootendorst.com/p/a-visual-guide-to-quantization&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;&gt;A Visual Guide to Quantization&lt;/a&gt;&lt;/th&gt;
          &lt;th style=&#34;text-align: center&#34;&gt;&lt;a class=&#34;link&#34; href=&#34;https://jalammar.github.io/illustrated-stable-diffusion/&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;&gt;The Illustrated Stable Diffusion&lt;/a&gt;&lt;/th&gt;
      &lt;/tr&gt;
  &lt;/thead&gt;
  &lt;tbody&gt;
      &lt;tr&gt;
          &lt;td style=&#34;text-align: center&#34;&gt;&lt;img src=&#34;https://producthunt.programnotes.cn/images/mamba.png&#34; loading=&#34;lazy&#34;&gt;&lt;/td&gt;
          &lt;td style=&#34;text-align: center&#34;&gt;&lt;img src=&#34;https://producthunt.programnotes.cn/images/quant.png&#34; loading=&#34;lazy&#34;&gt;&lt;/td&gt;
          &lt;td style=&#34;text-align: center&#34;&gt;&lt;img src=&#34;https://producthunt.programnotes.cn/images/diffusion.png&#34; loading=&#34;lazy&#34;&gt;&lt;/td&gt;
      &lt;/tr&gt;
      &lt;tr&gt;
          &lt;td style=&#34;text-align: center&#34;&gt;&lt;strong&gt;&lt;a class=&#34;link&#34; href=&#34;https://newsletter.maartengrootendorst.com/p/a-visual-guide-to-mixture-of-experts&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;&gt;A Visual Guide to Mixture of Experts&lt;/a&gt;&lt;/strong&gt;&lt;/td&gt;
          &lt;td style=&#34;text-align: center&#34;&gt;&lt;strong&gt;&lt;a class=&#34;link&#34; href=&#34;https://newsletter.maartengrootendorst.com/p/a-visual-guide-to-reasoning-llms&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;&gt;A Visual Guide to Reasoning LLMs&lt;/a&gt;&lt;/strong&gt;&lt;/td&gt;
          &lt;td style=&#34;text-align: center&#34;&gt;&lt;strong&gt;&lt;a class=&#34;link&#34; href=&#34;https://newsletter.languagemodels.co/p/the-illustrated-deepseek-r1&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;&gt;The Illustrated DeepSeek-R1&lt;/a&gt;&lt;/strong&gt;&lt;/td&gt;
      &lt;/tr&gt;
      &lt;tr&gt;
          &lt;td style=&#34;text-align: center&#34;&gt;&lt;img src=&#34;https://producthunt.programnotes.cn/images/moe.png&#34; loading=&#34;lazy&#34;&gt;&lt;/td&gt;
          &lt;td style=&#34;text-align: center&#34;&gt;&lt;img src=&#34;https://producthunt.programnotes.cn/images/reasoning.png&#34; loading=&#34;lazy&#34;&gt;&lt;/td&gt;
          &lt;td style=&#34;text-align: center&#34;&gt;&lt;img src=&#34;https://producthunt.programnotes.cn/images/deepseek.png&#34; loading=&#34;lazy&#34;&gt;&lt;/td&gt;
      &lt;/tr&gt;
  &lt;/tbody&gt;
&lt;/table&gt;
&lt;h2 id=&#34;citation&#34;&gt;Citation
&lt;/h2&gt;&lt;p&gt;Please consider citing the book if you find it useful for your research:&lt;/p&gt;
&lt;pre tabindex=&#34;0&#34;&gt;&lt;code class=&#34;language-fallback&#34; data-lang=&#34;fallback&#34;&gt;@book{hands-on-llms-book,
  author       = {Jay Alammar and Maarten Grootendorst},
  title        = {Hands-On Large Language Models},
  publisher    = {O&amp;#39;Reilly},
  year         = {2024},
  isbn         = {978-1098150969},
  url          = {https://www.oreilly.com/library/view/hands-on-large-language/9781098150952/},
  github       = {https://github.com/HandsOnLLM/Hands-On-Large-Language-Models}
}
&lt;/code&gt;&lt;/pre&gt;</description>
        </item>
        
    </channel>
</rss>
