<?xml version="1.0" encoding="utf-8" standalone="yes"?>
<rss version="2.0" xmlns:atom="http://www.w3.org/2005/Atom">
    <channel>
        <title>Summarization on Producthunt daily</title>
        <link>https://producthunt.programnotes.cn/en/tags/summarization/</link>
        <description>Recent content in Summarization on Producthunt daily</description>
        <generator>Hugo -- gohugo.io</generator>
        <language>en</language>
        <lastBuildDate>Tue, 22 Jul 2025 15:33:16 +0800</lastBuildDate><atom:link href="https://producthunt.programnotes.cn/en/tags/summarization/index.xml" rel="self" type="application/rss+xml" /><item>
        <title>open_deep_research</title>
        <link>https://producthunt.programnotes.cn/en/p/open_deep_research/</link>
        <pubDate>Tue, 22 Jul 2025 15:33:16 +0800</pubDate>
        
        <guid>https://producthunt.programnotes.cn/en/p/open_deep_research/</guid>
        <description>&lt;img src="https://images.unsplash.com/photo-1694250990115-ca7d9d991b24?ixid=M3w0NjAwMjJ8MHwxfHJhbmRvbXx8fHx8fHx8fDE3NTMxNjk1NjB8&amp;ixlib=rb-4.1.0" alt="Featured image of post open_deep_research" /&gt;&lt;h1 id=&#34;langchain-aiopen_&#34;&gt;&lt;a class=&#34;link&#34; href=&#34;https://github.com/langchain-ai/open_deep_research&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;langchain-ai/open_deep_research&lt;/a&gt;
&lt;/h1&gt;&lt;h1 id=&#34;open-deep-research&#34;&gt;Open Deep Research
&lt;/h1&gt;&lt;img width=&#34;1388&#34; height=&#34;298&#34; alt=&#34;full_diagram&#34; src=&#34;https://github.com/user-attachments/assets/12a2371b-8be2-4219-9b48-90503eb43c69&#34; /&gt;
&lt;p&gt;Deep research has broken out as one of the most popular agent applications. This is a simple, configurable, fully open source deep research agent that works across many model providers, search tools, and MCP servers.&lt;/p&gt;
&lt;ul&gt;
&lt;li&gt;Read more in our &lt;a class=&#34;link&#34; href=&#34;https://blog.langchain.com/open-deep-research/&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;blog&lt;/a&gt;&lt;/li&gt;
&lt;li&gt;See our &lt;a class=&#34;link&#34; href=&#34;https://www.youtube.com/watch?v=agGiWUpxkhg&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;video&lt;/a&gt; for a quick overview&lt;/li&gt;
&lt;/ul&gt;
&lt;h3 id=&#34;-quickstart&#34;&gt;🚀 Quickstart
&lt;/h3&gt;&lt;ol&gt;
&lt;li&gt;Clone the repository and activate a virtual environment:&lt;/li&gt;
&lt;/ol&gt;
&lt;div class=&#34;highlight&#34;&gt;&lt;div class=&#34;chroma&#34;&gt;
&lt;table class=&#34;lntable&#34;&gt;&lt;tr&gt;&lt;td class=&#34;lntd&#34;&gt;
&lt;pre tabindex=&#34;0&#34; class=&#34;chroma&#34;&gt;&lt;code&gt;&lt;span class=&#34;lnt&#34;&gt;1
&lt;/span&gt;&lt;span class=&#34;lnt&#34;&gt;2
&lt;/span&gt;&lt;span class=&#34;lnt&#34;&gt;3
&lt;/span&gt;&lt;span class=&#34;lnt&#34;&gt;4
&lt;/span&gt;&lt;/code&gt;&lt;/pre&gt;&lt;/td&gt;
&lt;td class=&#34;lntd&#34;&gt;
&lt;pre tabindex=&#34;0&#34; class=&#34;chroma&#34;&gt;&lt;code class=&#34;language-bash&#34; data-lang=&#34;bash&#34;&gt;&lt;span class=&#34;line&#34;&gt;&lt;span class=&#34;cl&#34;&gt;git clone https://github.com/langchain-ai/open_deep_research.git
&lt;/span&gt;&lt;/span&gt;&lt;span class=&#34;line&#34;&gt;&lt;span class=&#34;cl&#34;&gt;&lt;span class=&#34;nb&#34;&gt;cd&lt;/span&gt; open_deep_research
&lt;/span&gt;&lt;/span&gt;&lt;span class=&#34;line&#34;&gt;&lt;span class=&#34;cl&#34;&gt;uv venv
&lt;/span&gt;&lt;/span&gt;&lt;span class=&#34;line&#34;&gt;&lt;span class=&#34;cl&#34;&gt;&lt;span class=&#34;nb&#34;&gt;source&lt;/span&gt; .venv/bin/activate  &lt;span class=&#34;c1&#34;&gt;# On Windows: .venv\Scripts\activate&lt;/span&gt;
&lt;/span&gt;&lt;/span&gt;&lt;/code&gt;&lt;/pre&gt;&lt;/td&gt;&lt;/tr&gt;&lt;/table&gt;
&lt;/div&gt;
&lt;/div&gt;&lt;ol start=&#34;2&#34;&gt;
&lt;li&gt;Install dependencies:&lt;/li&gt;
&lt;/ol&gt;
&lt;div class=&#34;highlight&#34;&gt;&lt;div class=&#34;chroma&#34;&gt;
&lt;table class=&#34;lntable&#34;&gt;&lt;tr&gt;&lt;td class=&#34;lntd&#34;&gt;
&lt;pre tabindex=&#34;0&#34; class=&#34;chroma&#34;&gt;&lt;code&gt;&lt;span class=&#34;lnt&#34;&gt;1
&lt;/span&gt;&lt;/code&gt;&lt;/pre&gt;&lt;/td&gt;
&lt;td class=&#34;lntd&#34;&gt;
&lt;pre tabindex=&#34;0&#34; class=&#34;chroma&#34;&gt;&lt;code class=&#34;language-bash&#34; data-lang=&#34;bash&#34;&gt;&lt;span class=&#34;line&#34;&gt;&lt;span class=&#34;cl&#34;&gt;uv pip install -r pyproject.toml
&lt;/span&gt;&lt;/span&gt;&lt;/code&gt;&lt;/pre&gt;&lt;/td&gt;&lt;/tr&gt;&lt;/table&gt;
&lt;/div&gt;
&lt;/div&gt;&lt;ol start=&#34;3&#34;&gt;
&lt;li&gt;Set up your &lt;code&gt;.env&lt;/code&gt; file to customize the environment variables (for model selection, search tools, and other configuration settings):&lt;/li&gt;
&lt;/ol&gt;
&lt;div class=&#34;highlight&#34;&gt;&lt;div class=&#34;chroma&#34;&gt;
&lt;table class=&#34;lntable&#34;&gt;&lt;tr&gt;&lt;td class=&#34;lntd&#34;&gt;
&lt;pre tabindex=&#34;0&#34; class=&#34;chroma&#34;&gt;&lt;code&gt;&lt;span class=&#34;lnt&#34;&gt;1
&lt;/span&gt;&lt;/code&gt;&lt;/pre&gt;&lt;/td&gt;
&lt;td class=&#34;lntd&#34;&gt;
&lt;pre tabindex=&#34;0&#34; class=&#34;chroma&#34;&gt;&lt;code class=&#34;language-bash&#34; data-lang=&#34;bash&#34;&gt;&lt;span class=&#34;line&#34;&gt;&lt;span class=&#34;cl&#34;&gt;cp .env.example .env
&lt;/span&gt;&lt;/span&gt;&lt;/code&gt;&lt;/pre&gt;&lt;/td&gt;&lt;/tr&gt;&lt;/table&gt;
&lt;/div&gt;
&lt;/div&gt;&lt;ol start=&#34;4&#34;&gt;
&lt;li&gt;Launch the assistant with the LangGraph server locally to open LangGraph Studio in your browser:&lt;/li&gt;
&lt;/ol&gt;
&lt;div class=&#34;highlight&#34;&gt;&lt;div class=&#34;chroma&#34;&gt;
&lt;table class=&#34;lntable&#34;&gt;&lt;tr&gt;&lt;td class=&#34;lntd&#34;&gt;
&lt;pre tabindex=&#34;0&#34; class=&#34;chroma&#34;&gt;&lt;code&gt;&lt;span class=&#34;lnt&#34;&gt;1
&lt;/span&gt;&lt;span class=&#34;lnt&#34;&gt;2
&lt;/span&gt;&lt;/code&gt;&lt;/pre&gt;&lt;/td&gt;
&lt;td class=&#34;lntd&#34;&gt;
&lt;pre tabindex=&#34;0&#34; class=&#34;chroma&#34;&gt;&lt;code class=&#34;language-bash&#34; data-lang=&#34;bash&#34;&gt;&lt;span class=&#34;line&#34;&gt;&lt;span class=&#34;cl&#34;&gt;&lt;span class=&#34;c1&#34;&gt;# Install dependencies and start the LangGraph server&lt;/span&gt;
&lt;/span&gt;&lt;/span&gt;&lt;span class=&#34;line&#34;&gt;&lt;span class=&#34;cl&#34;&gt;uvx --refresh --from &lt;span class=&#34;s2&#34;&gt;&amp;#34;langgraph-cli[inmem]&amp;#34;&lt;/span&gt; --with-editable . --python 3.11 langgraph dev --allow-blocking
&lt;/span&gt;&lt;/span&gt;&lt;/code&gt;&lt;/pre&gt;&lt;/td&gt;&lt;/tr&gt;&lt;/table&gt;
&lt;/div&gt;
&lt;/div&gt;&lt;p&gt;Use this to open the Studio UI:&lt;/p&gt;
&lt;div class=&#34;highlight&#34;&gt;&lt;div class=&#34;chroma&#34;&gt;
&lt;table class=&#34;lntable&#34;&gt;&lt;tr&gt;&lt;td class=&#34;lntd&#34;&gt;
&lt;pre tabindex=&#34;0&#34; class=&#34;chroma&#34;&gt;&lt;code&gt;&lt;span class=&#34;lnt&#34;&gt;1
&lt;/span&gt;&lt;span class=&#34;lnt&#34;&gt;2
&lt;/span&gt;&lt;span class=&#34;lnt&#34;&gt;3
&lt;/span&gt;&lt;/code&gt;&lt;/pre&gt;&lt;/td&gt;
&lt;td class=&#34;lntd&#34;&gt;
&lt;pre tabindex=&#34;0&#34; class=&#34;chroma&#34;&gt;&lt;code class=&#34;language-fallback&#34; data-lang=&#34;fallback&#34;&gt;&lt;span class=&#34;line&#34;&gt;&lt;span class=&#34;cl&#34;&gt;- 🚀 API: http://127.0.0.1:2024
&lt;/span&gt;&lt;/span&gt;&lt;span class=&#34;line&#34;&gt;&lt;span class=&#34;cl&#34;&gt;- 🎨 Studio UI: https://smith.langchain.com/studio/?baseUrl=http://127.0.0.1:2024
&lt;/span&gt;&lt;/span&gt;&lt;span class=&#34;line&#34;&gt;&lt;span class=&#34;cl&#34;&gt;- 📚 API Docs: http://127.0.0.1:2024/docs
&lt;/span&gt;&lt;/span&gt;&lt;/code&gt;&lt;/pre&gt;&lt;/td&gt;&lt;/tr&gt;&lt;/table&gt;
&lt;/div&gt;
&lt;/div&gt;&lt;img width=&#34;817&#34; height=&#34;666&#34; alt=&#34;Screenshot 2025-07-13 at 11 21 12 PM&#34; src=&#34;https://github.com/user-attachments/assets/052f2ed3-c664-4a4f-8ec2-074349dcaa3f&#34; /&gt;
&lt;p&gt;Ask a question in the &lt;code&gt;messages&lt;/code&gt; input field and click &lt;code&gt;Submit&lt;/code&gt;.&lt;/p&gt;
&lt;h3 id=&#34;configurations&#34;&gt;Configurations
&lt;/h3&gt;&lt;p&gt;Open Deep Research offers extensive configuration options to customize the research process and model behavior. All configurations can be set via the web UI, environment variables, or by modifying the configuration directly.&lt;/p&gt;
&lt;h4 id=&#34;general-settings&#34;&gt;General Settings
&lt;/h4&gt;&lt;ul&gt;
&lt;li&gt;&lt;strong&gt;Max Structured Output Retries&lt;/strong&gt; (default: 3): Maximum number of retries for structured output calls from models when parsing fails&lt;/li&gt;
&lt;li&gt;&lt;strong&gt;Allow Clarification&lt;/strong&gt; (default: true): Whether to allow the researcher to ask clarifying questions before starting research&lt;/li&gt;
&lt;li&gt;&lt;strong&gt;Max Concurrent Research Units&lt;/strong&gt; (default: 5): Maximum number of research units to run concurrently using sub-agents. Higher values enable faster research but may hit rate limits&lt;/li&gt;
&lt;/ul&gt;
&lt;h4 id=&#34;research-configuration&#34;&gt;Research Configuration
&lt;/h4&gt;&lt;ul&gt;
&lt;li&gt;&lt;strong&gt;Search API&lt;/strong&gt; (default: Tavily): Choose from Tavily (works with all models), OpenAI Native Web Search, Anthropic Native Web Search, or None&lt;/li&gt;
&lt;li&gt;&lt;strong&gt;Max Researcher Iterations&lt;/strong&gt; (default: 3): Number of times the Research Supervisor will reflect on research and ask follow-up questions&lt;/li&gt;
&lt;li&gt;&lt;strong&gt;Max React Tool Calls&lt;/strong&gt; (default: 5): Maximum number of tool calling iterations in a single researcher step&lt;/li&gt;
&lt;/ul&gt;
&lt;h4 id=&#34;models&#34;&gt;Models
&lt;/h4&gt;&lt;p&gt;Open Deep Research uses multiple specialized models for different research tasks:&lt;/p&gt;
&lt;ul&gt;
&lt;li&gt;&lt;strong&gt;Summarization Model&lt;/strong&gt; (default: &lt;code&gt;openai:gpt-4.1-nano&lt;/code&gt;): Summarizes research results from search APIs&lt;/li&gt;
&lt;li&gt;&lt;strong&gt;Research Model&lt;/strong&gt; (default: &lt;code&gt;openai:gpt-4.1&lt;/code&gt;): Conducts research and analysis&lt;/li&gt;
&lt;li&gt;&lt;strong&gt;Compression Model&lt;/strong&gt; (default: &lt;code&gt;openai:gpt-4.1-mini&lt;/code&gt;): Compresses research findings from sub-agents&lt;/li&gt;
&lt;li&gt;&lt;strong&gt;Final Report Model&lt;/strong&gt; (default: &lt;code&gt;openai:gpt-4.1&lt;/code&gt;): Writes the final comprehensive report&lt;/li&gt;
&lt;/ul&gt;
&lt;p&gt;All models are configured using &lt;a class=&#34;link&#34; href=&#34;https://python.langchain.com/docs/how_to/chat_models_universal_init/&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;init_chat_model() API&lt;/a&gt; which supports providers like OpenAI, Anthropic, Google Vertex AI, and others.&lt;/p&gt;
&lt;p&gt;&lt;strong&gt;Important Model Requirements:&lt;/strong&gt;&lt;/p&gt;
&lt;ol&gt;
&lt;li&gt;
&lt;p&gt;&lt;strong&gt;Structured Outputs&lt;/strong&gt;: All models must support structured outputs. Check support &lt;a class=&#34;link&#34; href=&#34;https://python.langchain.com/docs/integrations/chat/&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;here&lt;/a&gt;.&lt;/p&gt;
&lt;/li&gt;
&lt;li&gt;
&lt;p&gt;&lt;strong&gt;Search API Compatibility&lt;/strong&gt;: Research and Compression models must support your selected search API:&lt;/p&gt;
&lt;ul&gt;
&lt;li&gt;Anthropic search requires Anthropic models with web search capability&lt;/li&gt;
&lt;li&gt;OpenAI search requires OpenAI models with web search capability&lt;/li&gt;
&lt;li&gt;Tavily works with all models&lt;/li&gt;
&lt;/ul&gt;
&lt;/li&gt;
&lt;li&gt;
&lt;p&gt;&lt;strong&gt;Tool Calling&lt;/strong&gt;: All models must support tool calling functionality&lt;/p&gt;
&lt;/li&gt;
&lt;li&gt;
&lt;p&gt;&lt;strong&gt;Special Configurations&lt;/strong&gt;:&lt;/p&gt;
&lt;ul&gt;
&lt;li&gt;For OpenRouter: Follow &lt;a class=&#34;link&#34; href=&#34;https://github.com/langchain-ai/open_deep_research/issues/75#issuecomment-2811472408&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;this guide&lt;/a&gt;&lt;/li&gt;
&lt;li&gt;For local models via Ollama: See &lt;a class=&#34;link&#34; href=&#34;https://github.com/langchain-ai/open_deep_research/issues/65#issuecomment-2743586318&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;setup instructions&lt;/a&gt;&lt;/li&gt;
&lt;/ul&gt;
&lt;/li&gt;
&lt;/ol&gt;
&lt;h4 id=&#34;example-mcp-model-context-protocol-servers&#34;&gt;Example MCP (Model Context Protocol) Servers
&lt;/h4&gt;&lt;p&gt;Open Deep Research supports MCP servers to extend research capabilities.&lt;/p&gt;
&lt;h4 id=&#34;local-mcp-servers&#34;&gt;Local MCP Servers
&lt;/h4&gt;&lt;p&gt;&lt;strong&gt;Filesystem MCP Server&lt;/strong&gt; provides secure file system operations with robust access control:&lt;/p&gt;
&lt;ul&gt;
&lt;li&gt;Read, write, and manage files and directories&lt;/li&gt;
&lt;li&gt;Perform operations like reading file contents, creating directories, moving files, and searching&lt;/li&gt;
&lt;li&gt;Restrict operations to predefined directories for security&lt;/li&gt;
&lt;li&gt;Support for both command-line configuration and dynamic MCP roots&lt;/li&gt;
&lt;/ul&gt;
&lt;p&gt;Example usage:&lt;/p&gt;
&lt;div class=&#34;highlight&#34;&gt;&lt;div class=&#34;chroma&#34;&gt;
&lt;table class=&#34;lntable&#34;&gt;&lt;tr&gt;&lt;td class=&#34;lntd&#34;&gt;
&lt;pre tabindex=&#34;0&#34; class=&#34;chroma&#34;&gt;&lt;code&gt;&lt;span class=&#34;lnt&#34;&gt;1
&lt;/span&gt;&lt;/code&gt;&lt;/pre&gt;&lt;/td&gt;
&lt;td class=&#34;lntd&#34;&gt;
&lt;pre tabindex=&#34;0&#34; class=&#34;chroma&#34;&gt;&lt;code class=&#34;language-bash&#34; data-lang=&#34;bash&#34;&gt;&lt;span class=&#34;line&#34;&gt;&lt;span class=&#34;cl&#34;&gt;mcp-server-filesystem /path/to/allowed/dir1 /path/to/allowed/dir2
&lt;/span&gt;&lt;/span&gt;&lt;/code&gt;&lt;/pre&gt;&lt;/td&gt;&lt;/tr&gt;&lt;/table&gt;
&lt;/div&gt;
&lt;/div&gt;&lt;h4 id=&#34;remote-mcp-servers&#34;&gt;Remote MCP Servers
&lt;/h4&gt;&lt;p&gt;&lt;strong&gt;Remote MCP servers&lt;/strong&gt; enable distributed agent coordination and support streamable HTTP requests. Unlike local servers, they can be multi-tenant and require more complex authentication.&lt;/p&gt;
&lt;p&gt;&lt;strong&gt;Arcade MCP Server Example&lt;/strong&gt;:&lt;/p&gt;
&lt;div class=&#34;highlight&#34;&gt;&lt;div class=&#34;chroma&#34;&gt;
&lt;table class=&#34;lntable&#34;&gt;&lt;tr&gt;&lt;td class=&#34;lntd&#34;&gt;
&lt;pre tabindex=&#34;0&#34; class=&#34;chroma&#34;&gt;&lt;code&gt;&lt;span class=&#34;lnt&#34;&gt;1
&lt;/span&gt;&lt;span class=&#34;lnt&#34;&gt;2
&lt;/span&gt;&lt;span class=&#34;lnt&#34;&gt;3
&lt;/span&gt;&lt;span class=&#34;lnt&#34;&gt;4
&lt;/span&gt;&lt;/code&gt;&lt;/pre&gt;&lt;/td&gt;
&lt;td class=&#34;lntd&#34;&gt;
&lt;pre tabindex=&#34;0&#34; class=&#34;chroma&#34;&gt;&lt;code class=&#34;language-json&#34; data-lang=&#34;json&#34;&gt;&lt;span class=&#34;line&#34;&gt;&lt;span class=&#34;cl&#34;&gt;&lt;span class=&#34;p&#34;&gt;{&lt;/span&gt;
&lt;/span&gt;&lt;/span&gt;&lt;span class=&#34;line&#34;&gt;&lt;span class=&#34;cl&#34;&gt;  &lt;span class=&#34;nt&#34;&gt;&amp;#34;url&amp;#34;&lt;/span&gt;&lt;span class=&#34;p&#34;&gt;:&lt;/span&gt; &lt;span class=&#34;s2&#34;&gt;&amp;#34;https://api.arcade.dev/v1/mcps/ms_0ujssxh0cECutqzMgbtXSGnjorm&amp;#34;&lt;/span&gt;&lt;span class=&#34;p&#34;&gt;,&lt;/span&gt;
&lt;/span&gt;&lt;/span&gt;&lt;span class=&#34;line&#34;&gt;&lt;span class=&#34;cl&#34;&gt;  &lt;span class=&#34;nt&#34;&gt;&amp;#34;tools&amp;#34;&lt;/span&gt;&lt;span class=&#34;p&#34;&gt;:&lt;/span&gt; &lt;span class=&#34;p&#34;&gt;[&lt;/span&gt;&lt;span class=&#34;s2&#34;&gt;&amp;#34;Search_SearchHotels&amp;#34;&lt;/span&gt;&lt;span class=&#34;p&#34;&gt;,&lt;/span&gt; &lt;span class=&#34;s2&#34;&gt;&amp;#34;Search_SearchOneWayFlights&amp;#34;&lt;/span&gt;&lt;span class=&#34;p&#34;&gt;,&lt;/span&gt; &lt;span class=&#34;s2&#34;&gt;&amp;#34;Search_SearchRoundtripFlights&amp;#34;&lt;/span&gt;&lt;span class=&#34;p&#34;&gt;]&lt;/span&gt;
&lt;/span&gt;&lt;/span&gt;&lt;span class=&#34;line&#34;&gt;&lt;span class=&#34;cl&#34;&gt;&lt;span class=&#34;p&#34;&gt;}&lt;/span&gt;
&lt;/span&gt;&lt;/span&gt;&lt;/code&gt;&lt;/pre&gt;&lt;/td&gt;&lt;/tr&gt;&lt;/table&gt;
&lt;/div&gt;
&lt;/div&gt;&lt;p&gt;Remote servers can be configured as authenticated or unauthenticated and support JWT-based authentication through OAuth endpoints.&lt;/p&gt;
&lt;h3 id=&#34;evaluation&#34;&gt;Evaluation
&lt;/h3&gt;&lt;p&gt;A comprehensive batch evaluation system designed for detailed analysis and comparative studies.&lt;/p&gt;
&lt;h4 id=&#34;features&#34;&gt;&lt;strong&gt;Features:&lt;/strong&gt;
&lt;/h4&gt;&lt;ul&gt;
&lt;li&gt;&lt;strong&gt;Multi-dimensional Scoring&lt;/strong&gt;: Specialized evaluators with 0-1 scale ratings&lt;/li&gt;
&lt;li&gt;&lt;strong&gt;Dataset-driven Evaluation&lt;/strong&gt;: Batch processing across multiple test cases&lt;/li&gt;
&lt;/ul&gt;
&lt;h4 id=&#34;usage&#34;&gt;&lt;strong&gt;Usage:&lt;/strong&gt;
&lt;/h4&gt;&lt;div class=&#34;highlight&#34;&gt;&lt;div class=&#34;chroma&#34;&gt;
&lt;table class=&#34;lntable&#34;&gt;&lt;tr&gt;&lt;td class=&#34;lntd&#34;&gt;
&lt;pre tabindex=&#34;0&#34; class=&#34;chroma&#34;&gt;&lt;code&gt;&lt;span class=&#34;lnt&#34;&gt;1
&lt;/span&gt;&lt;span class=&#34;lnt&#34;&gt;2
&lt;/span&gt;&lt;/code&gt;&lt;/pre&gt;&lt;/td&gt;
&lt;td class=&#34;lntd&#34;&gt;
&lt;pre tabindex=&#34;0&#34; class=&#34;chroma&#34;&gt;&lt;code class=&#34;language-bash&#34; data-lang=&#34;bash&#34;&gt;&lt;span class=&#34;line&#34;&gt;&lt;span class=&#34;cl&#34;&gt;&lt;span class=&#34;c1&#34;&gt;# Run comprehensive evaluation on LangSmith datasets&lt;/span&gt;
&lt;/span&gt;&lt;/span&gt;&lt;span class=&#34;line&#34;&gt;&lt;span class=&#34;cl&#34;&gt;python tests/run_evaluate.py
&lt;/span&gt;&lt;/span&gt;&lt;/code&gt;&lt;/pre&gt;&lt;/td&gt;&lt;/tr&gt;&lt;/table&gt;
&lt;/div&gt;
&lt;/div&gt;&lt;h4 id=&#34;key-files&#34;&gt;&lt;strong&gt;Key Files:&lt;/strong&gt;
&lt;/h4&gt;&lt;ul&gt;
&lt;li&gt;&lt;code&gt;tests/run_evaluate.py&lt;/code&gt;: Main evaluation script&lt;/li&gt;
&lt;li&gt;&lt;code&gt;tests/evaluators.py&lt;/code&gt;: Specialized evaluator functions&lt;/li&gt;
&lt;li&gt;&lt;code&gt;tests/prompts.py&lt;/code&gt;: Evaluation prompts for each dimension&lt;/li&gt;
&lt;/ul&gt;
&lt;h3 id=&#34;deployments-and-usages&#34;&gt;Deployments and Usages
&lt;/h3&gt;&lt;h4 id=&#34;langgraph-studio&#34;&gt;LangGraph Studio
&lt;/h4&gt;&lt;p&gt;Follow the &lt;a class=&#34;link&#34; href=&#34;#-quickstart&#34; &gt;quickstart&lt;/a&gt; to start LangGraph server locally and test the agent out on LangGraph Studio.&lt;/p&gt;
&lt;h4 id=&#34;hosted-deployment&#34;&gt;Hosted deployment
&lt;/h4&gt;&lt;p&gt;You can easily deploy to &lt;a class=&#34;link&#34; href=&#34;https://langchain-ai.github.io/langgraph/concepts/#deployment-options&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;LangGraph Platform&lt;/a&gt;.&lt;/p&gt;
&lt;h4 id=&#34;open-agent-platform&#34;&gt;Open Agent Platform
&lt;/h4&gt;&lt;p&gt;Open Agent Platform (OAP) is a UI from which non-technical users can build and configure their own agents. OAP is great for allowing users to configure the Deep Researcher with different MCP tools and search APIs that are best suited to their needs and the problems that they want to solve.&lt;/p&gt;
&lt;p&gt;We&amp;rsquo;ve deployed Open Deep Research to our public demo instance of OAP. All you need to do is add your API Keys, and you can test out the Deep Researcher for yourself! Try it out &lt;a class=&#34;link&#34; href=&#34;https://oap.langchain.com&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;here&lt;/a&gt;&lt;/p&gt;
&lt;p&gt;You can also deploy your own instance of OAP, and make your own custom agents (like Deep Researcher) available on it to your users.&lt;/p&gt;
&lt;ol&gt;
&lt;li&gt;&lt;a class=&#34;link&#34; href=&#34;https://docs.oap.langchain.com/quickstart&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;Deploy Open Agent Platform&lt;/a&gt;&lt;/li&gt;
&lt;li&gt;&lt;a class=&#34;link&#34; href=&#34;https://docs.oap.langchain.com/setup/agents&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;Add Deep Researcher to OAP&lt;/a&gt;&lt;/li&gt;
&lt;/ol&gt;
&lt;h3 id=&#34;updates-&#34;&gt;Updates 🔥
&lt;/h3&gt;&lt;h3 id=&#34;legacy-implementations-&#34;&gt;Legacy Implementations 🏛️
&lt;/h3&gt;&lt;p&gt;The &lt;code&gt;src/legacy/&lt;/code&gt; folder contains two earlier implementations that provide alternative approaches to automated research:&lt;/p&gt;
&lt;h4 id=&#34;1-workflow-implementation-legacygraphpy&#34;&gt;1. Workflow Implementation (&lt;code&gt;legacy/graph.py&lt;/code&gt;)
&lt;/h4&gt;&lt;ul&gt;
&lt;li&gt;&lt;strong&gt;Plan-and-Execute&lt;/strong&gt;: Structured workflow with human-in-the-loop planning&lt;/li&gt;
&lt;li&gt;&lt;strong&gt;Sequential Processing&lt;/strong&gt;: Creates sections one by one with reflection&lt;/li&gt;
&lt;li&gt;&lt;strong&gt;Interactive Control&lt;/strong&gt;: Allows feedback and approval of report plans&lt;/li&gt;
&lt;li&gt;&lt;strong&gt;Quality Focused&lt;/strong&gt;: Emphasizes accuracy through iterative refinement&lt;/li&gt;
&lt;/ul&gt;
&lt;h4 id=&#34;2-multi-agent-implementation-legacymulti_agentpy&#34;&gt;2. Multi-Agent Implementation (&lt;code&gt;legacy/multi_agent.py&lt;/code&gt;)
&lt;/h4&gt;&lt;ul&gt;
&lt;li&gt;&lt;strong&gt;Supervisor-Researcher Architecture&lt;/strong&gt;: Coordinated multi-agent system&lt;/li&gt;
&lt;li&gt;&lt;strong&gt;Parallel Processing&lt;/strong&gt;: Multiple researchers work simultaneously&lt;/li&gt;
&lt;li&gt;&lt;strong&gt;Speed Optimized&lt;/strong&gt;: Faster report generation through concurrency&lt;/li&gt;
&lt;li&gt;&lt;strong&gt;MCP Support&lt;/strong&gt;: Extensive Model Context Protocol integration&lt;/li&gt;
&lt;/ul&gt;
&lt;p&gt;See &lt;code&gt;src/legacy/legacy.md&lt;/code&gt; for detailed documentation, configuration options, and usage examples for both legacy implementations.&lt;/p&gt;
</description>
        </item>
        <item>
        <title>anthropic-cookbook</title>
        <link>https://producthunt.programnotes.cn/en/p/anthropic-cookbook/</link>
        <pubDate>Sat, 21 Jun 2025 15:28:31 +0800</pubDate>
        
        <guid>https://producthunt.programnotes.cn/en/p/anthropic-cookbook/</guid>
        <description>&lt;img src="https://images.unsplash.com/photo-1681055543029-8398bcd49519?ixid=M3w0NjAwMjJ8MHwxfHJhbmRvbXx8fHx8fHx8fDE3NTA0OTA4MDd8&amp;ixlib=rb-4.1.0" alt="Featured image of post anthropic-cookbook" /&gt;&lt;h1 id=&#34;anthropicsanthropic-cookbook&#34;&gt;&lt;a class=&#34;link&#34; href=&#34;https://github.com/anthropics/anthropic-cookbook&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;anthropics/anthropic-cookbook&lt;/a&gt;
&lt;/h1&gt;&lt;h1 id=&#34;anthropic-cookbook&#34;&gt;Anthropic Cookbook
&lt;/h1&gt;&lt;p&gt;The Anthropic Cookbook provides code and guides designed to help developers build with Claude, offering copy-able code snippets that you can easily integrate into your own projects.&lt;/p&gt;
&lt;h2 id=&#34;prerequisites&#34;&gt;Prerequisites
&lt;/h2&gt;&lt;p&gt;To make the most of the examples in this cookbook, you&amp;rsquo;ll need an Anthropic API key (sign up for free &lt;a class=&#34;link&#34; href=&#34;https://www.anthropic.com&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;here&lt;/a&gt;).&lt;/p&gt;
&lt;p&gt;While the code examples are primarily written in Python, the concepts can be adapted to any programming language that supports interaction with the Anthropic API.&lt;/p&gt;
&lt;p&gt;If you&amp;rsquo;re new to working with the Anthropic API, we recommend starting with our &lt;a class=&#34;link&#34; href=&#34;https://github.com/anthropics/courses/tree/master/anthropic_api_fundamentals&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;Anthropic API Fundamentals course&lt;/a&gt; to get a solid foundation.&lt;/p&gt;
&lt;h2 id=&#34;explore-further&#34;&gt;Explore Further
&lt;/h2&gt;&lt;p&gt;Looking for more resources to enhance your experience with Claude and AI assistants? Check out these helpful links:&lt;/p&gt;
&lt;ul&gt;
&lt;li&gt;&lt;a class=&#34;link&#34; href=&#34;https://docs.anthropic.com/claude/docs/guide-to-anthropics-prompt-engineering-resources&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;Anthropic developer documentation&lt;/a&gt;&lt;/li&gt;
&lt;li&gt;&lt;a class=&#34;link&#34; href=&#34;https://support.anthropic.com&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;Anthropic support docs&lt;/a&gt;&lt;/li&gt;
&lt;li&gt;&lt;a class=&#34;link&#34; href=&#34;https://www.anthropic.com/discord&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;Anthropic Discord community&lt;/a&gt;&lt;/li&gt;
&lt;/ul&gt;
&lt;h2 id=&#34;contributing&#34;&gt;Contributing
&lt;/h2&gt;&lt;p&gt;The Anthropic Cookbook thrives on the contributions of the developer community. We value your input, whether it&amp;rsquo;s submitting an idea, fixing a typo, adding a new guide, or improving an existing one. By contributing, you help make this resource even more valuable for everyone.&lt;/p&gt;
&lt;p&gt;To avoid duplication of efforts, please review the existing issues and pull requests before contributing.&lt;/p&gt;
&lt;p&gt;If you have ideas for new examples or guides, share them on the &lt;a class=&#34;link&#34; href=&#34;https://github.com/anthropics/anthropic-cookbook/issues&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;issues page&lt;/a&gt;.&lt;/p&gt;
&lt;h2 id=&#34;table-of-recipes&#34;&gt;Table of recipes
&lt;/h2&gt;&lt;h3 id=&#34;skills&#34;&gt;Skills
&lt;/h3&gt;&lt;ul&gt;
&lt;li&gt;&lt;a class=&#34;link&#34; href=&#34;https://github.com/anthropics/anthropic-cookbook/tree/main/skills/classification&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;Classification&lt;/a&gt;: Explore techniques for text and data classification using Claude.&lt;/li&gt;
&lt;li&gt;&lt;a class=&#34;link&#34; href=&#34;https://github.com/anthropics/anthropic-cookbook/tree/main/skills/retrieval_augmented_generation&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;Retrieval Augmented Generation&lt;/a&gt;: Learn how to enhance Claude&amp;rsquo;s responses with external knowledge.&lt;/li&gt;
&lt;li&gt;&lt;a class=&#34;link&#34; href=&#34;https://github.com/anthropics/anthropic-cookbook/tree/main/skills/summarization&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;Summarization&lt;/a&gt;: Discover techniques for effective text summarization with Claude.&lt;/li&gt;
&lt;/ul&gt;
&lt;h3 id=&#34;tool-use-and-integration&#34;&gt;Tool Use and Integration
&lt;/h3&gt;&lt;ul&gt;
&lt;li&gt;&lt;a class=&#34;link&#34; href=&#34;https://github.com/anthropics/anthropic-cookbook/tree/main/tool_use&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;Tool use&lt;/a&gt;: Learn how to integrate Claude with external tools and functions to extend its capabilities.
&lt;ul&gt;
&lt;li&gt;&lt;a class=&#34;link&#34; href=&#34;https://github.com/anthropics/anthropic-cookbook/blob/main/tool_use/customer_service_agent.ipynb&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;Customer service agent&lt;/a&gt;&lt;/li&gt;
&lt;li&gt;&lt;a class=&#34;link&#34; href=&#34;https://github.com/anthropics/anthropic-cookbook/blob/main/tool_use/calculator_tool.ipynb&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;Calculator integration&lt;/a&gt;&lt;/li&gt;
&lt;li&gt;&lt;a class=&#34;link&#34; href=&#34;https://github.com/anthropics/anthropic-cookbook/blob/main/misc/how_to_make_sql_queries.ipynb&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;SQL queries&lt;/a&gt;&lt;/li&gt;
&lt;/ul&gt;
&lt;/li&gt;
&lt;/ul&gt;
&lt;h3 id=&#34;third-party-integrations&#34;&gt;Third-Party Integrations
&lt;/h3&gt;&lt;ul&gt;
&lt;li&gt;&lt;a class=&#34;link&#34; href=&#34;https://github.com/anthropics/anthropic-cookbook/tree/main/third_party&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;Retrieval augmented generation&lt;/a&gt;: Supplement Claude&amp;rsquo;s knowledge with external data sources.
&lt;ul&gt;
&lt;li&gt;&lt;a class=&#34;link&#34; href=&#34;https://github.com/anthropics/anthropic-cookbook/blob/main/third_party/Pinecone/rag_using_pinecone.ipynb&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;Vector databases (Pinecone)&lt;/a&gt;&lt;/li&gt;
&lt;li&gt;&lt;a class=&#34;link&#34; href=&#34;https://github.com/anthropics/anthropic-cookbook/blob/main/third_party/Wikipedia/wikipedia-search-cookbook.ipynb&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;Wikipedia&lt;/a&gt;&lt;/li&gt;
&lt;li&gt;&lt;a class=&#34;link&#34; href=&#34;https://github.com/anthropics/anthropic-cookbook/blob/main/misc/read_web_pages_with_haiku.ipynb&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;Web pages&lt;/a&gt;&lt;/li&gt;
&lt;li&gt;&lt;a class=&#34;link&#34; href=&#34;https://github.com/anthropics/anthropic-cookbook/blob/main/third_party/Brave/web_search_using_brave.ipynb&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;Internet search (Brave)&lt;/a&gt;&lt;/li&gt;
&lt;/ul&gt;
&lt;/li&gt;
&lt;li&gt;&lt;a class=&#34;link&#34; href=&#34;https://github.com/anthropics/anthropic-cookbook/blob/main/third_party/VoyageAI/how_to_create_embeddings.md&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;Embeddings with Voyage AI&lt;/a&gt;&lt;/li&gt;
&lt;/ul&gt;
&lt;h3 id=&#34;multimodal-capabilities&#34;&gt;Multimodal Capabilities
&lt;/h3&gt;&lt;ul&gt;
&lt;li&gt;&lt;a class=&#34;link&#34; href=&#34;https://github.com/anthropics/anthropic-cookbook/tree/main/multimodal&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;Vision with Claude&lt;/a&gt;:
&lt;ul&gt;
&lt;li&gt;&lt;a class=&#34;link&#34; href=&#34;https://github.com/anthropics/anthropic-cookbook/blob/main/multimodal/getting_started_with_vision.ipynb&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;Getting started with images&lt;/a&gt;&lt;/li&gt;
&lt;li&gt;&lt;a class=&#34;link&#34; href=&#34;https://github.com/anthropics/anthropic-cookbook/blob/main/multimodal/best_practices_for_vision.ipynb&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;Best practices for vision&lt;/a&gt;&lt;/li&gt;
&lt;li&gt;&lt;a class=&#34;link&#34; href=&#34;https://github.com/anthropics/anthropic-cookbook/blob/main/multimodal/reading_charts_graphs_powerpoints.ipynb&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;Interpreting charts and graphs&lt;/a&gt;&lt;/li&gt;
&lt;li&gt;&lt;a class=&#34;link&#34; href=&#34;https://github.com/anthropics/anthropic-cookbook/blob/main/multimodal/how_to_transcribe_text.ipynb&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;Extracting content from forms&lt;/a&gt;&lt;/li&gt;
&lt;/ul&gt;
&lt;/li&gt;
&lt;li&gt;&lt;a class=&#34;link&#34; href=&#34;https://github.com/anthropics/anthropic-cookbook/blob/main/misc/illustrated_responses.ipynb&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;Generate images with Claude&lt;/a&gt;: Use Claude with Stable Diffusion for image generation.&lt;/li&gt;
&lt;/ul&gt;
&lt;h3 id=&#34;advanced-techniques&#34;&gt;Advanced Techniques
&lt;/h3&gt;&lt;ul&gt;
&lt;li&gt;&lt;a class=&#34;link&#34; href=&#34;https://github.com/anthropics/anthropic-cookbook/blob/main/multimodal/using_sub_agents.ipynb&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;Sub-agents&lt;/a&gt;: Learn how to use Haiku as a sub-agent in combination with Opus.&lt;/li&gt;
&lt;li&gt;&lt;a class=&#34;link&#34; href=&#34;https://github.com/anthropics/anthropic-cookbook/blob/main/misc/pdf_upload_summarization.ipynb&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;Upload PDFs to Claude&lt;/a&gt;: Parse and pass PDFs as text to Claude.&lt;/li&gt;
&lt;li&gt;&lt;a class=&#34;link&#34; href=&#34;https://github.com/anthropics/anthropic-cookbook/blob/main/misc/building_evals.ipynb&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;Automated evaluations&lt;/a&gt;: Use Claude to automate the prompt evaluation process.&lt;/li&gt;
&lt;li&gt;&lt;a class=&#34;link&#34; href=&#34;https://github.com/anthropics/anthropic-cookbook/blob/main/misc/how_to_enable_json_mode.ipynb&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;Enable JSON mode&lt;/a&gt;: Ensure consistent JSON output from Claude.&lt;/li&gt;
&lt;li&gt;&lt;a class=&#34;link&#34; href=&#34;https://github.com/anthropics/anthropic-cookbook/blob/main/misc/building_moderation_filter.ipynb&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;Create a moderation filter&lt;/a&gt;: Use Claude to create a content moderation filter for your application.&lt;/li&gt;
&lt;li&gt;&lt;a class=&#34;link&#34; href=&#34;https://github.com/anthropics/anthropic-cookbook/blob/main/misc/prompt_caching.ipynb&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;Prompt caching&lt;/a&gt;: Learn techniques for efficient prompt caching with Claude.&lt;/li&gt;
&lt;/ul&gt;
&lt;h2 id=&#34;additional-resources&#34;&gt;Additional Resources
&lt;/h2&gt;&lt;ul&gt;
&lt;li&gt;&lt;a class=&#34;link&#34; href=&#34;https://github.com/aws-samples/anthropic-on-aws&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;Anthropic on AWS&lt;/a&gt;: Explore examples and solutions for using Claude on AWS infrastructure.&lt;/li&gt;
&lt;li&gt;&lt;a class=&#34;link&#34; href=&#34;https://github.com/aws-samples/&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;AWS Samples&lt;/a&gt;: A collection of code samples from AWS which can be adapted for use with Claude. Note that some samples may require modification to work optimally with Claude.&lt;/li&gt;
&lt;/ul&gt;
</description>
        </item>
        <item>
        <title>meeting-minutes</title>
        <link>https://producthunt.programnotes.cn/en/p/meeting-minutes/</link>
        <pubDate>Thu, 17 Apr 2025 15:30:04 +0800</pubDate>
        
        <guid>https://producthunt.programnotes.cn/en/p/meeting-minutes/</guid>
        <description>&lt;img src="https://images.unsplash.com/photo-1670144415812-f8dca862e8bc?ixid=M3w0NjAwMjJ8MHwxfHJhbmRvbXx8fHx8fHx8fDE3NDQ4NzQ5MzB8&amp;ixlib=rb-4.0.3" alt="Featured image of post meeting-minutes" /&gt;&lt;h1 id=&#34;zackriya-solutionsmeeting-minutes&#34;&gt;&lt;a class=&#34;link&#34; href=&#34;https://github.com/Zackriya-Solutions/meeting-minutes&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;Zackriya-Solutions/meeting-minutes&lt;/a&gt;
&lt;/h1&gt;&lt;div align=&#34;center&#34; style=&#34;border-bottom: none&#34;&gt;
    &lt;h1&gt;
        &lt;img src=&#34;docs/6.png&#34; width=&#34;400&#34; style=&#34;border-radius: 10px;&#34; /&gt;
        &lt;br&gt;
        Meetily - AI-Powered Meeting Assistant
    &lt;/h1&gt;
    &lt;br&gt;
    &lt;a href=&#34;https://github.com/Zackriya-Solutions/meeting-minutes/releases/tag/v0.0.3&#34;&gt;&lt;img src=&#34;https://img.shields.io/badge/Pre_Release-v0.0.3-brightgreen&#34; alt=&#34;Pre-Release&#34;&gt;&lt;/a&gt;
    &lt;a href=&#34;https://github.com/Zackriya-Solutions/meeting-minutes/releases/tag/v0.0.3&#34;&gt;&lt;img src=&#34;https://img.shields.io/badge/Stars-1000+-red&#34; alt=&#34;Stars&#34;&gt;&lt;/a&gt;
    &lt;a href=&#34;https://github.com/Zackriya-Solutions/meeting-minutes/releases/tag/v0.0.3&#34;&gt;&lt;img src=&#34;https://img.shields.io/badge/License-MIT-blue&#34; alt=&#34;License&#34;&gt;&lt;/a&gt;
    &lt;a href=&#34;https://github.com/Zackriya-Solutions/meeting-minutes/releases/tag/v0.0.3&#34;&gt;&lt;img src=&#34;https://img.shields.io/badge/Supported_OS-macOS,_Windows-yellow&#34; alt=&#34;Supported OS&#34;&gt;&lt;/a&gt;
    &lt;br&gt;
    &lt;h3&gt;
    &lt;br&gt;
    Open source AI Assistant for taking meeting notes
    &lt;/h3&gt;
    &lt;p align=&#34;center&#34;&gt;
    &lt;a href=&#34;https://meetily.zackriya.com&#34;&gt;&lt;b&gt;Website&lt;/b&gt;&lt;/a&gt; •
    &lt;a href=&#34;https://in.linkedin.com/company/zackriya-solutions&#34;&gt;&lt;b&gt;Author&lt;/b&gt;&lt;/a&gt;
    •
    &lt;a href=&#34;https://discord.gg/crRymMQBFH&#34;&gt;&lt;b&gt;Discord Channel&lt;/b&gt;&lt;/a&gt;
&lt;/p&gt;
    &lt;p align=&#34;center&#34;&gt;
&lt;p&gt;An AI-Powered Meeting Assistant that captures live meeting audio, transcribes it in real-time, and generates summaries while ensuring user privacy. Perfect for teams who want to focus on discussions while automatically capturing and organizing meeting content without the need for external servers or complex infrastructure.&lt;/p&gt;
&lt;/p&gt;
&lt;p align=&#34;center&#34;&gt;
    &lt;img src=&#34;docs/demo_small.gif&#34; width=&#34;650&#34; alt=&#34;Meetily Demo&#34; /&gt;
    &lt;br&gt;
    &lt;a href=&#34;https://youtu.be/5k_Q5Wlahuk&#34;&gt;View full Demo Video&lt;/a&gt;
&lt;/p&gt;
&lt;/div&gt;
&lt;h2 id=&#34;overview&#34;&gt;Overview
&lt;/h2&gt;&lt;p&gt;An AI-powered meeting assistant that captures live meeting audio, transcribes it in real-time, and generates summaries while ensuring user privacy. Perfect for teams who want to focus on discussions while automatically capturing and organizing meeting content.&lt;/p&gt;
&lt;h3 id=&#34;why&#34;&gt;Why?
&lt;/h3&gt;&lt;p&gt;While there are many meeting transcription tools available, this solution stands out by offering:&lt;/p&gt;
&lt;ul&gt;
&lt;li&gt;&lt;strong&gt;Privacy First&lt;/strong&gt;: All processing happens locally on your device&lt;/li&gt;
&lt;li&gt;&lt;strong&gt;Cost Effective&lt;/strong&gt;: Uses open-source AI models instead of expensive APIs&lt;/li&gt;
&lt;li&gt;&lt;strong&gt;Flexible&lt;/strong&gt;: Works offline, supports multiple meeting platforms&lt;/li&gt;
&lt;li&gt;&lt;strong&gt;Customizable&lt;/strong&gt;: Self-host and modify for your specific needs&lt;/li&gt;
&lt;li&gt;&lt;strong&gt;Intelligent&lt;/strong&gt;: Built-in knowledge graph for semantic search across meetings&lt;/li&gt;
&lt;/ul&gt;
&lt;h2 id=&#34;features&#34;&gt;Features
&lt;/h2&gt;&lt;p&gt;✅ Modern, responsive UI with real-time updates&lt;/p&gt;
&lt;p&gt;✅ Real-time audio capture (microphone + system audio)&lt;/p&gt;
&lt;p&gt;✅ Live transcription using Whisper.cpp&lt;/p&gt;
&lt;p&gt;🚧 Speaker diarization&lt;/p&gt;
&lt;p&gt;✅ Local processing for privacy&lt;/p&gt;
&lt;p&gt;✅ Packaged the app for macOS and Windows&lt;/p&gt;
&lt;p&gt;🚧 Export to Markdown/PDF&lt;/p&gt;
&lt;blockquote&gt;
&lt;p&gt;&lt;strong&gt;Note&lt;/strong&gt;: We have a Rust-based implementation that explores better performance and native integration. It currently implements:&lt;/p&gt;
&lt;ul&gt;
&lt;li&gt;✅ Real-time audio capture from both microphone and system audio&lt;/li&gt;
&lt;li&gt;✅ Live transcription using locally-running Whisper&lt;/li&gt;
&lt;li&gt;✅ Speaker diarization&lt;/li&gt;
&lt;li&gt;✅ Rich text editor for notes&lt;/li&gt;
&lt;/ul&gt;
&lt;/blockquote&gt;
&lt;p&gt;We are currently working on:&lt;/p&gt;
&lt;blockquote&gt;
&lt;ul&gt;
&lt;li&gt;✅ Export to Markdown/PDF&lt;/li&gt;
&lt;li&gt;✅ Export to HTML&lt;/li&gt;
&lt;/ul&gt;
&lt;/blockquote&gt;
&lt;h2 id=&#34;release-003&#34;&gt;Release 0.0.3
&lt;/h2&gt;&lt;p&gt;A new release is available!&lt;/p&gt;
&lt;p&gt;Please check out the release &lt;a class=&#34;link&#34; href=&#34;https://github.com/Zackriya-Solutions/meeting-minutes/releases/tag/v0.0.3&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;here&lt;/a&gt;.&lt;/p&gt;
&lt;h3 id=&#34;whats-new&#34;&gt;What&amp;rsquo;s New
&lt;/h3&gt;&lt;ul&gt;
&lt;li&gt;&lt;strong&gt;Windows Support&lt;/strong&gt;: Fixed audio capture issues on Windows&lt;/li&gt;
&lt;li&gt;&lt;strong&gt;Improved Error Handling&lt;/strong&gt;: Better error handling and logging for audio devices&lt;/li&gt;
&lt;li&gt;&lt;strong&gt;Enhanced Device Detection&lt;/strong&gt;: More robust audio device detection across platforms&lt;/li&gt;
&lt;li&gt;&lt;strong&gt;Windows Installers&lt;/strong&gt;: Added both .exe and .msi installers for Windows&lt;/li&gt;
&lt;li&gt;Transcription quality is improved&lt;/li&gt;
&lt;li&gt;Bug fixes and improvements for frontend&lt;/li&gt;
&lt;li&gt;Better backend app build process&lt;/li&gt;
&lt;li&gt;Improved documentation&lt;/li&gt;
&lt;/ul&gt;
&lt;h3 id=&#34;what-would-be-next&#34;&gt;What would be next?
&lt;/h3&gt;&lt;ul&gt;
&lt;li&gt;Database connection to save meeting minutes&lt;/li&gt;
&lt;li&gt;Improve summarization quality for smaller LLM models&lt;/li&gt;
&lt;li&gt;Add download options for meeting transcriptions&lt;/li&gt;
&lt;li&gt;Add download option for summary&lt;/li&gt;
&lt;/ul&gt;
&lt;h3 id=&#34;known-issues&#34;&gt;Known issues
&lt;/h3&gt;&lt;ul&gt;
&lt;li&gt;Smaller LLMs can hallucinate, making summarization quality poor; please use a model above 32B parameters&lt;/li&gt;
&lt;li&gt;Backend build process requires CMake, a C++ compiler, etc., making it harder to build&lt;/li&gt;
&lt;li&gt;Backend build process requires Python 3.10 or newer&lt;/li&gt;
&lt;li&gt;Frontend build process requires Node.js&lt;/li&gt;
&lt;/ul&gt;
&lt;h2 id=&#34;llm-integration&#34;&gt;LLM Integration
&lt;/h2&gt;&lt;p&gt;The backend supports multiple LLM providers through a unified interface. Current implementations include:&lt;/p&gt;
&lt;h3 id=&#34;supported-providers&#34;&gt;Supported Providers
&lt;/h3&gt;&lt;ul&gt;
&lt;li&gt;&lt;strong&gt;Anthropic&lt;/strong&gt; (Claude models)&lt;/li&gt;
&lt;li&gt;&lt;strong&gt;Groq&lt;/strong&gt; (Llama 3.2 90B)&lt;/li&gt;
&lt;li&gt;&lt;strong&gt;Ollama&lt;/strong&gt; (Local models that support function calling)&lt;/li&gt;
&lt;/ul&gt;
&lt;h3 id=&#34;configuration&#34;&gt;Configuration
&lt;/h3&gt;&lt;p&gt;Create &lt;code&gt;.env&lt;/code&gt; file with your API keys:&lt;/p&gt;
&lt;div class=&#34;highlight&#34;&gt;&lt;div class=&#34;chroma&#34;&gt;
&lt;table class=&#34;lntable&#34;&gt;&lt;tr&gt;&lt;td class=&#34;lntd&#34;&gt;
&lt;pre tabindex=&#34;0&#34; class=&#34;chroma&#34;&gt;&lt;code&gt;&lt;span class=&#34;lnt&#34;&gt;1
&lt;/span&gt;&lt;span class=&#34;lnt&#34;&gt;2
&lt;/span&gt;&lt;span class=&#34;lnt&#34;&gt;3
&lt;/span&gt;&lt;span class=&#34;lnt&#34;&gt;4
&lt;/span&gt;&lt;span class=&#34;lnt&#34;&gt;5
&lt;/span&gt;&lt;/code&gt;&lt;/pre&gt;&lt;/td&gt;
&lt;td class=&#34;lntd&#34;&gt;
&lt;pre tabindex=&#34;0&#34; class=&#34;chroma&#34;&gt;&lt;code class=&#34;language-env&#34; data-lang=&#34;env&#34;&gt;&lt;span class=&#34;line&#34;&gt;&lt;span class=&#34;cl&#34;&gt;&lt;span class=&#34;c1&#34;&gt;# Required for Anthropic&lt;/span&gt;
&lt;/span&gt;&lt;/span&gt;&lt;span class=&#34;line&#34;&gt;&lt;span class=&#34;cl&#34;&gt;&lt;span class=&#34;nv&#34;&gt;ANTHROPIC_API_KEY&lt;/span&gt;&lt;span class=&#34;o&#34;&gt;=&lt;/span&gt;your_key_here  
&lt;/span&gt;&lt;/span&gt;&lt;span class=&#34;line&#34;&gt;&lt;span class=&#34;cl&#34;&gt;
&lt;/span&gt;&lt;/span&gt;&lt;span class=&#34;line&#34;&gt;&lt;span class=&#34;cl&#34;&gt;&lt;span class=&#34;c1&#34;&gt;# Required for Groq &lt;/span&gt;
&lt;/span&gt;&lt;/span&gt;&lt;span class=&#34;line&#34;&gt;&lt;span class=&#34;cl&#34;&gt;&lt;span class=&#34;nv&#34;&gt;GROQ_API_KEY&lt;/span&gt;&lt;span class=&#34;o&#34;&gt;=&lt;/span&gt;your_key_here
&lt;/span&gt;&lt;/span&gt;&lt;/code&gt;&lt;/pre&gt;&lt;/td&gt;&lt;/tr&gt;&lt;/table&gt;
&lt;/div&gt;
&lt;/div&gt;&lt;h2 id=&#34;system-architecture&#34;&gt;System Architecture
&lt;/h2&gt;&lt;p&gt;&lt;img src=&#34;https://producthunt.programnotes.cn/docs/HighLevel.jpg&#34;
	
	
	
	loading=&#34;lazy&#34;
	
		alt=&#34;High Level Architecture&#34;
	
	
&gt;&lt;/p&gt;
&lt;h3 id=&#34;core-components&#34;&gt;Core Components
&lt;/h3&gt;&lt;ol&gt;
&lt;li&gt;
&lt;p&gt;&lt;strong&gt;Audio Capture Service&lt;/strong&gt;&lt;/p&gt;
&lt;ul&gt;
&lt;li&gt;Real-time microphone/system audio capture&lt;/li&gt;
&lt;li&gt;Audio preprocessing pipeline&lt;/li&gt;
&lt;li&gt;Built with Rust (experimental) and Python&lt;/li&gt;
&lt;/ul&gt;
&lt;/li&gt;
&lt;li&gt;
&lt;p&gt;&lt;strong&gt;Transcription Engine&lt;/strong&gt;&lt;/p&gt;
&lt;ul&gt;
&lt;li&gt;Whisper.cpp for local transcription&lt;/li&gt;
&lt;li&gt;Supports multiple model sizes (tiny-&amp;gt;large)&lt;/li&gt;
&lt;li&gt;GPU-accelerated processing&lt;/li&gt;
&lt;/ul&gt;
&lt;/li&gt;
&lt;li&gt;
&lt;p&gt;&lt;strong&gt;LLM Orchestrator&lt;/strong&gt;&lt;/p&gt;
&lt;ul&gt;
&lt;li&gt;Unified interface for multiple providers&lt;/li&gt;
&lt;li&gt;Automatic fallback handling&lt;/li&gt;
&lt;li&gt;Chunk processing with overlap&lt;/li&gt;
&lt;li&gt;Model configuration:&lt;/li&gt;
&lt;/ul&gt;
&lt;/li&gt;
&lt;li&gt;
&lt;p&gt;&lt;strong&gt;Data Services&lt;/strong&gt;&lt;/p&gt;
&lt;ul&gt;
&lt;li&gt;&lt;strong&gt;ChromaDB&lt;/strong&gt;: Vector store for transcript embeddings&lt;/li&gt;
&lt;li&gt;&lt;strong&gt;SQLite&lt;/strong&gt;: Process tracking and metadata storage&lt;/li&gt;
&lt;/ul&gt;
&lt;/li&gt;
&lt;li&gt;
&lt;p&gt;&lt;strong&gt;API Layer&lt;/strong&gt;&lt;/p&gt;
&lt;ul&gt;
&lt;li&gt;FastAPI endpoints:
&lt;ul&gt;
&lt;li&gt;POST /upload&lt;/li&gt;
&lt;li&gt;POST /process&lt;/li&gt;
&lt;li&gt;GET /summary/{id}&lt;/li&gt;
&lt;li&gt;DELETE /summary/{id}&lt;/li&gt;
&lt;/ul&gt;
&lt;/li&gt;
&lt;/ul&gt;
&lt;/li&gt;
&lt;/ol&gt;
&lt;h3 id=&#34;deployment-architecture&#34;&gt;Deployment Architecture
&lt;/h3&gt;&lt;ul&gt;
&lt;li&gt;&lt;strong&gt;Frontend&lt;/strong&gt;: Tauri app + Next.js (packaged executables)&lt;/li&gt;
&lt;li&gt;&lt;strong&gt;Backend&lt;/strong&gt;: Python FastAPI:
&lt;ul&gt;
&lt;li&gt;Transcript workers&lt;/li&gt;
&lt;li&gt;LLM inference&lt;/li&gt;
&lt;/ul&gt;
&lt;/li&gt;
&lt;/ul&gt;
&lt;h2 id=&#34;prerequisites&#34;&gt;Prerequisites
&lt;/h2&gt;&lt;ul&gt;
&lt;li&gt;Node.js 18+&lt;/li&gt;
&lt;li&gt;Python 3.10+&lt;/li&gt;
&lt;li&gt;FFmpeg&lt;/li&gt;
&lt;li&gt;Rust 1.65+ (for experimental features)&lt;/li&gt;
&lt;li&gt;CMake 3.22+ (for building the frontend)&lt;/li&gt;
&lt;li&gt;For Windows: Visual Studio Build Tools with C++ development workload&lt;/li&gt;
&lt;/ul&gt;
&lt;h2 id=&#34;setup-instructions&#34;&gt;Setup Instructions
&lt;/h2&gt;&lt;h3 id=&#34;1-frontend-setup&#34;&gt;1. Frontend Setup
&lt;/h3&gt;&lt;h4 id=&#34;run-packaged-version&#34;&gt;Run packaged version
&lt;/h4&gt;&lt;p&gt;Go to the &lt;a class=&#34;link&#34; href=&#34;https://github.com/Zackriya-Solutions/meeting-minutes/releases&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;releases page&lt;/a&gt; and download the latest version.&lt;/p&gt;
&lt;p&gt;&lt;strong&gt;For Windows:&lt;/strong&gt;&lt;/p&gt;
&lt;ul&gt;
&lt;li&gt;Download either the &lt;code&gt;.exe&lt;/code&gt; installer or &lt;code&gt;.msi&lt;/code&gt; package&lt;/li&gt;
&lt;li&gt;Once the installer is downloaded, double-click the executable file to run it&lt;/li&gt;
&lt;li&gt;Windows will ask if you want to run untrusted apps; click &amp;ldquo;More info&amp;rdquo; and choose &amp;ldquo;Run anyway&amp;rdquo;&lt;/li&gt;
&lt;li&gt;Follow the installation wizard to complete the setup&lt;/li&gt;
&lt;li&gt;The application will be installed and available on your desktop&lt;/li&gt;
&lt;/ul&gt;
&lt;p&gt;&lt;strong&gt;For macOS:&lt;/strong&gt;&lt;/p&gt;
&lt;ul&gt;
&lt;li&gt;Download the &lt;code&gt;dmg_darwin_arch64.zip&lt;/code&gt; file&lt;/li&gt;
&lt;li&gt;Extract the file&lt;/li&gt;
&lt;li&gt;Double-click the &lt;code&gt;.dmg&lt;/code&gt; file inside the extracted folder&lt;/li&gt;
&lt;li&gt;Drag the application to your Applications folder&lt;/li&gt;
&lt;li&gt;Execute the following command in terminal to remove the quarantine attribute:
&lt;div class=&#34;highlight&#34;&gt;&lt;div class=&#34;chroma&#34;&gt;
&lt;table class=&#34;lntable&#34;&gt;&lt;tr&gt;&lt;td class=&#34;lntd&#34;&gt;
&lt;pre tabindex=&#34;0&#34; class=&#34;chroma&#34;&gt;&lt;code&gt;&lt;span class=&#34;lnt&#34;&gt;1
&lt;/span&gt;&lt;/code&gt;&lt;/pre&gt;&lt;/td&gt;
&lt;td class=&#34;lntd&#34;&gt;
&lt;pre tabindex=&#34;0&#34; class=&#34;chroma&#34;&gt;&lt;code class=&#34;language-fallback&#34; data-lang=&#34;fallback&#34;&gt;&lt;span class=&#34;line&#34;&gt;&lt;span class=&#34;cl&#34;&gt;xattr -c /Applications/meeting-minutes-frontend.app
&lt;/span&gt;&lt;/span&gt;&lt;/code&gt;&lt;/pre&gt;&lt;/td&gt;&lt;/tr&gt;&lt;/table&gt;
&lt;/div&gt;
&lt;/div&gt;&lt;/li&gt;
&lt;/ul&gt;
&lt;p&gt;Provide necessary permissions for audio capture and microphone access.&lt;/p&gt;
&lt;h4 id=&#34;dev-run&#34;&gt;Dev run
&lt;/h4&gt;&lt;div class=&#34;highlight&#34;&gt;&lt;div class=&#34;chroma&#34;&gt;
&lt;table class=&#34;lntable&#34;&gt;&lt;tr&gt;&lt;td class=&#34;lntd&#34;&gt;
&lt;pre tabindex=&#34;0&#34; class=&#34;chroma&#34;&gt;&lt;code&gt;&lt;span class=&#34;lnt&#34;&gt;1
&lt;/span&gt;&lt;span class=&#34;lnt&#34;&gt;2
&lt;/span&gt;&lt;span class=&#34;lnt&#34;&gt;3
&lt;/span&gt;&lt;span class=&#34;lnt&#34;&gt;4
&lt;/span&gt;&lt;span class=&#34;lnt&#34;&gt;5
&lt;/span&gt;&lt;span class=&#34;lnt&#34;&gt;6
&lt;/span&gt;&lt;span class=&#34;lnt&#34;&gt;7
&lt;/span&gt;&lt;span class=&#34;lnt&#34;&gt;8
&lt;/span&gt;&lt;/code&gt;&lt;/pre&gt;&lt;/td&gt;
&lt;td class=&#34;lntd&#34;&gt;
&lt;pre tabindex=&#34;0&#34; class=&#34;chroma&#34;&gt;&lt;code class=&#34;language-bash&#34; data-lang=&#34;bash&#34;&gt;&lt;span class=&#34;line&#34;&gt;&lt;span class=&#34;cl&#34;&gt;&lt;span class=&#34;c1&#34;&gt;# Navigate to frontend directory&lt;/span&gt;
&lt;/span&gt;&lt;/span&gt;&lt;span class=&#34;line&#34;&gt;&lt;span class=&#34;cl&#34;&gt;&lt;span class=&#34;nb&#34;&gt;cd&lt;/span&gt; frontend
&lt;/span&gt;&lt;/span&gt;&lt;span class=&#34;line&#34;&gt;&lt;span class=&#34;cl&#34;&gt;
&lt;/span&gt;&lt;/span&gt;&lt;span class=&#34;line&#34;&gt;&lt;span class=&#34;cl&#34;&gt;&lt;span class=&#34;c1&#34;&gt;# Give execute permissions to clean_build.sh&lt;/span&gt;
&lt;/span&gt;&lt;/span&gt;&lt;span class=&#34;line&#34;&gt;&lt;span class=&#34;cl&#34;&gt;chmod +x clean_build.sh
&lt;/span&gt;&lt;/span&gt;&lt;span class=&#34;line&#34;&gt;&lt;span class=&#34;cl&#34;&gt;
&lt;/span&gt;&lt;/span&gt;&lt;span class=&#34;line&#34;&gt;&lt;span class=&#34;cl&#34;&gt;&lt;span class=&#34;c1&#34;&gt;# run clean_build.sh&lt;/span&gt;
&lt;/span&gt;&lt;/span&gt;&lt;span class=&#34;line&#34;&gt;&lt;span class=&#34;cl&#34;&gt;./clean_build.sh
&lt;/span&gt;&lt;/span&gt;&lt;/code&gt;&lt;/pre&gt;&lt;/td&gt;&lt;/tr&gt;&lt;/table&gt;
&lt;/div&gt;
&lt;/div&gt;&lt;h3 id=&#34;2-backend-setup&#34;&gt;2. Backend Setup
&lt;/h3&gt;&lt;div class=&#34;highlight&#34;&gt;&lt;div class=&#34;chroma&#34;&gt;
&lt;table class=&#34;lntable&#34;&gt;&lt;tr&gt;&lt;td class=&#34;lntd&#34;&gt;
&lt;pre tabindex=&#34;0&#34; class=&#34;chroma&#34;&gt;&lt;code&gt;&lt;span class=&#34;lnt&#34;&gt; 1
&lt;/span&gt;&lt;span class=&#34;lnt&#34;&gt; 2
&lt;/span&gt;&lt;span class=&#34;lnt&#34;&gt; 3
&lt;/span&gt;&lt;span class=&#34;lnt&#34;&gt; 4
&lt;/span&gt;&lt;span class=&#34;lnt&#34;&gt; 5
&lt;/span&gt;&lt;span class=&#34;lnt&#34;&gt; 6
&lt;/span&gt;&lt;span class=&#34;lnt&#34;&gt; 7
&lt;/span&gt;&lt;span class=&#34;lnt&#34;&gt; 8
&lt;/span&gt;&lt;span class=&#34;lnt&#34;&gt; 9
&lt;/span&gt;&lt;span class=&#34;lnt&#34;&gt;10
&lt;/span&gt;&lt;span class=&#34;lnt&#34;&gt;11
&lt;/span&gt;&lt;span class=&#34;lnt&#34;&gt;12
&lt;/span&gt;&lt;span class=&#34;lnt&#34;&gt;13
&lt;/span&gt;&lt;span class=&#34;lnt&#34;&gt;14
&lt;/span&gt;&lt;span class=&#34;lnt&#34;&gt;15
&lt;/span&gt;&lt;span class=&#34;lnt&#34;&gt;16
&lt;/span&gt;&lt;span class=&#34;lnt&#34;&gt;17
&lt;/span&gt;&lt;span class=&#34;lnt&#34;&gt;18
&lt;/span&gt;&lt;span class=&#34;lnt&#34;&gt;19
&lt;/span&gt;&lt;span class=&#34;lnt&#34;&gt;20
&lt;/span&gt;&lt;span class=&#34;lnt&#34;&gt;21
&lt;/span&gt;&lt;span class=&#34;lnt&#34;&gt;22
&lt;/span&gt;&lt;span class=&#34;lnt&#34;&gt;23
&lt;/span&gt;&lt;span class=&#34;lnt&#34;&gt;24
&lt;/span&gt;&lt;span class=&#34;lnt&#34;&gt;25
&lt;/span&gt;&lt;span class=&#34;lnt&#34;&gt;26
&lt;/span&gt;&lt;span class=&#34;lnt&#34;&gt;27
&lt;/span&gt;&lt;span class=&#34;lnt&#34;&gt;28
&lt;/span&gt;&lt;span class=&#34;lnt&#34;&gt;29
&lt;/span&gt;&lt;span class=&#34;lnt&#34;&gt;30
&lt;/span&gt;&lt;span class=&#34;lnt&#34;&gt;31
&lt;/span&gt;&lt;span class=&#34;lnt&#34;&gt;32
&lt;/span&gt;&lt;span class=&#34;lnt&#34;&gt;33
&lt;/span&gt;&lt;span class=&#34;lnt&#34;&gt;34
&lt;/span&gt;&lt;span class=&#34;lnt&#34;&gt;35
&lt;/span&gt;&lt;span class=&#34;lnt&#34;&gt;36
&lt;/span&gt;&lt;span class=&#34;lnt&#34;&gt;37
&lt;/span&gt;&lt;span class=&#34;lnt&#34;&gt;38
&lt;/span&gt;&lt;span class=&#34;lnt&#34;&gt;39
&lt;/span&gt;&lt;span class=&#34;lnt&#34;&gt;40
&lt;/span&gt;&lt;span class=&#34;lnt&#34;&gt;41
&lt;/span&gt;&lt;span class=&#34;lnt&#34;&gt;42
&lt;/span&gt;&lt;span class=&#34;lnt&#34;&gt;43
&lt;/span&gt;&lt;span class=&#34;lnt&#34;&gt;44
&lt;/span&gt;&lt;/code&gt;&lt;/pre&gt;&lt;/td&gt;
&lt;td class=&#34;lntd&#34;&gt;
&lt;pre tabindex=&#34;0&#34; class=&#34;chroma&#34;&gt;&lt;code class=&#34;language-bash&#34; data-lang=&#34;bash&#34;&gt;&lt;span class=&#34;line&#34;&gt;&lt;span class=&#34;cl&#34;&gt;&lt;span class=&#34;c1&#34;&gt;# Clone the repository&lt;/span&gt;
&lt;/span&gt;&lt;/span&gt;&lt;span class=&#34;line&#34;&gt;&lt;span class=&#34;cl&#34;&gt;git clone https://github.com/Zackriya-Solutions/meeting-minutes.git
&lt;/span&gt;&lt;/span&gt;&lt;span class=&#34;line&#34;&gt;&lt;span class=&#34;cl&#34;&gt;&lt;span class=&#34;nb&#34;&gt;cd&lt;/span&gt; meeting-minutes/backend
&lt;/span&gt;&lt;/span&gt;&lt;span class=&#34;line&#34;&gt;&lt;span class=&#34;cl&#34;&gt;
&lt;/span&gt;&lt;/span&gt;&lt;span class=&#34;line&#34;&gt;&lt;span class=&#34;cl&#34;&gt;&lt;span class=&#34;c1&#34;&gt;# Create and activate virtual environment&lt;/span&gt;
&lt;/span&gt;&lt;/span&gt;&lt;span class=&#34;line&#34;&gt;&lt;span class=&#34;cl&#34;&gt;&lt;span class=&#34;c1&#34;&gt;# On macOS/Linux:&lt;/span&gt;
&lt;/span&gt;&lt;/span&gt;&lt;span class=&#34;line&#34;&gt;&lt;span class=&#34;cl&#34;&gt;python -m venv venv
&lt;/span&gt;&lt;/span&gt;&lt;span class=&#34;line&#34;&gt;&lt;span class=&#34;cl&#34;&gt;&lt;span class=&#34;nb&#34;&gt;source&lt;/span&gt; venv/bin/activate
&lt;/span&gt;&lt;/span&gt;&lt;span class=&#34;line&#34;&gt;&lt;span class=&#34;cl&#34;&gt;
&lt;/span&gt;&lt;/span&gt;&lt;span class=&#34;line&#34;&gt;&lt;span class=&#34;cl&#34;&gt;&lt;span class=&#34;c1&#34;&gt;# On Windows:&lt;/span&gt;
&lt;/span&gt;&lt;/span&gt;&lt;span class=&#34;line&#34;&gt;&lt;span class=&#34;cl&#34;&gt;python -m venv venv
&lt;/span&gt;&lt;/span&gt;&lt;span class=&#34;line&#34;&gt;&lt;span class=&#34;cl&#34;&gt;.&lt;span class=&#34;se&#34;&gt;\v&lt;/span&gt;env&lt;span class=&#34;se&#34;&gt;\S&lt;/span&gt;cripts&lt;span class=&#34;se&#34;&gt;\a&lt;/span&gt;ctivate
&lt;/span&gt;&lt;/span&gt;&lt;span class=&#34;line&#34;&gt;&lt;span class=&#34;cl&#34;&gt;
&lt;/span&gt;&lt;/span&gt;&lt;span class=&#34;line&#34;&gt;&lt;span class=&#34;cl&#34;&gt;&lt;span class=&#34;c1&#34;&gt;# Install dependencies&lt;/span&gt;
&lt;/span&gt;&lt;/span&gt;&lt;span class=&#34;line&#34;&gt;&lt;span class=&#34;cl&#34;&gt;pip install -r requirements.txt
&lt;/span&gt;&lt;/span&gt;&lt;span class=&#34;line&#34;&gt;&lt;span class=&#34;cl&#34;&gt;
&lt;/span&gt;&lt;/span&gt;&lt;span class=&#34;line&#34;&gt;&lt;span class=&#34;cl&#34;&gt;&lt;span class=&#34;c1&#34;&gt;# Add environment file with API keys&lt;/span&gt;
&lt;/span&gt;&lt;/span&gt;&lt;span class=&#34;line&#34;&gt;&lt;span class=&#34;cl&#34;&gt;&lt;span class=&#34;c1&#34;&gt;# On macOS/Linux:&lt;/span&gt;
&lt;/span&gt;&lt;/span&gt;&lt;span class=&#34;line&#34;&gt;&lt;span class=&#34;cl&#34;&gt;&lt;span class=&#34;nb&#34;&gt;echo&lt;/span&gt; -e &lt;span class=&#34;s2&#34;&gt;&amp;#34;ANTHROPIC_API_KEY=your_api_key\nGROQ_API_KEY=your_api_key&amp;#34;&lt;/span&gt; &lt;span class=&#34;p&#34;&gt;|&lt;/span&gt; tee .env
&lt;/span&gt;&lt;/span&gt;&lt;span class=&#34;line&#34;&gt;&lt;span class=&#34;cl&#34;&gt;
&lt;/span&gt;&lt;/span&gt;&lt;span class=&#34;line&#34;&gt;&lt;span class=&#34;cl&#34;&gt;&lt;span class=&#34;c1&#34;&gt;# On Windows (PowerShell):&lt;/span&gt;
&lt;/span&gt;&lt;/span&gt;&lt;span class=&#34;line&#34;&gt;&lt;span class=&#34;cl&#34;&gt;&lt;span class=&#34;s2&#34;&gt;&amp;#34;ANTHROPIC_API_KEY=your_api_key`nGROQ_API_KEY=your_api_key&amp;#34;&lt;/span&gt; &lt;span class=&#34;p&#34;&gt;|&lt;/span&gt; Out-File -FilePath .env -Encoding utf8
&lt;/span&gt;&lt;/span&gt;&lt;span class=&#34;line&#34;&gt;&lt;span class=&#34;cl&#34;&gt;
&lt;/span&gt;&lt;/span&gt;&lt;span class=&#34;line&#34;&gt;&lt;span class=&#34;cl&#34;&gt;&lt;span class=&#34;c1&#34;&gt;# Configure environment variables for Groq&lt;/span&gt;
&lt;/span&gt;&lt;/span&gt;&lt;span class=&#34;line&#34;&gt;&lt;span class=&#34;cl&#34;&gt;&lt;span class=&#34;c1&#34;&gt;# On macOS/Linux:&lt;/span&gt;
&lt;/span&gt;&lt;/span&gt;&lt;span class=&#34;line&#34;&gt;&lt;span class=&#34;cl&#34;&gt;&lt;span class=&#34;nb&#34;&gt;export&lt;/span&gt; &lt;span class=&#34;nv&#34;&gt;GROQ_API_KEY&lt;/span&gt;&lt;span class=&#34;o&#34;&gt;=&lt;/span&gt;your_groq_api_key
&lt;/span&gt;&lt;/span&gt;&lt;span class=&#34;line&#34;&gt;&lt;span class=&#34;cl&#34;&gt;
&lt;/span&gt;&lt;/span&gt;&lt;span class=&#34;line&#34;&gt;&lt;span class=&#34;cl&#34;&gt;&lt;span class=&#34;c1&#34;&gt;# On Windows (PowerShell):&lt;/span&gt;
&lt;/span&gt;&lt;/span&gt;&lt;span class=&#34;line&#34;&gt;&lt;span class=&#34;cl&#34;&gt;&lt;span class=&#34;nv&#34;&gt;$env&lt;/span&gt;:GROQ_API_KEY&lt;span class=&#34;o&#34;&gt;=&lt;/span&gt;&lt;span class=&#34;s2&#34;&gt;&amp;#34;your_groq_api_key&amp;#34;&lt;/span&gt;
&lt;/span&gt;&lt;/span&gt;&lt;span class=&#34;line&#34;&gt;&lt;span class=&#34;cl&#34;&gt;
&lt;/span&gt;&lt;/span&gt;&lt;span class=&#34;line&#34;&gt;&lt;span class=&#34;cl&#34;&gt;&lt;span class=&#34;c1&#34;&gt;# Build dependencies&lt;/span&gt;
&lt;/span&gt;&lt;/span&gt;&lt;span class=&#34;line&#34;&gt;&lt;span class=&#34;cl&#34;&gt;&lt;span class=&#34;c1&#34;&gt;# On macOS/Linux:&lt;/span&gt;
&lt;/span&gt;&lt;/span&gt;&lt;span class=&#34;line&#34;&gt;&lt;span class=&#34;cl&#34;&gt;chmod +x build_whisper.sh
&lt;/span&gt;&lt;/span&gt;&lt;span class=&#34;line&#34;&gt;&lt;span class=&#34;cl&#34;&gt;./build_whisper.sh
&lt;/span&gt;&lt;/span&gt;&lt;span class=&#34;line&#34;&gt;&lt;span class=&#34;cl&#34;&gt;
&lt;/span&gt;&lt;/span&gt;&lt;span class=&#34;line&#34;&gt;&lt;span class=&#34;cl&#34;&gt;&lt;span class=&#34;c1&#34;&gt;# On Windows:&lt;/span&gt;
&lt;/span&gt;&lt;/span&gt;&lt;span class=&#34;line&#34;&gt;&lt;span class=&#34;cl&#34;&gt;.&lt;span class=&#34;se&#34;&gt;\b&lt;/span&gt;uild_whisper.bat
&lt;/span&gt;&lt;/span&gt;&lt;span class=&#34;line&#34;&gt;&lt;span class=&#34;cl&#34;&gt;
&lt;/span&gt;&lt;/span&gt;&lt;span class=&#34;line&#34;&gt;&lt;span class=&#34;cl&#34;&gt;&lt;span class=&#34;c1&#34;&gt;# Start backend servers&lt;/span&gt;
&lt;/span&gt;&lt;/span&gt;&lt;span class=&#34;line&#34;&gt;&lt;span class=&#34;cl&#34;&gt;&lt;span class=&#34;c1&#34;&gt;# On macOS/Linux:&lt;/span&gt;
&lt;/span&gt;&lt;/span&gt;&lt;span class=&#34;line&#34;&gt;&lt;span class=&#34;cl&#34;&gt;./clean_start_backend.sh
&lt;/span&gt;&lt;/span&gt;&lt;span class=&#34;line&#34;&gt;&lt;span class=&#34;cl&#34;&gt;
&lt;/span&gt;&lt;/span&gt;&lt;span class=&#34;line&#34;&gt;&lt;span class=&#34;cl&#34;&gt;&lt;span class=&#34;c1&#34;&gt;# On Windows:&lt;/span&gt;
&lt;/span&gt;&lt;/span&gt;&lt;span class=&#34;line&#34;&gt;&lt;span class=&#34;cl&#34;&gt;.&lt;span class=&#34;se&#34;&gt;\s&lt;/span&gt;tart_with_output.ps1
&lt;/span&gt;&lt;/span&gt;&lt;/code&gt;&lt;/pre&gt;&lt;/td&gt;&lt;/tr&gt;&lt;/table&gt;
&lt;/div&gt;
&lt;/div&gt;&lt;h2 id=&#34;development-guidelines&#34;&gt;Development Guidelines
&lt;/h2&gt;&lt;ul&gt;
&lt;li&gt;Follow the established project structure&lt;/li&gt;
&lt;li&gt;Write tests for new features&lt;/li&gt;
&lt;li&gt;Document API changes&lt;/li&gt;
&lt;li&gt;Use type hints in Python code&lt;/li&gt;
&lt;li&gt;Follow ESLint configuration for JavaScript/TypeScript&lt;/li&gt;
&lt;/ul&gt;
&lt;h2 id=&#34;contributing&#34;&gt;Contributing
&lt;/h2&gt;&lt;ol&gt;
&lt;li&gt;Fork the repository&lt;/li&gt;
&lt;li&gt;Create a feature branch&lt;/li&gt;
&lt;li&gt;Submit a pull request&lt;/li&gt;
&lt;/ol&gt;
&lt;h2 id=&#34;license&#34;&gt;License
&lt;/h2&gt;&lt;p&gt;MIT License - Feel free to use this project for your own purposes.&lt;/p&gt;
&lt;h2 id=&#34;introducing-subscription&#34;&gt;Introducing Subscription
&lt;/h2&gt;&lt;p&gt;We are planning to add a subscription option so that you don&amp;rsquo;t have to run the backend on your own server. This will help you scale better and run the service 24/7. This is based on a few requests we received. If you are interested, please fill out the form &lt;a class=&#34;link&#34; href=&#34;http://zackriya.com/aimeeting/&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;here&lt;/a&gt;.&lt;/p&gt;
&lt;p&gt;Last updated: March 3, 2025&lt;/p&gt;
&lt;h2 id=&#34;star-history&#34;&gt;Star History
&lt;/h2&gt;&lt;p&gt;&lt;a class=&#34;link&#34; href=&#34;https://star-history.com/#Zackriya-Solutions/meeting-minutes&amp;amp;Date&#34;  target=&#34;_blank&#34; rel=&#34;noopener&#34;
    &gt;&lt;img src=&#34;https://api.star-history.com/svg?repos=Zackriya-Solutions/meeting-minutes&amp;amp;type=Date&#34;
	
	
	
	loading=&#34;lazy&#34;
	
		alt=&#34;Star History Chart&#34;
	
	
&gt;&lt;/a&gt;&lt;/p&gt;
</description>
        </item>
        
    </channel>
</rss>
