<?xml version="1.0" encoding="utf-8"?>
<feed xmlns="http://www.w3.org/2005/Atom">
  <title>Ollama models</title>
  <id>https://ollama.com/search?o=newest</id>
  <author>
    <name>Model Library</name>
  </author>
  <link href="https://ollama.com/search?o=newest" rel="self"/>
  <updated>2026-04-12T06:53:45.589603+00:00</updated>
  <entry>
    <title>glm-5.1</title>
    <id>https://ollama.com/library/glm-5.1</id>
    <link href="https://ollama.com/library/glm-5.1"/>
    <summary>GLM-5.1 is our next-generation flagship model for agentic engineering, with significantly stronger coding capabilities than its predecessor. It achieves state-of-the-art performance on SWE-Bench Pro and leads GLM-5 by a wide margin.</summary>
    <updated>2026-04-07T15:00:00+00:00</updated>
    <category term="tools"/>
    <category term="thinking"/>
    <content type="html">&lt;p&gt;GLM-5.1 is our next-generation flagship model for agentic engineering, with significantly stronger coding capabilities than its predecessor. It achieves state-of-the-art performance on SWE-Bench Pro and leads GLM-5 by a wide margin.&lt;/p&gt;&lt;p&gt;Pulls: 24.4K&lt;/p&gt;&lt;p&gt;Tags: 1&lt;/p&gt;</content>
  </entry>
  <entry>
    <title>gemma4</title>
    <id>https://ollama.com/library/gemma4</id>
    <link href="https://ollama.com/library/gemma4"/>
    <summary>Gemma 4 models are designed to deliver frontier-level performance at each size. They are well-suited for reasoning, agentic workflows, coding, and multimodal understanding.</summary>
    <updated>2026-04-06T01:39:00+00:00</updated>
    <category term="e2b"/>
    <category term="e4b"/>
    <category term="26b"/>
    <category term="31b"/>
    <category term="vision"/>
    <category term="tools"/>
    <category term="thinking"/>
    <category term="audio"/>
    <content type="html">&lt;p&gt;Gemma 4 models are designed to deliver frontier-level performance at each size. They are well-suited for reasoning, agentic workflows, coding, and multimodal understanding.&lt;/p&gt;&lt;p&gt;Pulls: 2.5M&lt;/p&gt;&lt;p&gt;Tags: 17&lt;/p&gt;</content>
  </entry>
  <entry>
    <title>qwen3.5</title>
    <id>https://ollama.com/library/qwen3.5</id>
    <link href="https://ollama.com/library/qwen3.5"/>
    <summary>Qwen 3.5 is a family of open-source multimodal models that delivers exceptional utility and performance.</summary>
    <updated>2026-04-01T23:10:00+00:00</updated>
    <category term="0.8b"/>
    <category term="2b"/>
    <category term="4b"/>
    <category term="9b"/>
    <category term="27b"/>
    <category term="35b"/>
    <category term="122b"/>
    <category term="vision"/>
    <category term="tools"/>
    <category term="thinking"/>
    <content type="html">&lt;p&gt;Qwen 3.5 is a family of open-source multimodal models that delivers exceptional utility and performance.&lt;/p&gt;&lt;p&gt;Pulls: 5.8M&lt;/p&gt;&lt;p&gt;Tags: 58&lt;/p&gt;</content>
  </entry>
  <entry>
    <title>nemotron-cascade-2</title>
    <id>https://ollama.com/library/nemotron-cascade-2</id>
    <link href="https://ollama.com/library/nemotron-cascade-2"/>
    <summary>An open 30B MoE model from NVIDIA with 3B activated parameters that delivers strong reasoning and agentic capabilities.</summary>
    <updated>2026-03-20T20:10:00+00:00</updated>
    <category term="30b"/>
    <category term="tools"/>
    <category term="thinking"/>
    <content type="html">&lt;p&gt;An open 30B MoE model from NVIDIA with 3B activated parameters that delivers strong reasoning and agentic capabilities.&lt;/p&gt;&lt;p&gt;Pulls: 89K&lt;/p&gt;&lt;p&gt;Tags: 3&lt;/p&gt;</content>
  </entry>
  <entry>
    <title>minimax-m2.7</title>
    <id>https://ollama.com/library/minimax-m2.7</id>
    <link href="https://ollama.com/library/minimax-m2.7"/>
    <summary>MiniMax's M2-series model for coding, agentic workflows, and professional productivity.</summary>
    <updated>2026-03-18T18:05:00+00:00</updated>
    <category term="tools"/>
    <category term="thinking"/>
    <content type="html">&lt;p&gt;MiniMax's M2-series model for coding, agentic workflows, and professional productivity.&lt;/p&gt;&lt;p&gt;Pulls: 65.2K&lt;/p&gt;&lt;p&gt;Tags: 1&lt;/p&gt;</content>
  </entry>
  <entry>
    <title>nemotron-3-nano</title>
    <id>https://ollama.com/library/nemotron-3-nano</id>
    <link href="https://ollama.com/library/nemotron-3-nano"/>
    <summary>Nemotron-3-Nano is a new standard for efficient, open, and intelligent agentic models, now updated with a 4B parameter model.</summary>
    <updated>2026-03-16T15:34:00+00:00</updated>
    <category term="4b"/>
    <category term="30b"/>
    <category term="tools"/>
    <category term="thinking"/>
    <content type="html">&lt;p&gt;Nemotron-3-Nano is a new Standard for Efficient, Open, and Intelligent Agentic Models, now updated with a 4B parameter count model.&lt;/p&gt;&lt;p&gt;Pulls: 383.7K&lt;/p&gt;&lt;p&gt;Tags: 9&lt;/p&gt;</content>
  </entry>
  <entry>
    <title>nemotron-3-super</title>
    <id>https://ollama.com/library/nemotron-3-super</id>
    <link href="https://ollama.com/library/nemotron-3-super"/>
    <summary>NVIDIA Nemotron 3 Super is a 120B open MoE model activating just 12B parameters to deliver maximum compute efficiency and accuracy for complex multi-agent applications.</summary>
    <updated>2026-03-11T16:00:00+00:00</updated>
    <category term="120b"/>
    <category term="tools"/>
    <category term="thinking"/>
    <content type="html">&lt;p&gt;NVIDIA Nemotron 3 Super is a 120B open MoE model activating just 12B parameters to deliver maximum compute efficiency and accuracy for complex multi-agent applications.&lt;/p&gt;&lt;p&gt;Pulls: 223K&lt;/p&gt;&lt;p&gt;Tags: 7&lt;/p&gt;</content>
  </entry>
  <entry>
    <title>lfm2</title>
    <id>https://ollama.com/library/lfm2</id>
    <link href="https://ollama.com/library/lfm2"/>
    <summary>LFM2 is a family of hybrid models designed for on-device deployment. LFM2-24B-A2B is the largest model in the family, scaling the architecture to 24 billion parameters while keeping inference efficient.</summary>
    <updated>2026-02-24T01:17:00+00:00</updated>
    <category term="24b"/>
    <category term="tools"/>
    <content type="html">&lt;p&gt;LFM2 is a family of hybrid models designed for on-device deployment. LFM2-24B-A2B is the largest model in the family, scaling the architecture to 24 billion parameters while keeping inference efficient.&lt;/p&gt;&lt;p&gt;Pulls: 1.1M&lt;/p&gt;&lt;p&gt;Tags: 6&lt;/p&gt;</content>
  </entry>
  <entry>
    <title>minimax-m2.5</title>
    <id>https://ollama.com/library/minimax-m2.5</id>
    <link href="https://ollama.com/library/minimax-m2.5"/>
    <summary>MiniMax-M2.5 is a state-of-the-art large language model designed for real-world productivity and coding tasks.</summary>
    <updated>2026-02-12T09:22:00+00:00</updated>
    <category term="tools"/>
    <category term="thinking"/>
    <content type="html">&lt;p&gt;MiniMax-M2.5 is a state-of-the-art large language model designed for real-world productivity and coding tasks.&lt;/p&gt;&lt;p&gt;Pulls: 159.9K&lt;/p&gt;&lt;p&gt;Tags: 1&lt;/p&gt;</content>
  </entry>
  <entry>
    <title>glm-5</title>
    <id>https://ollama.com/library/glm-5</id>
    <link href="https://ollama.com/library/glm-5"/>
    <summary>A strong reasoning and agentic model from Z.ai with 744B total parameters (40B active), built for complex systems engineering and long-horizon tasks.</summary>
    <updated>2026-02-11T18:43:00+00:00</updated>
    <category term="tools"/>
    <category term="thinking"/>
    <content type="html">&lt;p&gt;A strong reasoning and agentic model from Z.ai with 744B total parameters (40B active), built for complex systems engineering and long-horizon tasks.&lt;/p&gt;&lt;p&gt;Pulls: 181.7K&lt;/p&gt;&lt;p&gt;Tags: 1&lt;/p&gt;</content>
  </entry>
  <entry>
    <title>qwen3-coder-next</title>
    <id>https://ollama.com/library/qwen3-coder-next</id>
    <link href="https://ollama.com/library/qwen3-coder-next"/>
    <summary>Qwen3-Coder-Next is a coding-focused language model from Alibaba's Qwen team, optimized for agentic coding workflows and local development.</summary>
    <updated>2026-02-06T05:23:00+00:00</updated>
    <category term="tools"/>
    <content type="html">&lt;p&gt;Qwen3-Coder-Next is a coding-focused language model from Alibaba's Qwen team, optimized for agentic coding workflows and local development.&lt;/p&gt;&lt;p&gt;Pulls: 1M&lt;/p&gt;&lt;p&gt;Tags: 4&lt;/p&gt;</content>
  </entry>
  <entry>
    <title>glm-ocr</title>
    <id>https://ollama.com/library/glm-ocr</id>
    <link href="https://ollama.com/library/glm-ocr"/>
    <summary>GLM-OCR is a multimodal OCR model for complex document understanding, built on the GLM-V encoder–decoder architecture.</summary>
    <updated>2026-02-02T23:29:00+00:00</updated>
    <category term="vision"/>
    <category term="tools"/>
    <content type="html">&lt;p&gt;GLM-OCR is a multimodal OCR model for complex document understanding, built on the GLM-V encoder–decoder architecture.&lt;/p&gt;&lt;p&gt;Pulls: 247K&lt;/p&gt;&lt;p&gt;Tags: 3&lt;/p&gt;</content>
  </entry>
  <entry>
    <title>kimi-k2.5</title>
    <id>https://ollama.com/library/kimi-k2.5</id>
    <link href="https://ollama.com/library/kimi-k2.5"/>
    <summary>Kimi K2.5 is an open-source, native multimodal agentic model that seamlessly integrates vision and language understanding with advanced agentic capabilities, instant and thinking modes, as well as conversational and agentic paradigms.</summary>
    <updated>2026-01-27T07:29:00+00:00</updated>
    <category term="vision"/>
    <category term="tools"/>
    <category term="thinking"/>
    <content type="html">&lt;p&gt;Kimi K2.5 is an open-source, native multimodal agentic model that seamlessly integrates vision and language understanding with advanced agentic capabilities, instant and thinking modes, as well as conversational and agentic paradigms.&lt;/p&gt;&lt;p&gt;Pulls: 236.2K&lt;/p&gt;&lt;p&gt;Tags: 1&lt;/p&gt;</content>
  </entry>
  <entry>
    <title>glm-4.7-flash</title>
    <id>https://ollama.com/library/glm-4.7-flash</id>
    <link href="https://ollama.com/library/glm-4.7-flash"/>
    <summary>As the strongest model in the 30B class, GLM-4.7-Flash offers a new option for lightweight deployment that balances performance and efficiency.</summary>
    <updated>2026-01-24T23:40:00+00:00</updated>
    <category term="tools"/>
    <category term="thinking"/>
    <content type="html">&lt;p&gt;As the strongest model in the 30B class, GLM-4.7-Flash offers a new option for lightweight deployment that balances performance and efficiency.&lt;/p&gt;&lt;p&gt;Pulls: 1.1M&lt;/p&gt;&lt;p&gt;Tags: 4&lt;/p&gt;</content>
  </entry>
  <entry>
    <title>lfm2.5-thinking</title>
    <id>https://ollama.com/library/lfm2.5-thinking</id>
    <link href="https://ollama.com/library/lfm2.5-thinking"/>
    <summary>LFM2.5 is a new family of hybrid models designed for on-device deployment.</summary>
    <updated>2026-01-20T12:41:00+00:00</updated>
    <category term="1.2b"/>
    <category term="tools"/>
    <content type="html">&lt;p&gt;LFM2.5 is a new family of hybrid models designed for on-device deployment.&lt;/p&gt;&lt;p&gt;Pulls: 1.1M&lt;/p&gt;&lt;p&gt;Tags: 5&lt;/p&gt;</content>
  </entry>
  <entry>
    <title>translategemma</title>
    <id>https://ollama.com/library/translategemma</id>
    <link href="https://ollama.com/library/translategemma"/>
    <summary>A new collection of open translation models built on Gemma 3, helping people communicate across 55 languages.</summary>
    <updated>2026-01-16T20:57:00+00:00</updated>
    <category term="4b"/>
    <category term="12b"/>
    <category term="27b"/>
    <category term="vision"/>
    <content type="html">&lt;p&gt;A new collection of open translation models built on Gemma 3, helping people communicate across 55 languages.&lt;/p&gt;&lt;p&gt;Pulls: 1.1M&lt;/p&gt;&lt;p&gt;Tags: 13&lt;/p&gt;</content>
  </entry>
  <entry>
    <title>glm-4.7</title>
    <id>https://ollama.com/library/glm-4.7</id>
    <link href="https://ollama.com/library/glm-4.7"/>
    <summary>Advancing the Coding Capability</summary>
    <updated>2025-12-23T17:56:00+00:00</updated>
    <category term="tools"/>
    <category term="thinking"/>
    <content type="html">&lt;p&gt;Advancing the Coding Capability&lt;/p&gt;&lt;p&gt;Pulls: 87.8K&lt;/p&gt;&lt;p&gt;Tags: 1&lt;/p&gt;</content>
  </entry>
  <entry>
    <title>minimax-m2.1</title>
    <id>https://ollama.com/library/minimax-m2.1</id>
    <link href="https://ollama.com/library/minimax-m2.1"/>
    <summary>Exceptional multilingual capabilities to elevate code engineering</summary>
    <updated>2025-12-23T03:19:00+00:00</updated>
    <category term="tools"/>
    <content type="html">&lt;p&gt;Exceptional multilingual capabilities to elevate code engineering&lt;/p&gt;&lt;p&gt;Pulls: 38.8K&lt;/p&gt;&lt;p&gt;Tags: 1&lt;/p&gt;</content>
  </entry>
  <entry>
    <title>gemini-3-flash-preview</title>
    <id>https://ollama.com/library/gemini-3-flash-preview</id>
    <link href="https://ollama.com/library/gemini-3-flash-preview"/>
    <summary>Gemini 3 Flash offers frontier intelligence built for speed at a fraction of the cost.</summary>
    <updated>2025-12-20T20:44:00+00:00</updated>
    <category term="vision"/>
    <category term="tools"/>
    <category term="thinking"/>
    <content type="html">&lt;p&gt;Gemini 3 Flash offers frontier intelligence built for speed at a fraction of the cost.&lt;/p&gt;&lt;p&gt;Pulls: 129.1K&lt;/p&gt;&lt;p&gt;Tags: 2&lt;/p&gt;</content>
  </entry>
  <entry>
    <title>functiongemma</title>
    <id>https://ollama.com/library/functiongemma</id>
    <link href="https://ollama.com/library/functiongemma"/>
    <summary>FunctionGemma is a specialized version of Google's Gemma 3 270M model fine-tuned explicitly for function calling.</summary>
    <updated>2025-12-18T07:03:00+00:00</updated>
    <category term="270m"/>
    <category term="tools"/>
    <content type="html">&lt;p&gt;FunctionGemma is a specialized version of Google's Gemma 3 270M model fine-tuned explicitly for function calling.&lt;/p&gt;&lt;p&gt;Pulls: 144.3K&lt;/p&gt;&lt;p&gt;Tags: 4&lt;/p&gt;</content>
  </entry>
</feed>
