<?xml version="1.0" encoding="utf-8" standalone="yes"?>
<rss version="2.0" xmlns:atom="http://www.w3.org/2005/Atom" xmlns:content="http://purl.org/rss/1.0/modules/content/">
  <channel>
    <title>Transformer Optimization on Answer</title>
    <link>https://answer.freetools.me/tags/transformer%E4%BC%98%E5%8C%96/</link>
    <description>Recent content in Transformer Optimization on Answer</description>
    <generator>Hugo -- 0.152.2</generator>
    <language>zh-cn</language>
    <lastBuildDate>Mon, 09 Mar 2026 03:57:50 +0800</lastBuildDate>
    <atom:link href="https://answer.freetools.me/tags/transformer%E4%BC%98%E5%8C%96/index.xml" rel="self" type="application/rss+xml" />
    <item>
      <title>Why Flash Attention Speeds Up Attention Computation Severalfold Without Losing Precision: A Technical Breakthrough from the GPU Memory Wall to IO-Aware Algorithms</title>
      <link>https://answer.freetools.me/%E4%B8%BA%E4%BB%80%E4%B9%88flash-attention%E8%83%BD%E5%B0%86%E6%B3%A8%E6%84%8F%E5%8A%9B%E8%AE%A1%E7%AE%97%E6%8F%90%E9%80%9F%E6%95%B0%E5%80%8D%E8%80%8C%E4%B8%8D%E6%8D%9F%E5%A4%B1%E7%B2%BE%E5%BA%A6%E4%BB%8Egpu%E5%86%85%E5%AD%98%E5%A2%99%E5%88%B0io%E6%84%9F%E7%9F%A5%E7%AE%97%E6%B3%95%E7%9A%84%E6%8A%80%E6%9C%AF%E7%AA%81%E5%9B%B4/</link>
      <pubDate>Mon, 09 Mar 2026 03:57:50 +0800</pubDate>
      <guid>https://answer.freetools.me/%E4%B8%BA%E4%BB%80%E4%B9%88flash-attention%E8%83%BD%E5%B0%86%E6%B3%A8%E6%84%8F%E5%8A%9B%E8%AE%A1%E7%AE%97%E6%8F%90%E9%80%9F%E6%95%B0%E5%80%8D%E8%80%8C%E4%B8%8D%E6%8D%9F%E5%A4%B1%E7%B2%BE%E5%BA%A6%E4%BB%8Egpu%E5%86%85%E5%AD%98%E5%A2%99%E5%88%B0io%E6%84%9F%E7%9F%A5%E7%AE%97%E6%B3%95%E7%9A%84%E6%8A%80%E6%9C%AF%E7%AA%81%E5%9B%B4/</guid>
      <description>An in-depth analysis of how Flash Attention breaks through the GPU memory-wall bottleneck via IO-aware algorithm design to achieve severalfold speedups in attention computation. From the GPU memory hierarchy to tiled computation, a comprehensive look at the core technique that reshaped large-model training.</description>
    </item>
  </channel>
</rss>
