<?xml version="1.0" encoding="utf-8" standalone="yes"?>
<rss version="2.0" xmlns:atom="http://www.w3.org/2005/Atom" xmlns:content="http://purl.org/rss/1.0/modules/content/">
  <channel>
    <title>系统优化 on Answer</title>
    <link>https://answer.freetools.me/categories/%E7%B3%BB%E7%BB%9F%E4%BC%98%E5%8C%96/</link>
    <description>Recent content in 系统优化 on Answer</description>
    <generator>Hugo -- 0.152.2</generator>
    <language>zh-cn</language>
    <lastBuildDate>Sat, 21 Mar 2026 15:50:17 +0800</lastBuildDate>
    <atom:link href="https://answer.freetools.me/categories/%E7%B3%BB%E7%BB%9F%E4%BC%98%E5%8C%96/index.xml" rel="self" type="application/rss+xml" />
    <item>
      <title>大模型训练的显存瓶颈如何突破：从ZeRO到Flash Attention的五年技术演进</title>
      <link>https://answer.freetools.me/%E5%A4%A7%E6%A8%A1%E5%9E%8B%E8%AE%AD%E7%BB%83%E7%9A%84%E6%98%BE%E5%AD%98%E7%93%B6%E9%A2%88%E5%A6%82%E4%BD%95%E7%AA%81%E7%A0%B4%E4%BB%8Ezero%E5%88%B0flash-attention%E7%9A%84%E4%BA%94%E5%B9%B4%E6%8A%80%E6%9C%AF%E6%BC%94%E8%BF%9B/</link>
      <pubDate>Sat, 21 Mar 2026 15:50:17 +0800</pubDate>
      <guid>https://answer.freetools.me/%E5%A4%A7%E6%A8%A1%E5%9E%8B%E8%AE%AD%E7%BB%83%E7%9A%84%E6%98%BE%E5%AD%98%E7%93%B6%E9%A2%88%E5%A6%82%E4%BD%95%E7%AA%81%E7%A0%B4%E4%BB%8Ezero%E5%88%B0flash-attention%E7%9A%84%E4%BA%94%E5%B9%B4%E6%8A%80%E6%9C%AF%E6%BC%94%E8%BF%9B/</guid>
      <description>深入解析大模型训练中的显存瓶颈问题，从ZeRO的分片策略到Flash Attention的IO感知算法，全面梳理五年来的显存优化技术演进。包含详细的内存计算公式、技术对比和实战配置指南。</description>
    </item>
    <item>
      <title>大页内存为何能让数据库性能翻倍？从TLB原理到实战配置的完整解析</title>
      <link>https://answer.freetools.me/%E5%A4%A7%E9%A1%B5%E5%86%85%E5%AD%98%E4%B8%BA%E4%BD%95%E8%83%BD%E8%AE%A9%E6%95%B0%E6%8D%AE%E5%BA%93%E6%80%A7%E8%83%BD%E7%BF%BB%E5%80%8D%E4%BB%8Etlb%E5%8E%9F%E7%90%86%E5%88%B0%E5%AE%9E%E6%88%98%E9%85%8D%E7%BD%AE%E7%9A%84%E5%AE%8C%E6%95%B4%E8%A7%A3%E6%9E%90/</link>
      <pubDate>Fri, 13 Mar 2026 05:48:34 +0800</pubDate>
      <guid>https://answer.freetools.me/%E5%A4%A7%E9%A1%B5%E5%86%85%E5%AD%98%E4%B8%BA%E4%BD%95%E8%83%BD%E8%AE%A9%E6%95%B0%E6%8D%AE%E5%BA%93%E6%80%A7%E8%83%BD%E7%BF%BB%E5%80%8D%E4%BB%8Etlb%E5%8E%9F%E7%90%86%E5%88%B0%E5%AE%9E%E6%88%98%E9%85%8D%E7%BD%AE%E7%9A%84%E5%AE%8C%E6%95%B4%E8%A7%A3%E6%9E%90/</guid>
      <description>深入解析大页内存（Huge Pages）如何通过优化TLB命中率提升数据库、虚拟机和AI推理性能，包含详细的性能数据、配置方法和权衡分析</description>
    </item>
    <item>
      <title>KV Cache压缩如何让大模型突破百万token上下文——从Attention Sink到Heavy Hitter的技术突围</title>
      <link>https://answer.freetools.me/kv-cache%E5%8E%8B%E7%BC%A9%E5%A6%82%E4%BD%95%E8%AE%A9%E5%A4%A7%E6%A8%A1%E5%9E%8B%E7%AA%81%E7%A0%B4%E7%99%BE%E4%B8%87token%E4%B8%8A%E4%B8%8B%E6%96%87%E4%BB%8Eattention-sink%E5%88%B0heavy-hitter%E7%9A%84%E6%8A%80%E6%9C%AF%E7%AA%81%E5%9B%B4/</link>
      <pubDate>Mon, 09 Mar 2026 07:49:36 +0800</pubDate>
      <guid>https://answer.freetools.me/kv-cache%E5%8E%8B%E7%BC%A9%E5%A6%82%E4%BD%95%E8%AE%A9%E5%A4%A7%E6%A8%A1%E5%9E%8B%E7%AA%81%E7%A0%B4%E7%99%BE%E4%B8%87token%E4%B8%8A%E4%B8%8B%E6%96%87%E4%BB%8Eattention-sink%E5%88%B0heavy-hitter%E7%9A%84%E6%8A%80%E6%9C%AF%E7%AA%81%E5%9B%B4/</guid>
      <description>深入解析KV Cache压缩技术如何突破大模型推理的内存瓶颈。从StreamingLLM的Attention Sink到H2O的Heavy Hitter，从KIVI的非对称量化到KVQuant的超低比特压缩，全面揭示这项决定大模型上下文长度的核心技术。</description>
    </item>
    <item>
      <title>一个请求先结束，为何整批都要等——从静态批处理到连续批处理的LLM推理革命</title>
      <link>https://answer.freetools.me/%E4%B8%80%E4%B8%AA%E8%AF%B7%E6%B1%82%E5%85%88%E7%BB%93%E6%9D%9F%E4%B8%BA%E4%BD%95%E6%95%B4%E6%89%B9%E9%83%BD%E8%A6%81%E7%AD%89%E4%BB%8E%E9%9D%99%E6%80%81%E6%89%B9%E5%A4%84%E7%90%86%E5%88%B0%E8%BF%9E%E7%BB%AD%E6%89%B9%E5%A4%84%E7%90%86%E7%9A%84llm%E6%8E%A8%E7%90%86%E9%9D%A9%E5%91%BD/</link>
      <pubDate>Mon, 09 Mar 2026 04:21:14 +0800</pubDate>
      <guid>https://answer.freetools.me/%E4%B8%80%E4%B8%AA%E8%AF%B7%E6%B1%82%E5%85%88%E7%BB%93%E6%9D%9F%E4%B8%BA%E4%BD%95%E6%95%B4%E6%89%B9%E9%83%BD%E8%A6%81%E7%AD%89%E4%BB%8E%E9%9D%99%E6%80%81%E6%89%B9%E5%A4%84%E7%90%86%E5%88%B0%E8%BF%9E%E7%BB%AD%E6%89%B9%E5%A4%84%E7%90%86%E7%9A%84llm%E6%8E%A8%E7%90%86%E9%9D%A9%E5%91%BD/</guid>
      <description>深入解析连续批处理（Continuous Batching）如何解决大模型推理中的GPU利用率问题。从ORCA论文的迭代级调度到vLLM的实现，全面阐述这项让推理吞吐量提升数十倍的核心技术。涵盖prefill与decode阶段、KV缓存、选择性批处理、chunked prefill等关键技术。</description>
    </item>
    <item>
      <title>为什么PagedAttention能让大模型推理吞吐量提升数倍——从KV Cache内存碎片到分页管理的系统级优化</title>
      <link>https://answer.freetools.me/%E4%B8%BA%E4%BB%80%E4%B9%88pagedattention%E8%83%BD%E8%AE%A9%E5%A4%A7%E6%A8%A1%E5%9E%8B%E6%8E%A8%E7%90%86%E5%90%9E%E5%90%90%E9%87%8F%E6%8F%90%E5%8D%87%E6%95%B0%E5%80%8D%E4%BB%8Ekv-cache%E5%86%85%E5%AD%98%E7%A2%8E%E7%89%87%E5%88%B0%E5%88%86%E9%A1%B5%E7%AE%A1%E7%90%86%E7%9A%84%E7%B3%BB%E7%BB%9F%E7%BA%A7%E4%BC%98%E5%8C%96/</link>
      <pubDate>Mon, 09 Mar 2026 04:02:20 +0800</pubDate>
      <guid>https://answer.freetools.me/%E4%B8%BA%E4%BB%80%E4%B9%88pagedattention%E8%83%BD%E8%AE%A9%E5%A4%A7%E6%A8%A1%E5%9E%8B%E6%8E%A8%E7%90%86%E5%90%9E%E5%90%90%E9%87%8F%E6%8F%90%E5%8D%87%E6%95%B0%E5%80%8D%E4%BB%8Ekv-cache%E5%86%85%E5%AD%98%E7%A2%8E%E7%89%87%E5%88%B0%E5%88%86%E9%A1%B5%E7%AE%A1%E7%90%86%E7%9A%84%E7%B3%BB%E7%BB%9F%E7%BA%A7%E4%BC%98%E5%8C%96/</guid>
      <description>深度解析PagedAttention如何借鉴操作系统虚拟内存技术解决KV Cache内存碎片问题。从三种内存碎片的根本原因到block table映射机制，全面阐述这项让LLM推理吞吐量提升2-4倍的核心技术。</description>
    </item>
    <item>
      <title>为什么Flash Attention能将注意力计算提速数倍而不损失精度——从GPU内存墙到IO感知算法的技术突围</title>
      <link>https://answer.freetools.me/%E4%B8%BA%E4%BB%80%E4%B9%88flash-attention%E8%83%BD%E5%B0%86%E6%B3%A8%E6%84%8F%E5%8A%9B%E8%AE%A1%E7%AE%97%E6%8F%90%E9%80%9F%E6%95%B0%E5%80%8D%E8%80%8C%E4%B8%8D%E6%8D%9F%E5%A4%B1%E7%B2%BE%E5%BA%A6%E4%BB%8Egpu%E5%86%85%E5%AD%98%E5%A2%99%E5%88%B0io%E6%84%9F%E7%9F%A5%E7%AE%97%E6%B3%95%E7%9A%84%E6%8A%80%E6%9C%AF%E7%AA%81%E5%9B%B4/</link>
      <pubDate>Mon, 09 Mar 2026 03:57:50 +0800</pubDate>
      <guid>https://answer.freetools.me/%E4%B8%BA%E4%BB%80%E4%B9%88flash-attention%E8%83%BD%E5%B0%86%E6%B3%A8%E6%84%8F%E5%8A%9B%E8%AE%A1%E7%AE%97%E6%8F%90%E9%80%9F%E6%95%B0%E5%80%8D%E8%80%8C%E4%B8%8D%E6%8D%9F%E5%A4%B1%E7%B2%BE%E5%BA%A6%E4%BB%8Egpu%E5%86%85%E5%AD%98%E5%A2%99%E5%88%B0io%E6%84%9F%E7%9F%A5%E7%AE%97%E6%B3%95%E7%9A%84%E6%8A%80%E6%9C%AF%E7%AA%81%E5%9B%B4/</guid>
      <description>深度解析Flash Attention如何通过IO感知算法设计突破GPU内存墙瓶颈，实现注意力计算的数倍加速。从GPU内存层级到分块计算，全面揭示这项改变大模型训练格局的核心技术。</description>
    </item>
    <item>
      <title>电脑为何越用越慢：从软件膨胀到硬件老化的技术真相</title>
      <link>https://answer.freetools.me/%E7%94%B5%E8%84%91%E4%B8%BA%E4%BD%95%E8%B6%8A%E7%94%A8%E8%B6%8A%E6%85%A2%E4%BB%8E%E8%BD%AF%E4%BB%B6%E8%86%A8%E8%83%80%E5%88%B0%E7%A1%AC%E4%BB%B6%E8%80%81%E5%8C%96%E7%9A%84%E6%8A%80%E6%9C%AF%E7%9C%9F%E7%9B%B8/</link>
      <pubDate>Sat, 07 Mar 2026 23:40:44 +0800</pubDate>
      <guid>https://answer.freetools.me/%E7%94%B5%E8%84%91%E4%B8%BA%E4%BD%95%E8%B6%8A%E7%94%A8%E8%B6%8A%E6%85%A2%E4%BB%8E%E8%BD%AF%E4%BB%B6%E8%86%A8%E8%83%80%E5%88%B0%E7%A1%AC%E4%BB%B6%E8%80%81%E5%8C%96%E7%9A%84%E6%8A%80%E6%9C%AF%E7%9C%9F%E7%9B%B8/</guid>
      <description>深入解析电脑性能随时间下降的多重原因。从Wirth定律揭示的软件膨胀困境，到SSD写入放大的存储物理限制，再到热节流与电迁移的硬件退化机制，系统阐述为何这台机器在几年后变得如此缓慢。涵盖后台进程累积、内存泄漏、磁盘碎片化、TRIM命令、散热系统失效等关键因素，并提供基于工程原理的优化建议。</description>
    </item>
    <item>
      <title>GPU显存为何总是不够用：从内存墙到KV Cache碎片化的技术突围</title>
      <link>https://answer.freetools.me/gpu%E6%98%BE%E5%AD%98%E4%B8%BA%E4%BD%95%E6%80%BB%E6%98%AF%E4%B8%8D%E5%A4%9F%E7%94%A8%E4%BB%8E%E5%86%85%E5%AD%98%E5%A2%99%E5%88%B0kv-cache%E7%A2%8E%E7%89%87%E5%8C%96%E7%9A%84%E6%8A%80%E6%9C%AF%E7%AA%81%E5%9B%B4/</link>
      <pubDate>Fri, 06 Mar 2026 22:30:03 +0800</pubDate>
      <guid>https://answer.freetools.me/gpu%E6%98%BE%E5%AD%98%E4%B8%BA%E4%BD%95%E6%80%BB%E6%98%AF%E4%B8%8D%E5%A4%9F%E7%94%A8%E4%BB%8E%E5%86%85%E5%AD%98%E5%A2%99%E5%88%B0kv-cache%E7%A2%8E%E7%89%87%E5%8C%96%E7%9A%84%E6%8A%80%E6%9C%AF%E7%AA%81%E5%9B%B4/</guid>
      <description>深度解析GPU显存瓶颈的本质原因，从硬件层面的内存墙问题到软件层面的KV Cache管理困境，全面剖析PagedAttention、FlashAttention等突破性技术的原理与权衡。</description>
    </item>
    <item>
      <title>压缩算法的五十年技术博弈：为什么速度和压缩比从来不能兼得</title>
      <link>https://answer.freetools.me/%E5%8E%8B%E7%BC%A9%E7%AE%97%E6%B3%95%E7%9A%84%E4%BA%94%E5%8D%81%E5%B9%B4%E6%8A%80%E6%9C%AF%E5%8D%9A%E5%BC%88%E4%B8%BA%E4%BB%80%E4%B9%88%E9%80%9F%E5%BA%A6%E5%92%8C%E5%8E%8B%E7%BC%A9%E6%AF%94%E4%BB%8E%E6%9D%A5%E4%B8%8D%E8%83%BD%E5%85%BC%E5%BE%97/</link>
      <pubDate>Fri, 06 Mar 2026 11:41:20 +0800</pubDate>
      <guid>https://answer.freetools.me/%E5%8E%8B%E7%BC%A9%E7%AE%97%E6%B3%95%E7%9A%84%E4%BA%94%E5%8D%81%E5%B9%B4%E6%8A%80%E6%9C%AF%E5%8D%9A%E5%BC%88%E4%B8%BA%E4%BB%80%E4%B9%88%E9%80%9F%E5%BA%A6%E5%92%8C%E5%8E%8B%E7%BC%A9%E6%AF%94%E4%BB%8E%E6%9D%A5%E4%B8%8D%E8%83%BD%E5%85%BC%E5%BE%97/</guid>
      <description>从1977年LZ77算法诞生到2020年代zstd和Brotli的崛起，深度剖析压缩算法的技术演进历程。基于香农信息论、霍夫曼编码、算术编码、ANS等理论基础，对比DEFLATE、LZ4、Brotli、Zstandard的核心设计差异，揭示速度与压缩比之间的根本权衡。涵盖字典压缩、熵编码、滑动窗口、Repcode建模等关键技术，以及在Web传输、数据库、文件系统等场景的选型指南。</description>
    </item>
  </channel>
</rss>
