<?xml version="1.0" encoding="utf-8" standalone="yes"?>
<rss version="2.0" xmlns:atom="http://www.w3.org/2005/Atom" xmlns:content="http://purl.org/rss/1.0/modules/content/">
  <channel>
    <title>Neural Networks on Answer</title>
    <link>https://answer.freetools.me/tags/%E7%A5%9E%E7%BB%8F%E7%BD%91%E7%BB%9C/</link>
    <description>Recent content in Neural Networks on Answer</description>
    <generator>Hugo -- 0.152.2</generator>
    <language>en</language>
    <lastBuildDate>Fri, 13 Mar 2026 00:53:12 +0800</lastBuildDate>
    <atom:link href="https://answer.freetools.me/tags/%E7%A5%9E%E7%BB%8F%E7%BD%91%E7%BB%9C/index.xml" rel="self" type="application/rss+xml" />
    <item>
      <title>The Optimization Landscape of Neural Networks: Why Non-Convex Loss Functions Are Less Scary Than They Seem</title>
      <link>https://answer.freetools.me/%E7%A5%9E%E7%BB%8F%E7%BD%91%E7%BB%9C%E7%9A%84%E4%BC%98%E5%8C%96%E6%99%AF%E8%A7%82%E4%B8%BA%E4%BB%80%E4%B9%88%E9%9D%9E%E5%87%B8%E6%8D%9F%E5%A4%B1%E5%87%BD%E6%95%B0%E6%B2%A1%E6%9C%89%E6%83%B3%E8%B1%A1%E4%B8%AD%E5%8F%AF%E6%80%95/</link>
      <pubDate>Fri, 13 Mar 2026 00:53:12 +0800</pubDate>
      <guid>https://answer.freetools.me/%E7%A5%9E%E7%BB%8F%E7%BD%91%E7%BB%9C%E7%9A%84%E4%BC%98%E5%8C%96%E6%99%AF%E8%A7%82%E4%B8%BA%E4%BB%80%E4%B9%88%E9%9D%9E%E5%87%B8%E6%8D%9F%E5%A4%B1%E5%87%BD%E6%95%B0%E6%B2%A1%E6%9C%89%E6%83%B3%E8%B1%A1%E4%B8%AD%E5%8F%AF%E6%80%95/</guid>
      <description>Starting from the geometric properties of the loss landscape, an in-depth analysis of why gradient descent can find good solutions in highly non-convex, high-dimensional spaces. Covers core concepts such as saddle points, flat minima, and mode connectivity, as well as how techniques like residual connections and batch normalization reshape the optimization landscape.</description>
    </item>
    <item>
      <title>The Universal Approximation Theorem: Why a Two-Layer Neural Network Can Approximate Any Function</title>
      <link>https://answer.freetools.me/%E4%B8%87%E8%83%BD%E9%80%BC%E8%BF%91%E5%AE%9A%E7%90%86%E4%B8%BA%E4%BB%80%E4%B9%88%E4%B8%A4%E5%B1%82%E7%A5%9E%E7%BB%8F%E7%BD%91%E7%BB%9C%E8%83%BD%E9%80%BC%E8%BF%91%E4%BB%BB%E6%84%8F%E5%87%BD%E6%95%B0/</link>
      <pubDate>Thu, 12 Mar 2026 22:35:33 +0800</pubDate>
      <guid>https://answer.freetools.me/%E4%B8%87%E8%83%BD%E9%80%BC%E8%BF%91%E5%AE%9A%E7%90%86%E4%B8%BA%E4%BB%80%E4%B9%88%E4%B8%A4%E5%B1%82%E7%A5%9E%E7%BB%8F%E7%BD%91%E7%BB%9C%E8%83%BD%E9%80%BC%E8%BF%91%E4%BB%BB%E6%84%8F%E5%87%BD%E6%95%B0/</guid>
      <description>From Cybenko's 1989 mathematical proof to Telgarsky's benefits-of-depth theory, an in-depth analysis of the universal approximation theorem: its meaning, proof strategy, limitations, and practical guidance for deep learning.</description>
    </item>
    <item>
      <title>Self-Attention Computation Explained: A Complete Technical Walkthrough from Matrix Multiplication to Gradient Flow</title>
      <link>https://answer.freetools.me/self-attention%E8%AE%A1%E7%AE%97%E5%85%A8%E8%A7%A3%E4%BB%8E%E7%9F%A9%E9%98%B5%E4%B9%98%E6%B3%95%E5%88%B0%E6%A2%AF%E5%BA%A6%E6%B5%81%E5%8A%A8%E7%9A%84%E5%AE%8C%E6%95%B4%E6%8A%80%E6%9C%AF%E8%A7%A3%E6%9E%90/</link>
      <pubDate>Thu, 12 Mar 2026 18:36:22 +0800</pubDate>
      <guid>https://answer.freetools.me/self-attention%E8%AE%A1%E7%AE%97%E5%85%A8%E8%A7%A3%E4%BB%8E%E7%9F%A9%E9%98%B5%E4%B9%98%E6%B3%95%E5%88%B0%E6%A2%AF%E5%BA%A6%E6%B5%81%E5%8A%A8%E7%9A%84%E5%AE%8C%E6%95%B4%E6%8A%80%E6%9C%AF%E8%A7%A3%E6%9E%90/</guid>
      <description>An in-depth analysis of the complete self-attention computation in Transformers, from the intuitive meaning of Query/Key/Value to the implementation details of multi-head attention. Covers attention score computation, the scaling rationale, masking, and residual connections, along with frequent interview questions and common misconceptions.</description>
    </item>
    <item>
      <title>Debugging Model Training: A Complete Diagnostic Guide from Stagnant Loss to Exploding Gradients</title>
      <link>https://answer.freetools.me/%E6%A8%A1%E5%9E%8B%E8%AE%AD%E7%BB%83%E8%B0%83%E8%AF%95%E4%BB%8E%E6%8D%9F%E5%A4%B1%E4%B8%8D%E4%B8%8B%E9%99%8D%E5%88%B0%E6%A2%AF%E5%BA%A6%E7%88%86%E7%82%B8%E7%9A%84%E5%AE%8C%E6%95%B4%E8%AF%8A%E6%96%AD%E6%8C%87%E5%8D%97/</link>
      <pubDate>Thu, 12 Mar 2026 17:08:07 +0800</pubDate>
      <guid>https://answer.freetools.me/%E6%A8%A1%E5%9E%8B%E8%AE%AD%E7%BB%83%E8%B0%83%E8%AF%95%E4%BB%8E%E6%8D%9F%E5%A4%B1%E4%B8%8D%E4%B8%8B%E9%99%8D%E5%88%B0%E6%A2%AF%E5%BA%A6%E7%88%86%E7%82%B8%E7%9A%84%E5%AE%8C%E6%95%B4%E8%AF%8A%E6%96%AD%E6%8C%87%E5%8D%97/</guid>
      <description>A systematic survey of methodologies for debugging neural network training, from loss-curve diagnosis to gradient checking. Covers the diagnosis and resolution of common training failures caused by learning-rate, model-capacity, and data problems.</description>
    </item>
    <item>
      <title>How Neural Networks Learn: A Complete Walkthrough of the Training Process from Forward Propagation to Backpropagation</title>
      <link>https://answer.freetools.me/%E7%A5%9E%E7%BB%8F%E7%BD%91%E7%BB%9C%E6%98%AF%E5%A6%82%E4%BD%95%E5%AD%A6%E4%B9%A0%E7%9A%84%E4%BB%8E%E5%89%8D%E5%90%91%E4%BC%A0%E6%92%AD%E5%88%B0%E5%8F%8D%E5%90%91%E4%BC%A0%E6%92%AD%E7%9A%84%E5%AE%8C%E6%95%B4%E8%AE%AD%E7%BB%83%E8%BF%87%E7%A8%8B%E8%A7%A3%E6%9E%90/</link>
      <pubDate>Thu, 12 Mar 2026 16:06:50 +0800</pubDate>
      <guid>https://answer.freetools.me/%E7%A5%9E%E7%BB%8F%E7%BD%91%E7%BB%9C%E6%98%AF%E5%A6%82%E4%BD%95%E5%AD%A6%E4%B9%A0%E7%9A%84%E4%BB%8E%E5%89%8D%E5%90%91%E4%BC%A0%E6%92%AD%E5%88%B0%E5%8F%8D%E5%90%91%E4%BC%A0%E6%92%AD%E7%9A%84%E5%AE%8C%E6%95%B4%E8%AE%AD%E7%BB%83%E8%BF%87%E7%A8%8B%E8%A7%A3%E6%9E%90/</guid>
      <description>How Neural Networks Learn: A Complete Walkthrough of the Training Process from Forward Propagation to Backpropagation</description>
    </item>
    <item>
      <title>Bias in Neural Networks: Why a Simple Addition Matters So Much</title>
      <link>https://answer.freetools.me/%E7%A5%9E%E7%BB%8F%E7%BD%91%E7%BB%9C%E4%B8%AD%E7%9A%84%E5%81%8F%E7%BD%AE%E4%B8%BA%E4%BB%80%E4%B9%88%E7%AE%80%E5%8D%95%E7%9A%84%E5%8A%A0%E6%B3%95%E5%A6%82%E6%AD%A4%E9%87%8D%E8%A6%81/</link>
      <pubDate>Thu, 12 Mar 2026 09:47:41 +0800</pubDate>
      <guid>https://answer.freetools.me/%E7%A5%9E%E7%BB%8F%E7%BD%91%E7%BB%9C%E4%B8%AD%E7%9A%84%E5%81%8F%E7%BD%AE%E4%B8%BA%E4%BB%80%E4%B9%88%E7%AE%80%E5%8D%95%E7%9A%84%E5%8A%A0%E6%B3%95%E5%A6%82%E6%AD%A4%E9%87%8D%E8%A6%81/</guid>
      <description>From the perceptron's decision boundary to the architectural simplifications of modern large language models, an in-depth analysis of the mathematical essence of the bias term in neural networks, its role in different layers, and why some architectures choose to remove it.</description>
    </item>
    <item>
      <title>Sigmoid vs. Softmax: The Logic of Choosing Activation Functions for Multi-Class and Multi-Label Tasks</title>
      <link>https://answer.freetools.me/sigmoid%E4%B8%8Esoftmax%E5%A4%9A%E5%88%86%E7%B1%BB%E4%B8%8E%E5%A4%9A%E6%A0%87%E7%AD%BE%E4%BB%BB%E5%8A%A1%E7%9A%84%E6%BF%80%E6%B4%BB%E5%87%BD%E6%95%B0%E9%80%89%E6%8B%A9%E9%80%BB%E8%BE%91/</link>
      <pubDate>Thu, 12 Mar 2026 08:45:14 +0800</pubDate>
      <guid>https://answer.freetools.me/sigmoid%E4%B8%8Esoftmax%E5%A4%9A%E5%88%86%E7%B1%BB%E4%B8%8E%E5%A4%9A%E6%A0%87%E7%AD%BE%E4%BB%BB%E5%8A%A1%E7%9A%84%E6%BF%80%E6%B4%BB%E5%87%BD%E6%95%B0%E9%80%89%E6%8B%A9%E9%80%BB%E8%BE%91/</guid>
      <description>An in-depth analysis of the essential differences between the Sigmoid and Softmax activation functions: their mathematics, gradient behavior, and the correct way to choose between them for multi-class and multi-label tasks. From probability-distribution assumptions to training dynamics, a full look at the technical truth behind this frequent interview question.</description>
    </item>
    <item>
      <title>Logits: The Raw Truth of Neural Network Outputs, a Complete Analysis from Concept to Practice</title>
      <link>https://answer.freetools.me/logits%E7%A5%9E%E7%BB%8F%E7%BD%91%E7%BB%9C%E8%BE%93%E5%87%BA%E7%9A%84%E5%8E%9F%E5%A7%8B%E7%9C%9F%E7%9B%B8%E4%BB%8E%E6%A6%82%E5%BF%B5%E5%88%B0%E5%AE%9E%E8%B7%B5%E7%9A%84%E5%AE%8C%E6%95%B4%E8%A7%A3%E6%9E%90/</link>
      <pubDate>Thu, 12 Mar 2026 07:33:21 +0800</pubDate>
      <guid>https://answer.freetools.me/logits%E7%A5%9E%E7%BB%8F%E7%BD%91%E7%BB%9C%E8%BE%93%E5%87%BA%E7%9A%84%E5%8E%9F%E5%A7%8B%E7%9C%9F%E7%9B%B8%E4%BB%8E%E6%A6%82%E5%BF%B5%E5%88%B0%E5%AE%9E%E8%B7%B5%E7%9A%84%E5%AE%8C%E6%95%B4%E8%A7%A3%E6%9E%90/</guid>
      <description>An in-depth analysis of the nature of logits in neural networks: from their statistical origin in log-odds to their role in modern deep learning as the core carrier of a model's "thinking process". Covers the mathematical relationship between logits and softmax, how the temperature parameter works, techniques such as logit bias, and practical applications in knowledge distillation, model calibration, and uncertainty quantification.</description>
    </item>
    <item>
      <title>Why Large Models Can't Even Get Two-Digit Addition Right: A Complete Technical Analysis from Tokenization to Heuristic Neurons</title>
      <link>https://answer.freetools.me/%E4%B8%BA%E4%BB%80%E4%B9%88%E5%A4%A7%E6%A8%A1%E5%9E%8B%E8%BF%9E%E4%B8%A4%E4%BD%8D%E6%95%B0%E5%8A%A0%E6%B3%95%E9%83%BD%E7%AE%97%E4%B8%8D%E5%87%86%E4%BB%8Etokenization%E5%88%B0%E5%90%AF%E5%8F%91%E5%BC%8F%E7%A5%9E%E7%BB%8F%E5%85%83%E7%9A%84%E5%AE%8C%E6%95%B4%E6%8A%80%E6%9C%AF%E8%A7%A3%E6%9E%90/</link>
      <pubDate>Thu, 12 Mar 2026 03:10:03 +0800</pubDate>
      <guid>https://answer.freetools.me/%E4%B8%BA%E4%BB%80%E4%B9%88%E5%A4%A7%E6%A8%A1%E5%9E%8B%E8%BF%9E%E4%B8%A4%E4%BD%8D%E6%95%B0%E5%8A%A0%E6%B3%95%E9%83%BD%E7%AE%97%E4%B8%8D%E5%87%86%E4%BB%8Etokenization%E5%88%B0%E5%90%AF%E5%8F%91%E5%BC%8F%E7%A5%9E%E7%BB%8F%E5%85%83%E7%9A%84%E5%AE%8C%E6%95%B4%E6%8A%80%E6%9C%AF%E8%A7%A3%E6%9E%90/</guid>
      <description>An in-depth analysis of the technical roots of large language models' limited arithmetic ability: inconsistent tokenization of numbers, a "bag of heuristics" mechanism standing in for a true algorithm, and positional encoding losing digit-place information. Drawing on recent research including ICLR 2025 work, it explains why an AI that can pass the bar exam still gets two-digit addition wrong, and what this finding implies for AI system design.</description>
    </item>
    <item>
      <title>Why the Default Value for Label Smoothing Is 0.1: A Mathematical Analysis from Training Stability to Convergence Theory</title>
      <link>https://answer.freetools.me/%E6%A0%87%E7%AD%BE%E5%B9%B3%E6%BB%91%E7%9A%84%E9%BB%98%E8%AE%A4%E5%80%BC%E4%B8%BA%E4%BD%95%E6%98%AF0.1%E4%BB%8E%E8%AE%AD%E7%BB%83%E7%A8%B3%E5%AE%9A%E6%80%A7%E5%88%B0%E6%94%B6%E6%95%9B%E7%90%86%E8%AE%BA%E7%9A%84%E6%95%B0%E5%AD%A6%E8%A7%A3%E6%9E%90/</link>
      <pubDate>Thu, 12 Mar 2026 00:08:10 +0800</pubDate>
      <guid>https://answer.freetools.me/%E6%A0%87%E7%AD%BE%E5%B9%B3%E6%BB%91%E7%9A%84%E9%BB%98%E8%AE%A4%E5%80%BC%E4%B8%BA%E4%BD%95%E6%98%AF0.1%E4%BB%8E%E8%AE%AD%E7%BB%83%E7%A8%B3%E5%AE%9A%E6%80%A7%E5%88%B0%E6%94%B6%E6%95%9B%E7%90%86%E8%AE%BA%E7%9A%84%E6%95%B0%E5%AD%A6%E8%A7%A3%E6%9E%90/</guid>
      <description>A deep dive into label smoothing: why did ε=0.1 become the default? From Szegedy's Inception to Transformer training recipes, this reveals its regularization mechanism, its improvement of model calibration, its complicated relationship with knowledge distillation, and its unexpected effectiveness on noisy labels.</description>
    </item>
    <item>
      <title>Weight Initialization: Why One Line of Code Can Decide Whether a Neural Network Lives or Dies</title>
      <link>https://answer.freetools.me/%E6%9D%83%E9%87%8D%E5%88%9D%E5%A7%8B%E5%8C%96%E4%B8%BA%E4%BB%80%E4%B9%88%E4%B8%80%E8%A1%8C%E4%BB%A3%E7%A0%81%E8%83%BD%E5%86%B3%E5%AE%9A%E7%A5%9E%E7%BB%8F%E7%BD%91%E7%BB%9C%E7%9A%84%E7%94%9F%E6%AD%BB/</link>
      <pubDate>Wed, 11 Mar 2026 22:16:36 +0800</pubDate>
      <guid>https://answer.freetools.me/%E6%9D%83%E9%87%8D%E5%88%9D%E5%A7%8B%E5%8C%96%E4%B8%BA%E4%BB%80%E4%B9%88%E4%B8%80%E8%A1%8C%E4%BB%A3%E7%A0%81%E8%83%BD%E5%86%B3%E5%AE%9A%E7%A5%9E%E7%BB%8F%E7%BD%91%E7%BB%9C%E7%9A%84%E7%94%9F%E6%AD%BB/</guid>
      <description>From the failure of zero initialization to the mathematical derivations of Xavier and He initialization, an in-depth analysis of the principles behind neural network weight initialization, with a practical guide.</description>
    </item>
    <item>
      <title>The Dropout Mechanism: Why Randomly Dropping Neurons Actually Improves Generalization</title>
      <link>https://answer.freetools.me/dropout%E6%9C%BA%E5%88%B6%E4%B8%BA%E4%BB%80%E4%B9%88%E9%9A%8F%E6%9C%BA%E4%B8%A2%E5%BC%83%E7%A5%9E%E7%BB%8F%E5%85%83%E5%8F%8D%E8%80%8C%E8%83%BD%E6%8F%90%E5%8D%87%E6%B3%9B%E5%8C%96%E8%83%BD%E5%8A%9B/</link>
      <pubDate>Wed, 11 Mar 2026 21:31:43 +0800</pubDate>
      <guid>https://answer.freetools.me/dropout%E6%9C%BA%E5%88%B6%E4%B8%BA%E4%BB%80%E4%B9%88%E9%9A%8F%E6%9C%BA%E4%B8%A2%E5%BC%83%E7%A5%9E%E7%BB%8F%E5%85%83%E5%8F%8D%E8%80%8C%E8%83%BD%E6%8F%90%E5%8D%87%E6%B3%9B%E5%8C%96%E8%83%BD%E5%8A%9B/</guid>
      <description>An in-depth analysis of the core principles of dropout regularization: from the neuron co-adaptation problem to the ensemble-learning perspective, and from Bayesian inference to practical use in Transformers, revealing the essence of a seemingly simple technique that has profoundly shaped deep learning.</description>
    </item>
    <item>
      <title>The Softmax Function: Why This Formula Dominates Probabilistic Outputs in Neural Networks</title>
      <link>https://answer.freetools.me/softmax%E5%87%BD%E6%95%B0%E4%B8%BA%E4%BB%80%E4%B9%88%E8%BF%99%E4%B8%AA%E5%85%AC%E5%BC%8F%E7%BB%9F%E6%B2%BB%E4%BA%86%E7%A5%9E%E7%BB%8F%E7%BD%91%E7%BB%9C%E7%9A%84%E6%A6%82%E7%8E%87%E8%BE%93%E5%87%BA/</link>
      <pubDate>Wed, 11 Mar 2026 20:47:14 +0800</pubDate>
      <guid>https://answer.freetools.me/softmax%E5%87%BD%E6%95%B0%E4%B8%BA%E4%BB%80%E4%B9%88%E8%BF%99%E4%B8%AA%E5%85%AC%E5%BC%8F%E7%BB%9F%E6%B2%BB%E4%BA%86%E7%A5%9E%E7%BB%8F%E7%BD%91%E7%BB%9C%E7%9A%84%E6%A6%82%E7%8E%87%E8%BE%93%E5%87%BA/</guid>
      <description>From the amplifying effect of the exponential function to the physical meaning of the temperature parameter, and from its perfect pairing with cross-entropy loss to Transformer attention, an in-depth analysis of softmax's mathematical principles, engineering practice, and the evolution of its alternatives.</description>
    </item>
    <item>
      <title>Why Neural Networks Forget Old Knowledge When They Learn New Knowledge: The Technical Breakout from Catastrophic Forgetting to Continual Learning</title>
      <link>https://answer.freetools.me/%E4%B8%BA%E4%BB%80%E4%B9%88%E7%A5%9E%E7%BB%8F%E7%BD%91%E7%BB%9C%E5%AD%A6%E4%BC%9A%E4%BA%86%E6%96%B0%E7%9F%A5%E8%AF%86%E5%B0%B1%E4%BC%9A%E5%BF%98%E8%AE%B0%E6%97%A7%E7%9F%A5%E8%AF%86%E4%BB%8E%E7%81%BE%E9%9A%BE%E6%80%A7%E9%81%97%E5%BF%98%E5%88%B0%E6%8C%81%E7%BB%AD%E5%AD%A6%E4%B9%A0%E7%9A%84%E6%8A%80%E6%9C%AF%E7%AA%81%E5%9B%B4/</link>
      <pubDate>Mon, 09 Mar 2026 07:29:58 +0800</pubDate>
      <guid>https://answer.freetools.me/%E4%B8%BA%E4%BB%80%E4%B9%88%E7%A5%9E%E7%BB%8F%E7%BD%91%E7%BB%9C%E5%AD%A6%E4%BC%9A%E4%BA%86%E6%96%B0%E7%9F%A5%E8%AF%86%E5%B0%B1%E4%BC%9A%E5%BF%98%E8%AE%B0%E6%97%A7%E7%9F%A5%E8%AF%86%E4%BB%8E%E7%81%BE%E9%9A%BE%E6%80%A7%E9%81%97%E5%BF%98%E5%88%B0%E6%8C%81%E7%BB%AD%E5%AD%A6%E4%B9%A0%E7%9A%84%E6%8A%80%E6%9C%AF%E7%AA%81%E5%9B%B4/</guid>
      <description>An in-depth analysis of the nature, history, and solutions of catastrophic forgetting in neural networks: from McCloskey and Cohen's classic 1989 finding, through the 2017 breakthrough of EWC, to the LoRA and O-LoRA techniques of the LLM era. Systematically covers the four main technical routes of replay, regularization, gradient constraints, and parameter isolation; reveals the mathematical essence of the stability-plasticity dilemma; and examines the new challenges continual learning faces in the age of large models.</description>
    </item>
    <item>
      <title>Why Large Models Can Be Compressed to a Quarter of Their Size with Almost No Performance Loss: The Mathematical Truth of Quantization</title>
      <link>https://answer.freetools.me/%E4%B8%BA%E4%BB%80%E4%B9%88%E5%A4%A7%E6%A8%A1%E5%9E%8B%E8%83%BD%E5%8E%8B%E7%BC%A9%E5%88%B0%E5%8E%9F%E6%9D%A5%E7%9A%841/4%E5%8D%B4%E5%87%A0%E4%B9%8E%E4%B8%8D%E6%8D%9F%E5%A4%B1%E6%80%A7%E8%83%BD%E9%87%8F%E5%8C%96%E6%8A%80%E6%9C%AF%E7%9A%84%E6%95%B0%E5%AD%A6%E7%9C%9F%E7%9B%B8/</link>
      <pubDate>Sun, 08 Mar 2026 14:32:14 +0800</pubDate>
      <guid>https://answer.freetools.me/%E4%B8%BA%E4%BB%80%E4%B9%88%E5%A4%A7%E6%A8%A1%E5%9E%8B%E8%83%BD%E5%8E%8B%E7%BC%A9%E5%88%B0%E5%8E%9F%E6%9D%A5%E7%9A%841/4%E5%8D%B4%E5%87%A0%E4%B9%8E%E4%B8%8D%E6%8D%9F%E5%A4%B1%E6%80%A7%E8%83%BD%E9%87%8F%E5%8C%96%E6%8A%80%E6%9C%AF%E7%9A%84%E6%95%B0%E5%AD%A6%E7%9C%9F%E7%9B%B8/</guid>
      <description>An in-depth analysis of the mathematical principles and engineering implementation of neural network quantization. Behind the 8x compression from FP32 to INT4 lie three truths: the redundancy of neural networks, the distribution of their weights, and hardware optimization. Systematically covers symmetric and asymmetric quantization, quantization error analysis, core algorithms such as GPTQ, AWQ, and SmoothQuant, and hardware acceleration mechanisms such as INT8 Tensor Cores. Includes empirical data from quantizing 175B-parameter models, theoretical analysis of precision loss, and the limits of extremely low-bit quantization.</description>
    </item>
  </channel>
</rss>
