<?xml version="1.0" encoding="utf-8" standalone="yes"?>
<rss version="2.0" xmlns:atom="http://www.w3.org/2005/Atom" xmlns:content="http://purl.org/rss/1.0/modules/content/">
  <channel>
    <title>NLP on Answer</title>
    <link>https://answer.freetools.me/categories/nlp/</link>
    <description>Recent content in NLP on Answer</description>
    <generator>Hugo -- 0.152.2</generator>
    <language>en-us</language>
    <lastBuildDate>Thu, 12 Mar 2026 23:30:51 +0800</lastBuildDate>
    <atom:link href="https://answer.freetools.me/categories/nlp/index.xml" rel="self" type="application/rss+xml" />
    <item>
      <title>Word2Vec: How Two Models Taught Machines to Understand the Relationships Between Words</title>
      <link>https://answer.freetools.me/word2vec%E4%B8%A4%E4%B8%AA%E6%A8%A1%E5%9E%8B%E5%A6%82%E4%BD%95%E6%95%99%E4%BC%9A%E6%9C%BA%E5%99%A8%E7%90%86%E8%A7%A3%E8%AF%8D%E8%AF%AD%E4%B9%8B%E9%97%B4%E7%9A%84%E5%85%B3%E7%B3%BB/</link>
      <pubDate>Thu, 12 Mar 2026 23:30:51 +0800</pubDate>
      <guid>https://answer.freetools.me/word2vec%E4%B8%A4%E4%B8%AA%E6%A8%A1%E5%9E%8B%E5%A6%82%E4%BD%95%E6%95%99%E4%BC%9A%E6%9C%BA%E5%99%A8%E7%90%86%E8%A7%A3%E8%AF%8D%E8%AF%AD%E4%B9%8B%E9%97%B4%E7%9A%84%E5%85%B3%E7%B3%BB/</guid>
      <description>An in-depth look at Word2Vec's core principles, the Skip-gram and CBOW architectures, the negative sampling and hierarchical softmax optimizations, and the full technical evolution from word-vector analogies to the embedding layers of modern large models.</description>
    </item>
    <item>
      <title>Why Large Models Can't Read the Word &#34;不&#34; (not): The Negation Dilemma from Attention Mechanisms to Training Data</title>
      <link>https://answer.freetools.me/%E5%A4%A7%E6%A8%A1%E5%9E%8B%E4%B8%BA%E4%BD%95%E8%AF%BB%E4%B8%8D%E6%87%82%E4%B8%8D%E5%AD%97%E4%BB%8E%E6%B3%A8%E6%84%8F%E5%8A%9B%E6%9C%BA%E5%88%B6%E5%88%B0%E8%AE%AD%E7%BB%83%E6%95%B0%E6%8D%AE%E7%9A%84%E5%90%A6%E5%AE%9A%E8%AF%8D%E5%9B%B0%E5%A2%83/</link>
      <pubDate>Thu, 12 Mar 2026 20:41:49 +0800</pubDate>
      <guid>https://answer.freetools.me/%E5%A4%A7%E6%A8%A1%E5%9E%8B%E4%B8%BA%E4%BD%95%E8%AF%BB%E4%B8%8D%E6%87%82%E4%B8%8D%E5%AD%97%E4%BB%8E%E6%B3%A8%E6%84%8F%E5%8A%9B%E6%9C%BA%E5%88%B6%E5%88%B0%E8%AE%AD%E7%BB%83%E6%95%B0%E6%8D%AE%E7%9A%84%E5%90%A6%E5%AE%9A%E8%AF%8D%E5%9B%B0%E5%A2%83/</guid>
      <description>An in-depth analysis of why large language models struggle with negation: from 42% accuracy on the CondaQA benchmark to chance-level performance in vision-language models, tracing the technical roots of negation failures. Covers the linguistic taxonomy of negation words, inherent weaknesses of the attention mechanism, distributional bias in training data, and the full range of remedies from self-supervised pretraining to prompt engineering.</description>
    </item>
    <item>
      <title>The Embedding Layer: The First Step from Discrete Symbols to Semantic Space</title>
      <link>https://answer.freetools.me/embedding%E5%B1%82%E4%BB%8E%E7%A6%BB%E6%95%A3%E7%AC%A6%E5%8F%B7%E5%88%B0%E8%AF%AD%E4%B9%89%E7%A9%BA%E9%97%B4%E7%9A%84%E7%AC%AC%E4%B8%80%E6%AD%A5/</link>
      <pubDate>Wed, 11 Mar 2026 21:51:14 +0800</pubDate>
      <guid>https://answer.freetools.me/embedding%E5%B1%82%E4%BB%8E%E7%A6%BB%E6%95%A3%E7%AC%A6%E5%8F%B7%E5%88%B0%E8%AF%AD%E4%B9%89%E7%A9%BA%E9%97%B4%E7%9A%84%E7%AC%AC%E4%B8%80%E6%AD%A5/</guid>
      <description>An in-depth explanation of how the embedding layer in large language models works: the mapping from token IDs to high-dimensional vectors, covering the lookup-table implementation, weight sharing, gradient propagation, trade-offs in choosing the embedding dimension, and the essential difference between static and contextual embeddings.</description>
    </item>
    <item>
      <title>How Perplexity Became the Gold Standard for Language Model Evaluation: Fifty Years from Information Theory to Modern Large Models</title>
      <link>https://answer.freetools.me/%E5%9B%B0%E6%83%91%E5%BA%A6%E5%A6%82%E4%BD%95%E6%88%90%E4%B8%BA%E8%AF%AD%E8%A8%80%E6%A8%A1%E5%9E%8B%E8%AF%84%E4%BC%B0%E7%9A%84%E9%BB%84%E9%87%91%E6%A0%87%E5%87%86%E4%BB%8E%E4%BF%A1%E6%81%AF%E8%AE%BA%E5%88%B0%E7%8E%B0%E4%BB%A3%E5%A4%A7%E6%A8%A1%E5%9E%8B%E7%9A%84%E4%BA%94%E5%8D%81%E5%B9%B4%E6%BC%94%E8%BF%9B/</link>
      <pubDate>Wed, 11 Mar 2026 20:59:11 +0800</pubDate>
      <guid>https://answer.freetools.me/%E5%9B%B0%E6%83%91%E5%BA%A6%E5%A6%82%E4%BD%95%E6%88%90%E4%B8%BA%E8%AF%AD%E8%A8%80%E6%A8%A1%E5%9E%8B%E8%AF%84%E4%BC%B0%E7%9A%84%E9%BB%84%E9%87%91%E6%A0%87%E5%87%86%E4%BB%8E%E4%BF%A1%E6%81%AF%E8%AE%BA%E5%88%B0%E7%8E%B0%E4%BB%A3%E5%A4%A7%E6%A8%A1%E5%9E%8B%E7%9A%84%E4%BA%94%E5%8D%81%E5%B9%B4%E6%BC%94%E8%BF%9B/</guid>
      <description>An in-depth analysis of perplexity as a language-model evaluation metric: its mathematical foundations, information-theoretic roots, the branching-factor intuition, how it is computed, and its limitations and evolution in evaluating modern large models. From IBM's 1977 speech-recognition research to the evaluation dilemmas of the GPT era, uncovering the deeper logic behind this deceptively simple metric.</description>
    </item>
    <item>
      <title>How the Tokenizer Shapes a Large Language Model's Worldview: Thirty Years of Technical Evolution from BPE to the Byte Latent Transformer</title>
      <link>https://answer.freetools.me/tokenizer-%E5%A6%82%E4%BD%95%E5%A1%91%E9%80%A0%E5%A4%A7%E8%AF%AD%E8%A8%80%E6%A8%A1%E5%9E%8B%E7%9A%84%E4%B8%96%E7%95%8C%E8%A7%82%E4%BB%8E-bpe-%E5%88%B0-byte-latent-transformer-%E7%9A%84%E4%B8%89%E5%8D%81%E5%B9%B4%E6%8A%80%E6%9C%AF%E6%BC%94%E8%BF%9B/</link>
      <pubDate>Wed, 11 Mar 2026 14:01:29 +0800</pubDate>
      <guid>https://answer.freetools.me/tokenizer-%E5%A6%82%E4%BD%95%E5%A1%91%E9%80%A0%E5%A4%A7%E8%AF%AD%E8%A8%80%E6%A8%A1%E5%9E%8B%E7%9A%84%E4%B8%96%E7%95%8C%E8%A7%82%E4%BB%8E-bpe-%E5%88%B0-byte-latent-transformer-%E7%9A%84%E4%B8%89%E5%8D%81%E5%B9%B4%E6%8A%80%E6%9C%AF%E6%BC%94%E8%BF%9B/</guid>
      <description>An in-depth look at how tokenizers work in large language models: the technical differences among the three mainstream algorithms (BPE, WordPiece, and Unigram), the deep impact of tokenization on arithmetic reasoning, multilingual processing, and character-level tasks, and the emerging exploration of tokenizer-free architectures such as the Byte Latent Transformer.</description>
    </item>
  </channel>
</rss>
