<?xml version="1.0" encoding="UTF-8"?>
<!-- Sitemap per https://www.sitemaps.org/protocol.html.
     Note: the sitemap XSD defines <url> children as a sequence in the order
     loc, lastmod, changefreq, priority — lastmod must precede priority. -->
<urlset xmlns="http://www.sitemaps.org/schemas/sitemap/0.9">
  <url>
    <loc>https://embeddedllm.com/</loc>
    <lastmod>2024-07-06</lastmod>
    <priority>1.0</priority>
  </url>

  <url>
    <loc>https://embeddedllm.com/newsroom/</loc>
    <lastmod>2024-07-21</lastmod>
    <priority>1.0</priority>
  </url>

  <url>
    <loc>https://embeddedllm.com/blog/</loc>
    <lastmod>2024-07-06</lastmod>
    <priority>0.8</priority>
  </url>

  <url>
    <loc>https://embeddedllm.com/blog/cuda-to-rocm-portability-case-study-liger-kernel/</loc>
    <lastmod>2024-11-05</lastmod>
    <priority>0.7</priority>
  </url>

  <url>
    <loc>https://embeddedllm.com/blog/foseal-hackathon-2025/</loc>
    <lastmod>2025-06-23</lastmod>
    <priority>0.7</priority>
  </url>

  <url>
    <loc>https://embeddedllm.com/blog/how-to-build-vllm-on-mi300x-from-source/</loc>
    <lastmod>2024-10-11</lastmod>
    <priority>0.7</priority>
  </url>

  <url>
    <loc>https://embeddedllm.com/blog/see-the-power-of-llama-32-vision-on-amd-mi300x/</loc>
    <lastmod>2024-10-28</lastmod>
    <priority>0.7</priority>
  </url>

  <url>
    <loc>https://embeddedllm.com/blog/vllm-asia-developer-day-2025/</loc>
    <lastmod>2025-04-02</lastmod>
    <priority>0.7</priority>
  </url>

  <url>
    <loc>https://embeddedllm.com/blog/vllm-now-supports-running-gguf-on-amd-radeon-gpu/</loc>
    <lastmod>2024-12-01</lastmod>
    <priority>0.7</priority>
  </url>

  <url>
    <loc>https://embeddedllm.com/blog/vllm_rocm/</loc>
    <lastmod>2023-10-27</lastmod>
    <priority>0.7</priority>
  </url>

  <url>
    <loc>https://embeddedllm.com/blog/why-jamai-base-moved-embedding-models-to-intel-xeon-cpus/</loc>
    <lastmod>2025-01-06</lastmod>
    <priority>0.7</priority>
  </url>
</urlset>