<?xml version="1.0" encoding="UTF-8"?>
<urlset xmlns="http://www.sitemaps.org/schemas/sitemap/0.9" xmlns:news="http://www.google.com/schemas/sitemap-news/0.9">
  <url>
    <loc>https://embeddedllm.com/newsroom/</loc>
    <!-- NOTE(review): this loc is the newsroom index page, not an article permalink,
         yet it carries an article-specific title below. Google News sitemaps must list
         individual article URLs - confirm the intended permalink for this entry. -->
    <news:news>
      <news:publication>
        <news:name>EmbeddedLLM</news:name>
        <news:language>en</news:language>
      </news:publication>
      <news:publication_date>2025-07-08T00:00:00.000Z</news:publication_date>
      <news:title>Embedded LLM Launches TokenVisor, Boosting Monetisation for the AMD GPU Neocloud Community</news:title>
      <!-- author: EmbeddedLLM Team (news:authors is not defined in the sitemap-news 0.9 schema) -->
    </news:news>
  </url>
  <url>
    <loc>https://embeddedllm.com/blog/cuda-to-rocm-portability-case-study-liger-kernel/</loc>
    <news:news>
      <news:publication>
        <news:name>EmbeddedLLM</news:name>
        <news:language>en</news:language>
      </news:publication>
      <news:publication_date>2024-11-05T00:00:00.000Z</news:publication_date>
      <news:title>Liger Kernels Leap the CUDA Moat: A Case Study with Liger, LinkedIn&apos;s SOTA Training Kernels on AMD GPU</news:title>
      <!-- author: EmbeddedLLM Team (news:authors is not defined in the sitemap-news 0.9 schema) -->
    </news:news>
  </url>
  <url>
    <loc>https://embeddedllm.com/blog/foseal-hackathon-2025/</loc>
    <news:news>
      <news:publication>
        <news:name>EmbeddedLLM</news:name>
        <news:language>en</news:language>
      </news:publication>
      <news:publication_date>2025-06-23T00:00:00.000Z</news:publication_date>
      <news:title>Growing Solutions: Students Win FoSEAL Hackathon 2025 with AI-Powered Agriculture App</news:title>
      <!-- author: EmbeddedLLM Team (news:authors is not defined in the sitemap-news 0.9 schema) -->
    </news:news>
  </url>
  <url>
    <loc>https://embeddedllm.com/blog/how-to-build-vllm-on-mi300x-from-source/</loc>
    <news:news>
      <news:publication>
        <news:name>EmbeddedLLM</news:name>
        <news:language>en</news:language>
      </news:publication>
      <news:publication_date>2024-10-11T00:00:00.000Z</news:publication_date>
      <news:title>How to Build vLLM on MI300X from Source</news:title>
      <!-- author: EmbeddedLLM Team (news:authors is not defined in the sitemap-news 0.9 schema) -->
    </news:news>
  </url>
  <url>
    <loc>https://embeddedllm.com/blog/see-the-power-of-llama-32-vision-on-amd-mi300x/</loc>
    <news:news>
      <news:publication>
        <news:name>EmbeddedLLM</news:name>
        <news:language>en</news:language>
      </news:publication>
      <news:publication_date>2024-10-28T00:00:00.000Z</news:publication_date>
      <news:title>See the Power of Llama 3.2 Vision on AMD MI300X</news:title>
      <!-- author: EmbeddedLLM Team (news:authors is not defined in the sitemap-news 0.9 schema) -->
    </news:news>
  </url>
  <url>
    <loc>https://embeddedllm.com/blog/vllm-asia-developer-day-2025/</loc>
    <news:news>
      <news:publication>
        <news:name>EmbeddedLLM</news:name>
        <news:language>en</news:language>
      </news:publication>
      <news:publication_date>2025-04-02T00:00:00.000Z</news:publication_date>
      <news:title>Inaugural vLLM Asia Developer Day 2025</news:title>
      <!-- author: EmbeddedLLM Team (news:authors is not defined in the sitemap-news 0.9 schema) -->
    </news:news>
  </url>
  <url>
    <loc>https://embeddedllm.com/blog/vllm-now-supports-running-gguf-on-amd-radeon-gpu/</loc>
    <news:news>
      <news:publication>
        <news:name>EmbeddedLLM</news:name>
        <news:language>en</news:language>
      </news:publication>
      <news:publication_date>2024-12-01T00:00:00.000Z</news:publication_date>
      <news:title>vLLM Now Supports Running GGUF on AMD Radeon GPU</news:title>
      <!-- author: EmbeddedLLM Team (news:authors is not defined in the sitemap-news 0.9 schema) -->
    </news:news>
  </url>
  <url>
    <loc>https://embeddedllm.com/blog/vllm_rocm/</loc>
    <news:news>
      <news:publication>
        <news:name>EmbeddedLLM</news:name>
        <news:language>en</news:language>
      </news:publication>
      <news:publication_date>2023-10-27T00:00:00.000Z</news:publication_date>
      <news:title>High throughput LLM inference with vLLM and AMD: Achieving LLM inference parity with Nvidia</news:title>
      <!-- author: EmbeddedLLM Team (news:authors is not defined in the sitemap-news 0.9 schema) -->
    </news:news>
  </url>
  <url>
    <loc>https://embeddedllm.com/blog/why-jamai-base-moved-embedding-models-to-intel-xeon-cpus/</loc>
    <news:news>
      <news:publication>
        <news:name>EmbeddedLLM</news:name>
        <news:language>en</news:language>
      </news:publication>
      <news:publication_date>2025-01-06T00:00:00.000Z</news:publication_date>
      <news:title>Beyond GPUs: Why JamAI Base Moved Embedding Models to Intel Xeon CPUs</news:title>
      <!-- author: EmbeddedLLM Team (news:authors is not defined in the sitemap-news 0.9 schema) -->
    </news:news>
  </url>
</urlset>
