{
  "$schema": "https://learn.engineering.vips.edu/schemas/pages.schema.json",
  "publisher": {
    "name": "Vivekananda School of Engineering & Technology",
    "legalName": "Vivekananda Institute of Professional Studies - Technical Campus",
    "url": "https://learn.engineering.vips.edu",
    "logo": "https://learn.engineering.vips.edu/logo-9618729b.webp",
    "parentSite": "https://engineering.vips.edu",
    "affiliation": "Guru Gobind Singh Indraprastha University",
    "accreditation": "NAAC A++"
  },
  "license": "https://creativecommons.org/licenses/by/4.0/",
  "generated": "2026-04-24",
  "verified": "2026-04-24",
  "totalPages": 1001,
  "categories": [
    {
      "key": "models",
      "title": "AI Models",
      "description": "Frontier and open-weights large language models — capabilities, pricing, benchmarks, and when to use each.",
      "slug": "/ai-models",
      "pillar": "Curiosity",
      "count": 196,
      "target": 200,
      "indexUrl": "https://learn.engineering.vips.edu/ai-models"
    },
    {
      "key": "comparisons",
      "title": "Model & Tool Comparisons",
      "description": "Head-to-head comparisons of AI models, frameworks, and tools for real engineering decisions.",
      "slug": "/compare",
      "pillar": "Capability",
      "count": 148,
      "target": 150,
      "indexUrl": "https://learn.engineering.vips.edu/compare"
    },
    {
      "key": "mcp",
      "title": "Model Context Protocol",
      "description": "MCP overview, server directory, client patterns, and integration guides.",
      "slug": "/mcp",
      "pillar": "Creativity",
      "count": 163,
      "target": 150,
      "indexUrl": "https://learn.engineering.vips.edu/mcp"
    },
    {
      "key": "a2a",
      "title": "Agent-to-Agent Protocols",
      "description": "A2A kits, agent interop standards, and multi-agent orchestration protocols.",
      "slug": "/agent-protocols",
      "pillar": "Creativity",
      "count": 102,
      "target": 100,
      "indexUrl": "https://learn.engineering.vips.edu/agent-protocols"
    },
    {
      "key": "frameworks",
      "title": "AI Frameworks & Tooling",
      "description": "LangChain, LlamaIndex, CrewAI, AutoGen, DSPy, and the rest of the agent-stack.",
      "slug": "/frameworks",
      "pillar": "Capability",
      "count": 162,
      "target": 150,
      "indexUrl": "https://learn.engineering.vips.edu/frameworks"
    },
    {
      "key": "concepts",
      "title": "Core Concepts",
      "description": "RAG, fine-tuning, embeddings, evaluation, prompt engineering — the vocabulary of applied AI.",
      "slug": "/concepts",
      "pillar": "Curiosity",
      "count": 77,
      "target": 100,
      "indexUrl": "https://learn.engineering.vips.edu/concepts"
    },
    {
      "key": "applications",
      "title": "Applications",
      "description": "AI use-cases across domains — healthcare, finance, education, developer tooling.",
      "slug": "/applications",
      "pillar": "Contribution",
      "count": 101,
      "target": 100,
      "indexUrl": "https://learn.engineering.vips.edu/applications"
    },
    {
      "key": "vips-bridge",
      "title": "Learn at VSET",
      "description": "How VIPS (VSET) students explore these topics — labs, projects, programs, and community.",
      "slug": "/at-vips",
      "pillar": "Contribution",
      "count": 52,
      "target": 50,
      "indexUrl": "https://learn.engineering.vips.edu/at-vips"
    }
  ],
  "programmes": [
    {
      "name": "B.Tech in Computer Science and Engineering",
      "shortName": "B.Tech CSE",
      "url": "https://engineering.vips.edu/department/computer-science/cse",
      "department": "Computer Science and Engineering",
      "field": "Computer Science and Engineering",
      "duration": "P4Y",
      "credential": "B.Tech"
    },
    {
      "name": "B.Tech in Computer Science and Engineering (Cyber Security)",
      "shortName": "B.Tech CSE (Cyber Security)",
      "url": "https://engineering.vips.edu/department/computer-science/cse-cs",
      "department": "Computer Science and Engineering",
      "field": "Computer Science with Cyber Security",
      "duration": "P4Y",
      "credential": "B.Tech"
    },
    {
      "name": "B.Tech in Computer Science and Engineering (Applied Mathematics)",
      "shortName": "B.Tech CSE (Applied Mathematics)",
      "url": "https://engineering.vips.edu/department/computer-science/cse-am",
      "department": "Computer Science and Engineering",
      "field": "Computer Science with Applied Mathematics",
      "duration": "P4Y",
      "credential": "B.Tech"
    },
    {
      "name": "B.Tech in Computer Science and Engineering (Artificial Intelligence and Machine Learning)",
      "shortName": "B.Tech AI & ML",
      "url": "https://engineering.vips.edu/department/artificial-intelligence/aiml",
      "department": "Artificial Intelligence and Data Science",
      "field": "Artificial Intelligence and Machine Learning",
      "duration": "P4Y",
      "credential": "B.Tech"
    },
    {
      "name": "B.Tech in Computer Science and Engineering (Artificial Intelligence and Data Science)",
      "shortName": "B.Tech AI & DS",
      "url": "https://engineering.vips.edu/department/artificial-intelligence/aids",
      "department": "Artificial Intelligence and Data Science",
      "field": "Artificial Intelligence and Data Science",
      "duration": "P4Y",
      "credential": "B.Tech"
    },
    {
      "name": "B.Tech in Electronics Engineering (VLSI Design and Technology)",
      "shortName": "B.Tech Electronics (VLSI)",
      "url": "https://engineering.vips.edu/department/electronics/vlsi",
      "department": "Electronics Engineering",
      "field": "Electronics Engineering / VLSI",
      "duration": "P4Y",
      "credential": "B.Tech"
    },
    {
      "name": "B.Tech in Industrial Internet of Things",
      "shortName": "B.Tech IIoT",
      "url": "https://engineering.vips.edu/department/electronics/iot",
      "department": "Electronics Engineering",
      "field": "Industrial Internet of Things",
      "duration": "P4Y",
      "credential": "B.Tech"
    }
  ],
  "pages": [
    {
      "title": "Yi-Large",
      "slug": "01-ai-yi-large",
      "url": "https://learn.engineering.vips.edu/ai-models/01-ai-yi-large",
      "description": "01.AI's Yi-Large is Kai-Fu Lee's flagship Chinese/English LLM, a closed-model 2024 release optimised for reasoning, multilingual chat, and enterprise RAG.",
      "category": "models",
      "categoryTitle": "AI Models",
      "pillar": "Curiosity",
      "keywords": [
        "Yi-Large",
        "01.AI",
        "Kai-Fu Lee",
        "Chinese LLM",
        "bilingual",
        "enterprise"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Adobe Firefly Image 3",
      "slug": "adobe-firefly-3",
      "url": "https://learn.engineering.vips.edu/ai-models/adobe-firefly-3",
      "description": "Firefly Image 3 is Adobe's commercially-safe generative image model, trained on licensed Adobe Stock content and deeply integrated into Photoshop, Illustrator, and Express.",
      "category": "models",
      "categoryTitle": "AI Models",
      "pillar": "Curiosity",
      "keywords": [
        "Firefly",
        "Adobe",
        "text-to-image",
        "Generative Fill",
        "Creative Cloud"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Jamba 1.5 Large",
      "slug": "ai21-jamba-1-5-large",
      "url": "https://learn.engineering.vips.edu/ai-models/ai21-jamba-1-5-large",
      "description": "Jamba 1.5 Large is AI21 Labs' open-weights hybrid SSM-Transformer model — a 398B total / 94B active MoE combining Mamba and attention layers with 256K context.",
      "category": "models",
      "categoryTitle": "AI Models",
      "pillar": "Curiosity",
      "keywords": [
        "Jamba 1.5",
        "AI21 Labs",
        "open weights",
        "Mamba",
        "SSM",
        "hybrid architecture"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "GTE-Qwen2 7B Instruct",
      "slug": "alibaba-gte-qwen2-7b-instruct",
      "url": "https://learn.engineering.vips.edu/ai-models/alibaba-gte-qwen2-7b-instruct",
      "description": "GTE-Qwen2 7B Instruct is Alibaba DAMO's 7B-parameter open text-embedding model — topped the MTEB leaderboard at release, built on the Qwen 2 backbone for 4096-dim dense retrieval.",
      "category": "models",
      "categoryTitle": "AI Models",
      "pillar": "Curiosity",
      "keywords": [
        "GTE-Qwen2",
        "embeddings",
        "Alibaba",
        "MTEB",
        "retrieval",
        "open source"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Marco-o1",
      "slug": "alibaba-marco-o1",
      "url": "https://learn.engineering.vips.edu/ai-models/alibaba-marco-o1",
      "description": "Alibaba's Marco-o1 is an open-weight reasoning LLM that applies o1-style chain-of-thought search using Monte Carlo Tree Search over reasoning trajectories.",
      "category": "models",
      "categoryTitle": "AI Models",
      "pillar": "Curiosity",
      "keywords": [
        "Marco-o1",
        "Alibaba",
        "o1-style",
        "reasoning",
        "MCTS",
        "Qwen2"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Qwen 2.5 72B Instruct",
      "slug": "alibaba-qwen-2-5-72b",
      "url": "https://learn.engineering.vips.edu/ai-models/alibaba-qwen-2-5-72b",
      "description": "Qwen 2.5 72B Instruct is Alibaba's 2024 open-weights flagship dense model — Apache 2.0 licensed, matching Llama 3.1 405B on many benchmarks at a 72B footprint.",
      "category": "models",
      "categoryTitle": "AI Models",
      "pillar": "Curiosity",
      "keywords": [
        "Qwen 2.5",
        "Alibaba",
        "open weights",
        "72B",
        "Apache 2.0",
        "multilingual"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Qwen 2.5 Coder 32B",
      "slug": "alibaba-qwen-2-5-coder-32b",
      "url": "https://learn.engineering.vips.edu/ai-models/alibaba-qwen-2-5-coder-32b",
      "description": "Qwen 2.5 Coder 32B is Alibaba's open-weights coding flagship — a 32B dense model that matched GPT-4o on HumanEval at release and runs on a single H100.",
      "category": "models",
      "categoryTitle": "AI Models",
      "pillar": "Curiosity",
      "keywords": [
        "Qwen 2.5 Coder",
        "Alibaba",
        "open weights",
        "code LLM",
        "Apache 2.0"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Qwen2.5-Math 72B",
      "slug": "alibaba-qwen-2-5-math-72b",
      "url": "https://learn.engineering.vips.edu/ai-models/alibaba-qwen-2-5-math-72b",
      "description": "Qwen2.5-Math 72B is Alibaba's open-weights math specialist — a 72-billion-parameter Qwen2.5 fine-tune with tool-augmented (Python) reasoning for Olympiad-class problems.",
      "category": "models",
      "categoryTitle": "AI Models",
      "pillar": "Curiosity",
      "keywords": [
        "Qwen2.5-Math",
        "Alibaba",
        "open weights",
        "math",
        "tool use"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Qwen2.5-VL 72B",
      "slug": "alibaba-qwen-2-5-vl-72b",
      "url": "https://learn.engineering.vips.edu/ai-models/alibaba-qwen-2-5-vl-72b",
      "description": "Qwen2.5-VL 72B is Alibaba's top-tier open-weights vision-language model — a 72B transformer with agentic UI grounding, long-video understanding, and precise document OCR.",
      "category": "models",
      "categoryTitle": "AI Models",
      "pillar": "Curiosity",
      "keywords": [
        "Qwen2.5-VL",
        "Alibaba",
        "vision-language",
        "open weights",
        "VLM",
        "GUI agent"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Qwen2-Audio 7B",
      "slug": "alibaba-qwen-2-audio-7b",
      "url": "https://learn.engineering.vips.edu/ai-models/alibaba-qwen-2-audio-7b",
      "description": "Qwen2-Audio 7B is Alibaba's open-weights audio-language model — a 7B transformer that accepts speech, music, and environmental sounds and responds in natural-language text.",
      "category": "models",
      "categoryTitle": "AI Models",
      "pillar": "Curiosity",
      "keywords": [
        "Qwen2-Audio",
        "Alibaba",
        "audio",
        "speech",
        "open weights"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Qwen2-VL 72B",
      "slug": "alibaba-qwen-2-vl-72b",
      "url": "https://learn.engineering.vips.edu/ai-models/alibaba-qwen-2-vl-72b",
      "description": "Qwen2-VL 72B is Alibaba's flagship open vision-language model with dynamic-resolution visual encoding, strong OCR, and 20-minute video understanding on the Qwen 2 backbone.",
      "category": "models",
      "categoryTitle": "AI Models",
      "pillar": "Curiosity",
      "keywords": [
        "Qwen2-VL",
        "Alibaba",
        "VLM",
        "open source",
        "vision-language",
        "document AI"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Qwen 3",
      "slug": "alibaba-qwen-3",
      "url": "https://learn.engineering.vips.edu/ai-models/alibaba-qwen-3",
      "description": "Qwen 3 is Alibaba's 2025 flagship open-weights family — dense and MoE variants from 0.6B to 235B, Apache 2.0 licensed, with strong multilingual and reasoning behavior.",
      "category": "models",
      "categoryTitle": "AI Models",
      "pillar": "Curiosity",
      "keywords": [
        "Qwen 3",
        "Alibaba",
        "open weights",
        "MoE",
        "Apache 2.0",
        "Chinese AI"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Qwen QwQ 32B",
      "slug": "alibaba-qwq-32b",
      "url": "https://learn.engineering.vips.edu/ai-models/alibaba-qwq-32b",
      "description": "Qwen QwQ 32B is Alibaba's open-weights reasoning model — a 32B dense variant trained with reinforcement learning that competes with DeepSeek R1 at a much smaller footprint.",
      "category": "models",
      "categoryTitle": "AI Models",
      "pillar": "Curiosity",
      "keywords": [
        "QwQ 32B",
        "Alibaba",
        "reasoning model",
        "open weights",
        "o1 alternative"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Claude 2.1",
      "slug": "anthropic-claude-2-1",
      "url": "https://learn.engineering.vips.edu/ai-models/anthropic-claude-2-1",
      "description": "Claude 2.1 is Anthropic's late-2023 flagship — introduced the 200K-token context window and improved refusal behaviour. Now a legacy model referenced mostly for benchmark comparisons.",
      "category": "models",
      "categoryTitle": "AI Models",
      "pillar": "Curiosity",
      "keywords": [
        "Claude 2.1",
        "Anthropic",
        "legacy",
        "200K context",
        "deprecated"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Claude 3.5 Haiku",
      "slug": "anthropic-claude-3-5-haiku",
      "url": "https://learn.engineering.vips.edu/ai-models/anthropic-claude-3-5-haiku",
      "description": "Claude 3.5 Haiku is Anthropic's November 2024 small model — fast, cheap, and the first Haiku to match or beat Claude 3 Opus on several coding and reasoning benchmarks.",
      "category": "models",
      "categoryTitle": "AI Models",
      "pillar": "Curiosity",
      "keywords": [
        "Claude 3.5 Haiku",
        "Anthropic",
        "Claude 3.5",
        "fast LLM",
        "cheap AI"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Claude 3.5 Sonnet",
      "slug": "anthropic-claude-3-5-sonnet",
      "url": "https://learn.engineering.vips.edu/ai-models/anthropic-claude-3-5-sonnet",
      "description": "Claude 3.5 Sonnet is the June 2024 model that made Claude famous for coding — state-of-the-art SWE-bench at launch, tool use, vision, and the first computer-use preview.",
      "category": "models",
      "categoryTitle": "AI Models",
      "pillar": "Curiosity",
      "keywords": [
        "Claude 3.5 Sonnet",
        "Anthropic",
        "SWE-bench",
        "computer use",
        "coding",
        "tool use"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Claude 3.7 Sonnet",
      "slug": "anthropic-claude-3-7-sonnet",
      "url": "https://learn.engineering.vips.edu/ai-models/anthropic-claude-3-7-sonnet",
      "description": "Claude 3.7 Sonnet is Anthropic's February 2025 hybrid reasoning model — the first Claude with extended thinking, mixing fast responses and long chain-of-thought in one model.",
      "category": "models",
      "categoryTitle": "AI Models",
      "pillar": "Curiosity",
      "keywords": [
        "Claude 3.7 Sonnet",
        "Anthropic",
        "extended thinking",
        "hybrid reasoning",
        "SWE-bench"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Claude 3 Haiku",
      "slug": "anthropic-claude-3-haiku",
      "url": "https://learn.engineering.vips.edu/ai-models/anthropic-claude-3-haiku",
      "description": "Claude 3 Haiku is Anthropic's original March 2024 small, fast, cheap model — the first Haiku tier, still widely deployed in legacy pipelines despite being surpassed by Haiku 3.5 and 4.5.",
      "category": "models",
      "categoryTitle": "AI Models",
      "pillar": "Curiosity",
      "keywords": [
        "Claude 3 Haiku",
        "Anthropic",
        "Claude 3",
        "cheap LLM",
        "classification"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Claude 3 Opus",
      "slug": "anthropic-claude-3-opus",
      "url": "https://learn.engineering.vips.edu/ai-models/anthropic-claude-3-opus",
      "description": "Claude 3 Opus is Anthropic's March 2024 flagship — the original Opus tier that established Claude as a GPT-4-class frontier model with strong long-context and reasoning performance.",
      "category": "models",
      "categoryTitle": "AI Models",
      "pillar": "Curiosity",
      "keywords": [
        "Claude 3 Opus",
        "Anthropic",
        "Claude 3",
        "frontier LLM",
        "long context"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Claude 3 Sonnet",
      "slug": "anthropic-claude-3-sonnet",
      "url": "https://learn.engineering.vips.edu/ai-models/anthropic-claude-3-sonnet",
      "description": "Claude 3 Sonnet is Anthropic's March 2024 mid-tier model — the original Sonnet that balanced cost and quality in the Claude 3 launch before 3.5 Sonnet redefined the tier.",
      "category": "models",
      "categoryTitle": "AI Models",
      "pillar": "Curiosity",
      "keywords": [
        "Claude 3 Sonnet",
        "Anthropic",
        "Claude 3",
        "mid-tier LLM"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Claude Code",
      "slug": "anthropic-claude-code",
      "url": "https://learn.engineering.vips.edu/ai-models/anthropic-claude-code",
      "description": "Claude Code is Anthropic's official agentic command-line product — a terminal-first coding agent built on the Claude models, with native tool use, file editing, and git integration.",
      "category": "models",
      "categoryTitle": "AI Models",
      "pillar": "Curiosity",
      "keywords": [
        "Claude Code",
        "Anthropic",
        "agent",
        "CLI",
        "coding agent",
        "MCP"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Claude Haiku 4.5",
      "slug": "anthropic-claude-haiku-4-5",
      "url": "https://learn.engineering.vips.edu/ai-models/anthropic-claude-haiku-4-5",
      "description": "Claude Haiku 4.5 is Anthropic's fast, low-cost 2025 model — matches Sonnet 4 on many tasks at about one-third the price and double the speed, ideal for sub-tasks and real-time UX.",
      "category": "models",
      "categoryTitle": "AI Models",
      "pillar": "Curiosity",
      "keywords": [
        "Claude Haiku 4.5",
        "Anthropic",
        "Claude 4",
        "fast LLM",
        "cheap AI",
        "sub-agent"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Claude Instant 1.2",
      "slug": "anthropic-claude-instant-1-2",
      "url": "https://learn.engineering.vips.edu/ai-models/anthropic-claude-instant-1-2",
      "description": "Claude Instant 1.2 is Anthropic's 2023 low-latency chat model — the cheap, fast sibling of Claude 1. Deprecated in favour of the Haiku line but still referenced in many legacy apps.",
      "category": "models",
      "categoryTitle": "AI Models",
      "pillar": "Curiosity",
      "keywords": [
        "Claude Instant",
        "Anthropic",
        "legacy",
        "deprecated",
        "100K context"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Claude Opus 4.7",
      "slug": "anthropic-claude-opus-4-7",
      "url": "https://learn.engineering.vips.edu/ai-models/anthropic-claude-opus-4-7",
      "description": "Claude Opus 4.7 is Anthropic's top-tier model for long-context reasoning, code generation, and agentic workflows. 1M context, native tool use, strong on SWE-bench.",
      "category": "models",
      "categoryTitle": "AI Models",
      "pillar": "Curiosity",
      "keywords": [
        "Claude Opus 4.7",
        "Anthropic",
        "LLM",
        "agent",
        "SWE-bench",
        "MCP",
        "1M context"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Claude Sonnet 4.5",
      "slug": "anthropic-claude-sonnet-4-5",
      "url": "https://learn.engineering.vips.edu/ai-models/anthropic-claude-sonnet-4-5",
      "description": "Claude Sonnet 4.5 is Anthropic's September 2025 Sonnet refresh — a best-in-class coding model at the time with 200K context, extended thinking, and strong agent behaviour.",
      "category": "models",
      "categoryTitle": "AI Models",
      "pillar": "Curiosity",
      "keywords": [
        "Claude Sonnet 4.5",
        "Anthropic",
        "Claude 4",
        "SWE-bench",
        "extended thinking",
        "coding agent"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Claude Sonnet 4.6",
      "slug": "anthropic-claude-sonnet-4-6",
      "url": "https://learn.engineering.vips.edu/ai-models/anthropic-claude-sonnet-4-6",
      "description": "Claude Sonnet 4.6 is Anthropic's everyday-workhorse model — balances quality and cost, 1M context, strong coding and tool use, and powers most Claude-based production apps in 2026.",
      "category": "models",
      "categoryTitle": "AI Models",
      "pillar": "Curiosity",
      "keywords": [
        "Claude Sonnet 4.6",
        "Anthropic",
        "Claude 4",
        "coding",
        "1M context",
        "extended thinking",
        "MCP"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "OpenELM 3B",
      "slug": "apple-openelm-3b",
      "url": "https://learn.engineering.vips.edu/ai-models/apple-openelm-3b",
      "description": "Apple's OpenELM 3B is an open, on-device-friendly LLM using layer-wise scaling, released with full training recipe and CoreML export in 2024.",
      "category": "models",
      "categoryTitle": "AI Models",
      "pillar": "Curiosity",
      "keywords": [
        "OpenELM 3B",
        "Apple",
        "on-device",
        "CoreML",
        "layer-wise scaling",
        "edge LLM"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "AssemblyAI Universal-2",
      "slug": "assemblyai-universal-2",
      "url": "https://learn.engineering.vips.edu/ai-models/assemblyai-universal-2",
      "description": "AssemblyAI Universal-2 is a batch-first speech-to-text model with state-of-the-art English WER and built-in LeMUR LLM features for summaries, chapters, and Q&A.",
      "category": "models",
      "categoryTitle": "AI Models",
      "pillar": "Curiosity",
      "keywords": [
        "AssemblyAI",
        "Universal-2",
        "speech to text",
        "ASR",
        "LeMUR",
        "transcription"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Baichuan 4",
      "slug": "baichuan-baichuan-4",
      "url": "https://learn.engineering.vips.edu/ai-models/baichuan-baichuan-4",
      "description": "Baichuan Intelligent's Baichuan 4 is a closed Chinese LLM with 192k context, strong reasoning and bilingual performance, widely used in Chinese enterprise.",
      "category": "models",
      "categoryTitle": "AI Models",
      "pillar": "Curiosity",
      "keywords": [
        "Baichuan 4",
        "Baichuan Intelligent",
        "Wang Xiaochuan",
        "Chinese LLM",
        "bilingual"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "BAAI BGE-M3",
      "slug": "bge-m3",
      "url": "https://learn.engineering.vips.edu/ai-models/bge-m3",
      "description": "BGE-M3 is BAAI's open-weight multilingual embedding model — one backbone producing dense, sparse, and multi-vector retrievals over 100+ languages with 8k context.",
      "category": "models",
      "categoryTitle": "AI Models",
      "pillar": "Curiosity",
      "keywords": [
        "BGE-M3",
        "BAAI",
        "open-source embeddings",
        "hybrid search",
        "multilingual",
        "ColBERT"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "BAAI BGE Reranker v2-M3",
      "slug": "bge-reranker-v2-m3",
      "url": "https://learn.engineering.vips.edu/ai-models/bge-reranker-v2-m3",
      "description": "BGE Reranker v2-M3 is BAAI's open-weight multilingual cross-encoder reranker — pairs naturally with BGE-M3 embeddings for a fully open-source RAG pipeline.",
      "category": "models",
      "categoryTitle": "AI Models",
      "pillar": "Curiosity",
      "keywords": [
        "BGE Reranker v2",
        "BAAI",
        "reranker",
        "cross-encoder",
        "multilingual",
        "open-source"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Black Forest Labs FLUX.1 [dev]",
      "slug": "black-forest-flux-1-dev",
      "url": "https://learn.engineering.vips.edu/ai-models/black-forest-flux-1-dev",
      "description": "FLUX.1 [dev] is Black Forest Labs' open-weight 12B diffusion transformer — near-[pro] quality for research and non-commercial use, with a growing LoRA ecosystem.",
      "category": "models",
      "categoryTitle": "AI Models",
      "pillar": "Curiosity",
      "keywords": [
        "FLUX.1 dev",
        "Black Forest Labs",
        "open weights",
        "diffusion",
        "LoRA",
        "ComfyUI"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Black Forest Labs FLUX.1 [pro]",
      "slug": "black-forest-flux-1-pro",
      "url": "https://learn.engineering.vips.edu/ai-models/black-forest-flux-1-pro",
      "description": "FLUX.1 [pro] is Black Forest Labs' flagship closed text-to-image model — state-of-the-art prompt adherence and photorealism, served via bfl.ai and partner APIs.",
      "category": "models",
      "categoryTitle": "AI Models",
      "pillar": "Curiosity",
      "keywords": [
        "FLUX.1 pro",
        "Black Forest Labs",
        "text to image",
        "photorealism",
        "diffusion"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "BloombergGPT",
      "slug": "bloomberg-bloomberg-gpt",
      "url": "https://learn.engineering.vips.edu/ai-models/bloomberg-bloomberg-gpt",
      "description": "BloombergGPT is a 50-billion-parameter finance-specialised LLM trained on Bloomberg's proprietary financial corpus — a landmark domain model for finance NLP.",
      "category": "models",
      "categoryTitle": "AI Models",
      "pillar": "Curiosity",
      "keywords": [
        "BloombergGPT",
        "Bloomberg",
        "finance LLM",
        "domain model",
        "FinPile",
        "finance NLP"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Cartesia Sonic",
      "slug": "cartesia-sonic",
      "url": "https://learn.engineering.vips.edu/ai-models/cartesia-sonic",
      "description": "Sonic is Cartesia's low-latency text-to-speech model built on state-space-model (Mamba-style) architectures — sub-90 ms time-to-first-audio for real-time voice agents.",
      "category": "models",
      "categoryTitle": "AI Models",
      "pillar": "Curiosity",
      "keywords": [
        "Cartesia",
        "Sonic",
        "TTS",
        "Mamba",
        "state-space model",
        "voice agent"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Aya 23 35B",
      "slug": "cohere-aya-23-35b",
      "url": "https://learn.engineering.vips.edu/ai-models/cohere-aya-23-35b",
      "description": "Aya 23 35B is Cohere For AI's 2024 open-weights multilingual model — a 35-billion-parameter decoder built on Command R, tuned across 23 languages.",
      "category": "models",
      "categoryTitle": "AI Models",
      "pillar": "Curiosity",
      "keywords": [
        "Aya 23",
        "Cohere For AI",
        "multilingual",
        "open weights",
        "35B"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Aya Expanse 32B",
      "slug": "cohere-aya-expanse-32b",
      "url": "https://learn.engineering.vips.edu/ai-models/cohere-aya-expanse-32b",
      "description": "Aya Expanse 32B is Cohere For AI's follow-up multilingual open-weights model — a 32B Command-family decoder covering 23 languages with state-of-the-art per-language quality.",
      "category": "models",
      "categoryTitle": "AI Models",
      "pillar": "Curiosity",
      "keywords": [
        "Aya Expanse",
        "Cohere For AI",
        "multilingual",
        "open weights",
        "32B"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Command R+",
      "slug": "cohere-command-r-plus",
      "url": "https://learn.engineering.vips.edu/ai-models/cohere-command-r-plus",
      "description": "Command R+ is Cohere's 104B open-weights model purpose-built for RAG and tool-use — strong citation quality and multilingual support under the CC-BY-NC research license.",
      "category": "models",
      "categoryTitle": "AI Models",
      "pillar": "Curiosity",
      "keywords": [
        "Command R+",
        "Cohere",
        "RAG",
        "tool use",
        "multilingual LLM",
        "enterprise AI"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Command R",
      "slug": "cohere-command-r",
      "url": "https://learn.engineering.vips.edu/ai-models/cohere-command-r",
      "description": "Command R is Cohere's RAG-first production LLM — a mid-size model tuned for grounded answers with citations, tool use, and multilingual enterprise deployments.",
      "category": "models",
      "categoryTitle": "AI Models",
      "pillar": "Curiosity",
      "keywords": [
        "Command R",
        "Cohere",
        "RAG",
        "grounded answers",
        "multilingual"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Cohere Embed v3",
      "slug": "cohere-embed-v3",
      "url": "https://learn.engineering.vips.edu/ai-models/cohere-embed-v3",
      "description": "Cohere Embed v3 is a multilingual retrieval embedding model with input-type prompts (search_document, search_query) and strong BEIR scores for enterprise RAG.",
      "category": "models",
      "categoryTitle": "AI Models",
      "pillar": "Curiosity",
      "keywords": [
        "Cohere Embed v3",
        "embeddings",
        "multilingual",
        "RAG",
        "BEIR",
        "input-type"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Cohere Rerank 3",
      "slug": "cohere-rerank-3",
      "url": "https://learn.engineering.vips.edu/ai-models/cohere-rerank-3",
      "description": "Cohere Rerank 3 is a cross-encoder reranker for RAG — score (query, document) pairs to boost top-k relevance after a first-stage embedding retrieval.",
      "category": "models",
      "categoryTitle": "AI Models",
      "pillar": "Curiosity",
      "keywords": [
        "Cohere Rerank 3",
        "reranker",
        "cross-encoder",
        "RAG",
        "precision"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Cohere Rerank 3 (Multilingual)",
      "slug": "cohere-rerank-multilingual-v3",
      "url": "https://learn.engineering.vips.edu/ai-models/cohere-rerank-multilingual-v3",
      "description": "Cohere Rerank 3 Multilingual is a cross-encoder reranking model over 100+ languages — reorders retrieval hits by query relevance for RAG and search at low latency.",
      "category": "models",
      "categoryTitle": "AI Models",
      "pillar": "Curiosity",
      "keywords": [
        "Cohere",
        "rerank",
        "reranker",
        "cross-encoder",
        "RAG",
        "multilingual"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "DBRX Instruct",
      "slug": "databricks-dbrx-instruct",
      "url": "https://learn.engineering.vips.edu/ai-models/databricks-dbrx-instruct",
      "description": "Databricks DBRX Instruct is a 132B-parameter open-weight MoE model (36B active) trained on 12T tokens, optimised for enterprise data and lakehouse RAG.",
      "category": "models",
      "categoryTitle": "AI Models",
      "pillar": "Curiosity",
      "keywords": [
        "DBRX Instruct",
        "Databricks",
        "MoE",
        "open weights",
        "lakehouse",
        "enterprise"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Deepgram Nova-3",
      "slug": "deepgram-nova-3",
      "url": "https://learn.engineering.vips.edu/ai-models/deepgram-nova-3",
      "description": "Deepgram Nova-3 is a streaming-first speech-to-text model — sub-300 ms real-time transcription with diarisation, keyterm prompting, and strong accented-English WER.",
      "category": "models",
      "categoryTitle": "AI Models",
      "pillar": "Curiosity",
      "keywords": [
        "Deepgram",
        "Nova-3",
        "speech to text",
        "ASR",
        "streaming",
        "voice agent"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Google DeepMind AlphaFold 3",
      "slug": "deepmind-alphafold-3",
      "url": "https://learn.engineering.vips.edu/ai-models/deepmind-alphafold-3",
      "description": "AlphaFold 3 is Google DeepMind's biology model that predicts joint structures of proteins, DNA, RNA, ligands, and ions — a step-change for drug-discovery workflows.",
      "category": "models",
      "categoryTitle": "AI Models",
      "pillar": "Curiosity",
      "keywords": [
        "AlphaFold 3",
        "DeepMind",
        "structural biology",
        "drug discovery",
        "Isomorphic Labs",
        "AlphaFold Server"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "DeepMind AlphaProof",
      "slug": "deepmind-alphaproof",
      "url": "https://learn.engineering.vips.edu/ai-models/deepmind-alphaproof",
      "description": "AlphaProof is Google DeepMind's AI math-proof system that achieved silver-medal IMO performance — Gemini-trained reinforcement learning over Lean 4 theorem-proving environments.",
      "category": "models",
      "categoryTitle": "AI Models",
      "pillar": "Curiosity",
      "keywords": [
        "AlphaProof",
        "DeepMind",
        "theorem proving",
        "Lean 4",
        "IMO",
        "math"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "DeepSeek Coder 33B Instruct",
      "slug": "deepseek-coder-33b-instruct",
      "url": "https://learn.engineering.vips.edu/ai-models/deepseek-coder-33b-instruct",
      "description": "DeepSeek Coder 33B Instruct is DeepSeek AI's 2023 open-weights coding LLM — a 33B dense decoder trained on 2T tokens of code, fluent in 80+ programming languages.",
      "category": "models",
      "categoryTitle": "AI Models",
      "pillar": "Curiosity",
      "keywords": [
        "DeepSeek Coder",
        "DeepSeek",
        "open weights",
        "coding",
        "33B"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "DeepSeek Coder V2",
      "slug": "deepseek-coder-v2",
      "url": "https://learn.engineering.vips.edu/ai-models/deepseek-coder-v2",
      "description": "DeepSeek Coder V2 is the open-weights coding SOTA — a 236B parameter MoE (21B active) that matched closed-frontier coding models on HumanEval and LiveCodeBench.",
      "category": "models",
      "categoryTitle": "AI Models",
      "pillar": "Curiosity",
      "keywords": [
        "DeepSeek Coder V2",
        "open weights",
        "code LLM",
        "MoE",
        "coding assistant"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Janus Pro 7B",
      "slug": "deepseek-janus-pro-7b",
      "url": "https://learn.engineering.vips.edu/ai-models/deepseek-janus-pro-7b",
      "description": "Janus Pro 7B is DeepSeek AI's open-weights unified multimodal model — a 7B transformer that both understands and generates images through decoupled visual encoders.",
      "category": "models",
      "categoryTitle": "AI Models",
      "pillar": "Curiosity",
      "keywords": [
        "Janus Pro",
        "DeepSeek",
        "multimodal",
        "image generation",
        "VLM",
        "open weights"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "DeepSeek LLM 67B",
      "slug": "deepseek-llm-67b",
      "url": "https://learn.engineering.vips.edu/ai-models/deepseek-llm-67b",
      "description": "DeepSeek LLM 67B is DeepSeek AI's 2023 general-purpose open-weights model — a 67-billion-parameter dense decoder that served as the bilingual Chinese/English foundation for later DeepSeek releases.",
      "category": "models",
      "categoryTitle": "AI Models",
      "pillar": "Curiosity",
      "keywords": [
        "DeepSeek LLM",
        "DeepSeek",
        "open weights",
        "67B",
        "bilingual"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "DeepSeek-Math 7B",
      "slug": "deepseek-math-7b",
      "url": "https://learn.engineering.vips.edu/ai-models/deepseek-math-7b",
      "description": "DeepSeek-Math 7B is a specialised open-weight LLM trained on 120B math tokens, matching much larger models on MATH and GSM8K benchmarks.",
      "category": "models",
      "categoryTitle": "AI Models",
      "pillar": "Curiosity",
      "keywords": [
        "DeepSeek-Math 7B",
        "DeepSeek",
        "math LLM",
        "GSM8K",
        "MATH",
        "open weights"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "DeepSeek-Prover V2",
      "slug": "deepseek-prover-v2",
      "url": "https://learn.engineering.vips.edu/ai-models/deepseek-prover-v2",
      "description": "DeepSeek-Prover V2 is DeepSeek's open-weights formal theorem prover for Lean 4, trained with reinforcement learning and self-play — state-of-the-art on MiniF2F and PutnamBench.",
      "category": "models",
      "categoryTitle": "AI Models",
      "pillar": "Curiosity",
      "keywords": [
        "DeepSeek-Prover",
        "Lean 4",
        "theorem proving",
        "open source",
        "math"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "DeepSeek R1",
      "slug": "deepseek-r1",
      "url": "https://learn.engineering.vips.edu/ai-models/deepseek-r1",
      "description": "DeepSeek R1 is the first open-weights reasoning model to credibly compete with OpenAI o1 — MIT-licensed, with distilled variants down to 1.5B for local inference.",
      "category": "models",
      "categoryTitle": "AI Models",
      "pillar": "Curiosity",
      "keywords": [
        "DeepSeek R1",
        "reasoning model",
        "open weights",
        "MIT license",
        "o1 competitor"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "DeepSeek V2.5",
      "slug": "deepseek-v2-5",
      "url": "https://learn.engineering.vips.edu/ai-models/deepseek-v2-5",
      "description": "DeepSeek V2.5 is the combined chat + coder unification of DeepSeek's V2 line — a 236B/21B-active MoE released in September 2024 that preceded the V3 breakthrough.",
      "category": "models",
      "categoryTitle": "AI Models",
      "pillar": "Curiosity",
      "keywords": [
        "DeepSeek V2.5",
        "open weights",
        "MoE",
        "chat coder unified"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "DeepSeek V3",
      "slug": "deepseek-v3",
      "url": "https://learn.engineering.vips.edu/ai-models/deepseek-v3",
      "description": "DeepSeek V3 is a 671B parameter open-weights Mixture-of-Experts model from Chinese AI lab DeepSeek — it matched GPT-4-class quality at a fraction of the training cost, reshaping open-source LLM expectations.",
      "category": "models",
      "categoryTitle": "AI Models",
      "pillar": "Curiosity",
      "keywords": [
        "DeepSeek V3",
        "open weights",
        "MoE",
        "Chinese AI",
        "frontier open model"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "DeepSeek-VL2",
      "slug": "deepseek-vl2",
      "url": "https://learn.engineering.vips.edu/ai-models/deepseek-vl2",
      "description": "DeepSeek-VL2 is a family of mixture-of-experts vision-language models (3B / 16B / 27B total, 1B / 2.8B / 4.5B active) with strong OCR and grounding on a DeepSeekMoE backbone.",
      "category": "models",
      "categoryTitle": "AI Models",
      "pillar": "Curiosity",
      "keywords": [
        "DeepSeek-VL2",
        "MoE",
        "vision-language",
        "open source",
        "OCR"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "ElevenLabs Multilingual v2",
      "slug": "elevenlabs-multilingual-v2",
      "url": "https://learn.engineering.vips.edu/ai-models/elevenlabs-multilingual-v2",
      "description": "ElevenLabs Multilingual v2 is the leading text-to-speech model for expressive multilingual voice cloning — 29+ languages, voice design, and studio-grade dubbing.",
      "category": "models",
      "categoryTitle": "AI Models",
      "pillar": "Curiosity",
      "keywords": [
        "ElevenLabs",
        "multilingual v2",
        "text to speech",
        "voice cloning",
        "dubbing",
        "audiobooks"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Figure Helix (Figure 02)",
      "slug": "figure-figure-02-helix",
      "url": "https://learn.engineering.vips.edu/ai-models/figure-figure-02-helix",
      "description": "Helix is Figure AI's generalist vision-language-action model for the Figure 02 humanoid — a dual-system architecture with a slow VLM planner and a fast 200 Hz visuomotor policy.",
      "category": "models",
      "categoryTitle": "AI Models",
      "pillar": "Curiosity",
      "keywords": [
        "Helix",
        "Figure AI",
        "humanoid",
        "VLA",
        "Figure 02",
        "embodied AI"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Gemini 1.5 Flash",
      "slug": "google-gemini-1-5-flash",
      "url": "https://learn.engineering.vips.edu/ai-models/google-gemini-1-5-flash",
      "description": "Gemini 1.5 Flash is Google's May 2024 fast, cheap, 1M-context Flash tier — the first sub-$0.50/M token Gemini, widely deployed in 2024-25 for RAG and bulk pipelines.",
      "category": "models",
      "categoryTitle": "AI Models",
      "pillar": "Curiosity",
      "keywords": [
        "Gemini 1.5 Flash",
        "Google",
        "Vertex AI",
        "cheap LLM",
        "1M context"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Gemini 1.5 Pro",
      "slug": "google-gemini-1-5-pro",
      "url": "https://learn.engineering.vips.edu/ai-models/google-gemini-1-5-pro",
      "description": "Gemini 1.5 Pro is Google's February 2024 long-context flagship — the model that popularised 1M (and briefly 2M) token context windows and native video understanding.",
      "category": "models",
      "categoryTitle": "AI Models",
      "pillar": "Curiosity",
      "keywords": [
        "Gemini 1.5 Pro",
        "Google",
        "long context",
        "1M tokens",
        "MoE",
        "video understanding"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Gemini 2.0 Flash Thinking",
      "slug": "google-gemini-2-0-flash-thinking",
      "url": "https://learn.engineering.vips.edu/ai-models/google-gemini-2-0-flash-thinking",
      "description": "Gemini 2.0 Flash Thinking is Google's experimental December 2024 reasoning model — a 2.0 Flash variant that exposes chain-of-thought for math, science, and coding.",
      "category": "models",
      "categoryTitle": "AI Models",
      "pillar": "Curiosity",
      "keywords": [
        "Gemini 2.0 Flash Thinking",
        "Google",
        "reasoning model",
        "chain of thought",
        "experimental"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Gemini 2.0 Flash",
      "slug": "google-gemini-2-0-flash",
      "url": "https://learn.engineering.vips.edu/ai-models/google-gemini-2-0-flash",
      "description": "Gemini 2.0 Flash is Google's December 2024 agent-oriented model — native tool use, multimodal input + output, and 1M context at Flash-tier cost.",
      "category": "models",
      "categoryTitle": "AI Models",
      "pillar": "Curiosity",
      "keywords": [
        "Gemini 2.0 Flash",
        "Google",
        "Vertex AI",
        "multimodal",
        "tool use",
        "agent"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Gemini 2.5 Flash",
      "slug": "google-gemini-2-5-flash",
      "url": "https://learn.engineering.vips.edu/ai-models/google-gemini-2-5-flash",
      "description": "Gemini 2.5 Flash is Google's fast, low-cost 2025 workhorse — a thinking model with 1M context, native multimodality, and strong price/performance on Vertex AI and the Gemini API.",
      "category": "models",
      "categoryTitle": "AI Models",
      "pillar": "Curiosity",
      "keywords": [
        "Gemini 2.5 Flash",
        "Google",
        "Vertex AI",
        "multimodal",
        "thinking model",
        "1M context"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Gemini 2.5 Pro",
      "slug": "google-gemini-2-5-pro",
      "url": "https://learn.engineering.vips.edu/ai-models/google-gemini-2-5-pro",
      "description": "Gemini 2.5 Pro is Google's flagship long-context multimodal model — 2M tokens, excellent video/document understanding, and tight integration with Google Cloud and Workspace.",
      "category": "models",
      "categoryTitle": "AI Models",
      "pillar": "Curiosity",
      "keywords": [
        "Gemini 2.5 Pro",
        "Google",
        "DeepMind",
        "Vertex AI",
        "long context",
        "video"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Gemini Embedding 001",
      "slug": "google-gemini-embedding-001",
      "url": "https://learn.engineering.vips.edu/ai-models/google-gemini-embedding-001",
      "description": "Gemini Embedding 001 is Google's flagship text embedding model — 3,072-dim vectors, state-of-the-art MTEB multilingual scores, and 2K-token inputs for RAG and semantic search.",
      "category": "models",
      "categoryTitle": "AI Models",
      "pillar": "Curiosity",
      "keywords": [
        "Gemini Embedding",
        "embedding model",
        "MTEB",
        "RAG",
        "semantic search",
        "Google"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Gemini Ultra 1.0",
      "slug": "google-gemini-ultra-1-0",
      "url": "https://learn.engineering.vips.edu/ai-models/google-gemini-ultra-1-0",
      "description": "Gemini Ultra 1.0 is Google DeepMind's original top-tier multimodal model — launched February 2024 as the MMLU-leading variant of the Gemini 1.0 family.",
      "category": "models",
      "categoryTitle": "AI Models",
      "pillar": "Curiosity",
      "keywords": [
        "Gemini Ultra",
        "Google DeepMind",
        "multimodal",
        "MMLU",
        "legacy"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Gemma 2 2B",
      "slug": "google-gemma-2-2b",
      "url": "https://learn.engineering.vips.edu/ai-models/google-gemma-2-2b",
      "description": "Google's Gemma 2 2B is a tiny 2.6-billion-parameter open-weight model, distilled from larger Gemma teachers, ideal for edge and browser inference.",
      "category": "models",
      "categoryTitle": "AI Models",
      "pillar": "Curiosity",
      "keywords": [
        "Gemma 2 2B",
        "Google",
        "DeepMind",
        "edge LLM",
        "distillation",
        "WebGPU",
        "MediaPipe"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Gemma 2 9B",
      "slug": "google-gemma-2-9b",
      "url": "https://learn.engineering.vips.edu/ai-models/google-gemma-2-9b",
      "description": "Gemma 2 9B is Google's 2024 open-weights small model — a 9B dense transformer that punched above its weight on English reasoning benchmarks under the Gemma license.",
      "category": "models",
      "categoryTitle": "AI Models",
      "pillar": "Curiosity",
      "keywords": [
        "Gemma 2",
        "Google",
        "open weights",
        "small language model",
        "9B"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Gemma 3 12B",
      "slug": "google-gemma-3-12b",
      "url": "https://learn.engineering.vips.edu/ai-models/google-gemma-3-12b",
      "description": "Gemma 3 12B is Google DeepMind's open mid-size multimodal LLM with 128k context, vision input, and wide language coverage — a strong single-GPU alternative to Llama 3.1 8B.",
      "category": "models",
      "categoryTitle": "AI Models",
      "pillar": "Curiosity",
      "keywords": [
        "Gemma 3",
        "12B",
        "Google DeepMind",
        "multimodal",
        "open weights"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Gemma 3 1B",
      "slug": "google-gemma-3-1b",
      "url": "https://learn.engineering.vips.edu/ai-models/google-gemma-3-1b",
      "description": "Gemma 3 1B is Google's ultra-compact open-weights LLM — a ~1-billion-parameter model tuned for on-device inference, classroom experiments, and edge deployments.",
      "category": "models",
      "categoryTitle": "AI Models",
      "pillar": "Curiosity",
      "keywords": [
        "Gemma 3",
        "Google",
        "open weights",
        "on-device",
        "small LLM"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Gemma 3 27B",
      "slug": "google-gemma-3-27b",
      "url": "https://learn.engineering.vips.edu/ai-models/google-gemma-3-27b",
      "description": "Gemma 3 27B is Google's 2025 open-weights flagship in the Gemma family — a multimodal 27B model derived from Gemini research, with vision, long context, and the permissive Gemma license.",
      "category": "models",
      "categoryTitle": "AI Models",
      "pillar": "Curiosity",
      "keywords": [
        "Gemma 3",
        "Google",
        "open weights",
        "multimodal",
        "vision LLM"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Gemma 3 4B",
      "slug": "google-gemma-3-4b",
      "url": "https://learn.engineering.vips.edu/ai-models/google-gemma-3-4b",
      "description": "Gemma 3 4B is Google DeepMind's open 4B-parameter multimodal small LLM with 128k context, vision input, and 140+ language coverage — built on Gemini 2.0 research.",
      "category": "models",
      "categoryTitle": "AI Models",
      "pillar": "Curiosity",
      "keywords": [
        "Gemma 3",
        "Google DeepMind",
        "small language model",
        "multimodal",
        "open weights"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Imagen 3",
      "slug": "google-imagen-3",
      "url": "https://learn.engineering.vips.edu/ai-models/google-imagen-3",
      "description": "Imagen 3 is Google's text-to-image generation model — high-fidelity photorealism, strong typography, and SynthID watermarking, available via Vertex AI and the Gemini API.",
      "category": "models",
      "categoryTitle": "AI Models",
      "pillar": "Creativity",
      "keywords": [
        "Imagen 3",
        "Google",
        "text to image",
        "image generation",
        "SynthID",
        "Vertex AI"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Lyria 2",
      "slug": "google-lyria-2",
      "url": "https://learn.engineering.vips.edu/ai-models/google-lyria-2",
      "description": "Lyria 2 is Google DeepMind's second-generation text-to-music model — generates high-fidelity instrumental and vocal tracks from natural-language prompts.",
      "category": "models",
      "categoryTitle": "AI Models",
      "pillar": "Curiosity",
      "keywords": [
        "Lyria 2",
        "Google DeepMind",
        "text-to-music",
        "audio generation",
        "Music AI"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Google MathGemma",
      "slug": "google-mathgemma",
      "url": "https://learn.engineering.vips.edu/ai-models/google-mathgemma",
      "description": "MathGemma is Google DeepMind's math-specialised member of the Gemma family — fine-tuned on high-quality mathematics corpora for step-by-step reasoning and Lean proof sketching.",
      "category": "models",
      "categoryTitle": "AI Models",
      "pillar": "Curiosity",
      "keywords": [
        "MathGemma",
        "Google DeepMind",
        "math",
        "Gemma",
        "theorem proving"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Google Med-PaLM 2",
      "slug": "google-med-palm-2",
      "url": "https://learn.engineering.vips.edu/ai-models/google-med-palm-2",
      "description": "Med-PaLM 2 is Google Research's medical-specialist LLM — 86.5% on MedQA (US Medical Licensing Exam-style) and the reference for clinical-grade domain LLMs.",
      "category": "models",
      "categoryTitle": "AI Models",
      "pillar": "Curiosity",
      "keywords": [
        "Med-PaLM 2",
        "Google",
        "medical LLM",
        "MedLM",
        "MedQA",
        "healthcare AI"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "PaLM 2",
      "slug": "google-palm-2",
      "url": "https://learn.engineering.vips.edu/ai-models/google-palm-2",
      "description": "PaLM 2 is Google's 2023 flagship dense decoder LLM — the successor to PaLM that powered the original Bard and Duet AI for Workspace. Now deprecated in favour of the Gemini family.",
      "category": "models",
      "categoryTitle": "AI Models",
      "pillar": "Curiosity",
      "keywords": [
        "PaLM 2",
        "Google",
        "Bard",
        "legacy",
        "Duet AI"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Google RT-2",
      "slug": "google-robotics-rt-2",
      "url": "https://learn.engineering.vips.edu/ai-models/google-robotics-rt-2",
      "description": "RT-2 is Google DeepMind's vision-language-action (VLA) model that maps robot camera images and text instructions to low-level motor actions, generalising to novel objects and scenes.",
      "category": "models",
      "categoryTitle": "AI Models",
      "pillar": "Curiosity",
      "keywords": [
        "RT-2",
        "vision-language-action",
        "VLA",
        "Google DeepMind",
        "robotics",
        "embodied AI"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Google Veo 2",
      "slug": "google-veo-2",
      "url": "https://learn.engineering.vips.edu/ai-models/google-veo-2",
      "description": "Veo 2 is Google DeepMind's text-to-video model — 8-second 4K-capable clips with strong cinematic lighting and camera control, served via Vertex AI and Labs.",
      "category": "models",
      "categoryTitle": "AI Models",
      "pillar": "Curiosity",
      "keywords": [
        "Veo 2",
        "Google DeepMind",
        "text to video",
        "Vertex AI",
        "cinematic"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Veo 3",
      "slug": "google-veo-3",
      "url": "https://learn.engineering.vips.edu/ai-models/google-veo-3",
      "description": "Veo 3 is Google DeepMind's May 2025 text-to-video model — generates 4K-capable clips with synchronized dialogue, ambient audio, and cinematic camera motion via Vertex AI and Gemini.",
      "category": "models",
      "categoryTitle": "AI Models",
      "pillar": "Creativity",
      "keywords": [
        "Veo 3",
        "Google DeepMind",
        "text to video",
        "generative video",
        "AI video",
        "SynthID"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Vertex AI textembedding-gecko",
      "slug": "google-vertex-embedding-gecko",
      "url": "https://learn.engineering.vips.edu/ai-models/google-vertex-embedding-gecko",
      "description": "Vertex AI textembedding-gecko is Google Cloud's managed text-embedding endpoint — a Gemini-era English embedding model exposed through Vertex AI for enterprise RAG.",
      "category": "models",
      "categoryTitle": "AI Models",
      "pillar": "Curiosity",
      "keywords": [
        "textembedding-gecko",
        "Google",
        "Vertex AI",
        "embeddings",
        "RAG"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Ideogram v2",
      "slug": "ideogram-v2",
      "url": "https://learn.engineering.vips.edu/ai-models/ideogram-v2",
      "description": "Ideogram v2 is the text-to-image model best known for in-image typography — readable posters, logos, and UI mockups that other diffusion models struggle to render.",
      "category": "models",
      "categoryTitle": "AI Models",
      "pillar": "Curiosity",
      "keywords": [
        "Ideogram v2",
        "text to image",
        "typography",
        "poster generation",
        "logo generation"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Jina Embeddings v3",
      "slug": "jina-embeddings-v3",
      "url": "https://learn.engineering.vips.edu/ai-models/jina-embeddings-v3",
      "description": "Jina Embeddings v3 is an open-weight multilingual embedding model with 8k context, task LoRAs, and Matryoshka output — strong MTEB with Apache-compatible licensing.",
      "category": "models",
      "categoryTitle": "AI Models",
      "pillar": "Curiosity",
      "keywords": [
        "Jina Embeddings v3",
        "open-source embeddings",
        "multilingual",
        "Matryoshka",
        "MTEB"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Jina Embeddings v4",
      "slug": "jina-embeddings-v4",
      "url": "https://learn.engineering.vips.edu/ai-models/jina-embeddings-v4",
      "description": "Jina Embeddings v4 is Jina AI's multilingual multimodal embedding model — 3.8B params, Matryoshka dimensions, late-interaction and single-vector modes for text, image, and visual-document retrieval.",
      "category": "models",
      "categoryTitle": "AI Models",
      "pillar": "Curiosity",
      "keywords": [
        "Jina",
        "embeddings",
        "multimodal",
        "Matryoshka",
        "retrieval"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Jina Reranker v2",
      "slug": "jina-reranker-v2",
      "url": "https://learn.engineering.vips.edu/ai-models/jina-reranker-v2",
      "description": "Jina Reranker v2 is an open-weight multilingual cross-encoder reranker — fast, code-aware, and designed to pair with Jina Embeddings v3 for hybrid RAG.",
      "category": "models",
      "categoryTitle": "AI Models",
      "pillar": "Curiosity",
      "keywords": [
        "Jina Reranker v2",
        "reranker",
        "cross-encoder",
        "multilingual",
        "code retrieval",
        "open-source"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Krea 1",
      "slug": "krea-krea-1",
      "url": "https://learn.engineering.vips.edu/ai-models/krea-krea-1",
      "description": "Krea 1 is Krea AI's first in-house text-to-image foundation model — aesthetics-focused, with real-time creative controls and strong photorealism for design workflows.",
      "category": "models",
      "categoryTitle": "AI Models",
      "pillar": "Curiosity",
      "keywords": [
        "Krea 1",
        "Krea AI",
        "text-to-image",
        "real-time",
        "design"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Kling 1.5",
      "slug": "kuaishou-kling-1-5",
      "url": "https://learn.engineering.vips.edu/ai-models/kuaishou-kling-1-5",
      "description": "Kling 1.5 is Kuaishou's text-to-video diffusion-transformer model — one of the first public systems to reliably generate 2-minute 1080p videos with strong motion coherence.",
      "category": "models",
      "categoryTitle": "AI Models",
      "pillar": "Curiosity",
      "keywords": [
        "Kling",
        "Kuaishou",
        "text-to-video",
        "DiT",
        "video generation"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "LLaVA 1.6 34B",
      "slug": "llava-llava-1-6-34b",
      "url": "https://learn.engineering.vips.edu/ai-models/llava-llava-1-6-34b",
      "description": "LLaVA 1.6 34B is an open-weight vision-language model combining Nous-Hermes-Yi-34B with a CLIP vision tower, a key reference point for open VLM research.",
      "category": "models",
      "categoryTitle": "AI Models",
      "pillar": "Curiosity",
      "keywords": [
        "LLaVA 1.6",
        "LLaVA-NeXT",
        "34B",
        "open VLM",
        "vision language",
        "CLIP"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Luma Dream Machine",
      "slug": "luma-dream-machine",
      "url": "https://learn.engineering.vips.edu/ai-models/luma-dream-machine",
      "description": "Luma Dream Machine is Luma AI's text-to-video model — fast 5-second generations with strong motion, image-to-video loops, and a public API for pipeline integration.",
      "category": "models",
      "categoryTitle": "AI Models",
      "pillar": "Curiosity",
      "keywords": [
        "Luma Dream Machine",
        "Luma AI",
        "text to video",
        "image to video",
        "motion"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "BART Large",
      "slug": "meta-bart-large",
      "url": "https://learn.engineering.vips.edu/ai-models/meta-bart-large",
      "description": "BART Large is Meta AI's classic 2019 sequence-to-sequence transformer — a bidirectional-encoder, autoregressive-decoder model used for summarisation, translation, and text generation.",
      "category": "models",
      "categoryTitle": "AI Models",
      "pillar": "Curiosity",
      "keywords": [
        "BART",
        "Meta",
        "Facebook",
        "seq2seq",
        "summarisation",
        "legacy"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Code Llama 13B",
      "slug": "meta-codellama-13b",
      "url": "https://learn.engineering.vips.edu/ai-models/meta-codellama-13b",
      "description": "Code Llama 13B is Meta's 13-billion-parameter open-weights code-generation model — a Llama 2 fine-tune for Python, infilling, and instruction-following coding tasks.",
      "category": "models",
      "categoryTitle": "AI Models",
      "pillar": "Curiosity",
      "keywords": [
        "Code Llama",
        "Meta",
        "open weights",
        "coding",
        "Llama 2"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Code Llama 70B",
      "slug": "meta-codellama-70b",
      "url": "https://learn.engineering.vips.edu/ai-models/meta-codellama-70b",
      "description": "Code Llama 70B is Meta's code-specialized fine-tune of Llama 2 70B — a historical landmark for open-source coding models, now superseded by newer open coders like DeepSeek Coder V2 and Qwen Coder.",
      "category": "models",
      "categoryTitle": "AI Models",
      "pillar": "Curiosity",
      "keywords": [
        "Code Llama",
        "Meta",
        "open weights",
        "code LLM",
        "coding assistant"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Emu 2",
      "slug": "meta-emu-2",
      "url": "https://learn.engineering.vips.edu/ai-models/meta-emu-2",
      "description": "Emu 2 is Meta's large multimodal generative model — a 37B parameter vision-language model capable of image generation, in-context editing, and multimodal reasoning.",
      "category": "models",
      "categoryTitle": "AI Models",
      "pillar": "Curiosity",
      "keywords": [
        "Emu 2",
        "Meta",
        "multimodal",
        "image generation",
        "vision-language"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Llama 3.1 405B Instruct",
      "slug": "meta-llama-3-1-405b",
      "url": "https://learn.engineering.vips.edu/ai-models/meta-llama-3-1-405b",
      "description": "Llama 3.1 405B is Meta's open-weights flagship dense model — the first open release to credibly challenge closed-frontier GPT-4-class quality on reasoning and knowledge.",
      "category": "models",
      "categoryTitle": "AI Models",
      "pillar": "Curiosity",
      "keywords": [
        "Llama 3.1 405B",
        "Meta",
        "open weights",
        "flagship",
        "frontier open model"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Llama 3.1 70B Instruct",
      "slug": "meta-llama-3-1-70b-instruct",
      "url": "https://learn.engineering.vips.edu/ai-models/meta-llama-3-1-70b-instruct",
      "description": "Llama 3.1 70B Instruct is Meta's mid-flagship open-weights model from July 2024 — the production workhorse that powered most of the open-source LLM boom before Llama 3.3 superseded it.",
      "category": "models",
      "categoryTitle": "AI Models",
      "pillar": "Curiosity",
      "keywords": [
        "Llama 3.1",
        "70B",
        "Meta",
        "open weights",
        "instruct",
        "production LLM"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Llama 3.1 8B Instruct",
      "slug": "meta-llama-3-1-8b-instruct",
      "url": "https://learn.engineering.vips.edu/ai-models/meta-llama-3-1-8b-instruct",
      "description": "Llama 3.1 8B Instruct is Meta's small open-weights workhorse — an 8B dense model tuned for edge inference, laptops, and low-cost classification and summarization pipelines.",
      "category": "models",
      "categoryTitle": "AI Models",
      "pillar": "Curiosity",
      "keywords": [
        "Llama 3.1 8B",
        "Meta",
        "open weights",
        "edge LLM",
        "on-device AI"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Llama 3.3 70B Instruct",
      "slug": "meta-llama-3-3-70b",
      "url": "https://learn.engineering.vips.edu/ai-models/meta-llama-3-3-70b",
      "description": "Meta's Llama 3.3 70B is a drop-in upgrade to Llama 3.1 70B — matching 405B-level quality in a 70B body through better post-training. The pragmatic open-weights workhorse.",
      "category": "models",
      "categoryTitle": "AI Models",
      "pillar": "Curiosity",
      "keywords": [
        "Llama 3.3",
        "Meta",
        "open weights",
        "70B",
        "instruct",
        "self-host"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Llama 4 Maverick",
      "slug": "meta-llama-4-maverick",
      "url": "https://learn.engineering.vips.edu/ai-models/meta-llama-4-maverick",
      "description": "Meta's open-weights Llama 4 Maverick delivers frontier-class reasoning at self-host economics. Ideal when weights access, data sovereignty, or local inference matters more than absolute SOTA.",
      "category": "models",
      "categoryTitle": "AI Models",
      "pillar": "Curiosity",
      "keywords": [
        "Llama 4",
        "Meta",
        "open weights",
        "MoE",
        "self-host",
        "open source AI"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Llama 4 Scout",
      "slug": "meta-llama-4-scout",
      "url": "https://learn.engineering.vips.edu/ai-models/meta-llama-4-scout",
      "description": "Meta's Llama 4 Scout is the smaller, edge-friendly sibling of Maverick — a 17B active / 109B total Mixture-of-Experts model with long context, designed for single-GPU inference and efficient fine-tuning.",
      "category": "models",
      "categoryTitle": "AI Models",
      "pillar": "Curiosity",
      "keywords": [
        "Llama 4 Scout",
        "Meta",
        "open weights",
        "MoE",
        "single-GPU",
        "edge LLM"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Llama Guard 3",
      "slug": "meta-llama-guard-3",
      "url": "https://learn.engineering.vips.edu/ai-models/meta-llama-guard-3",
      "description": "Llama Guard 3 is Meta's open-weights content-moderation classifier — an 8B Llama fine-tune that labels prompts and responses against a configurable safety taxonomy.",
      "category": "models",
      "categoryTitle": "AI Models",
      "pillar": "Curiosity",
      "keywords": [
        "Llama Guard",
        "Meta",
        "safety",
        "moderation",
        "classifier",
        "open weights"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Meta MobileLLM 1.5B",
      "slug": "meta-mobilellm-1-5b",
      "url": "https://learn.engineering.vips.edu/ai-models/meta-mobilellm-1-5b",
      "description": "MobileLLM 1.5B is Meta's sub-billion / sub-2B small language model family optimised for on-device inference — deep-and-thin architecture, embedding sharing, and grouped-query attention.",
      "category": "models",
      "categoryTitle": "AI Models",
      "pillar": "Curiosity",
      "keywords": [
        "MobileLLM",
        "Meta",
        "on-device",
        "edge AI",
        "small language model"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Prompt Guard 2",
      "slug": "meta-prompt-guard-2",
      "url": "https://learn.engineering.vips.edu/ai-models/meta-prompt-guard-2",
      "description": "Prompt Guard 2 is Meta's open-weights small classifier for detecting prompt-injection and jailbreak attempts — a sidecar filter designed to sit in front of any LLM.",
      "category": "models",
      "categoryTitle": "AI Models",
      "pillar": "Curiosity",
      "keywords": [
        "Prompt Guard",
        "Meta",
        "prompt injection",
        "jailbreak",
        "safety",
        "open weights"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "SeamlessM4T v2",
      "slug": "meta-seamlessm4t-v2",
      "url": "https://learn.engineering.vips.edu/ai-models/meta-seamlessm4t-v2",
      "description": "SeamlessM4T v2 is Meta's massively multilingual and multimodal translation model — speech and text in and out across nearly 100 languages through a unified encoder-decoder stack.",
      "category": "models",
      "categoryTitle": "AI Models",
      "pillar": "Curiosity",
      "keywords": [
        "SeamlessM4T",
        "Meta",
        "translation",
        "speech",
        "multilingual"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "E5-Large v2",
      "slug": "microsoft-e5-large-v2",
      "url": "https://learn.engineering.vips.edu/ai-models/microsoft-e5-large-v2",
      "description": "E5-Large v2 is Microsoft Research's open-weights English text embedding model — a ~335M-parameter MiniLM-derived encoder widely used as a strong, cheap baseline for retrieval.",
      "category": "models",
      "categoryTitle": "AI Models",
      "pillar": "Curiosity",
      "keywords": [
        "E5",
        "E5-Large",
        "Microsoft",
        "embeddings",
        "retrieval",
        "MTEB"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Microsoft Florence-2",
      "slug": "microsoft-florence-2",
      "url": "https://learn.engineering.vips.edu/ai-models/microsoft-florence-2",
      "description": "Florence-2 is Microsoft's open vision foundation model (0.23B / 0.77B) with a unified prompt-based interface for captioning, detection, segmentation, OCR, and grounding.",
      "category": "models",
      "categoryTitle": "AI Models",
      "pillar": "Curiosity",
      "keywords": [
        "Florence-2",
        "Microsoft",
        "vision foundation model",
        "open source",
        "OCR",
        "detection"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Orca-Math 7B",
      "slug": "microsoft-orca-math-7b",
      "url": "https://learn.engineering.vips.edu/ai-models/microsoft-orca-math-7b",
      "description": "Microsoft's Orca-Math 7B is a math-specialised small LLM fine-tuned on synthetic GPT-4-generated math dialogues and feedback, strong on GSM8K.",
      "category": "models",
      "categoryTitle": "AI Models",
      "pillar": "Curiosity",
      "keywords": [
        "Orca-Math 7B",
        "Microsoft",
        "math LLM",
        "GSM8K",
        "SLM",
        "Mistral"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Phi-2",
      "slug": "microsoft-phi-2",
      "url": "https://learn.engineering.vips.edu/ai-models/microsoft-phi-2",
      "description": "Microsoft's Phi-2 is a 2.7B-parameter 'small but mighty' LLM trained on textbook-quality data, demonstrating how data curation beats raw model scale.",
      "category": "models",
      "categoryTitle": "AI Models",
      "pillar": "Curiosity",
      "keywords": [
        "Phi-2",
        "Microsoft",
        "small LLM",
        "SLM",
        "textbook data",
        "synthetic training"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Phi-3.5 Mini",
      "slug": "microsoft-phi-3-5-mini",
      "url": "https://learn.engineering.vips.edu/ai-models/microsoft-phi-3-5-mini",
      "description": "Phi-3.5 Mini is Microsoft's 3.8B open-weights tiny model — designed for on-device inference on phones and laptops with surprisingly capable reasoning for its size.",
      "category": "models",
      "categoryTitle": "AI Models",
      "pillar": "Curiosity",
      "keywords": [
        "Phi-3.5 Mini",
        "Microsoft",
        "open weights",
        "small language model",
        "on-device AI"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Phi-3-mini 128k",
      "slug": "microsoft-phi-3-mini-128k",
      "url": "https://learn.engineering.vips.edu/ai-models/microsoft-phi-3-mini-128k",
      "description": "Phi-3-mini 128k is Microsoft's 3.8B-parameter small language model with a 128k context window — a tiny, laptop-runnable LLM that matches GPT-3.5 on many benchmarks.",
      "category": "models",
      "categoryTitle": "AI Models",
      "pillar": "Curiosity",
      "keywords": [
        "Phi-3",
        "Microsoft",
        "small language model",
        "long context",
        "edge AI"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Phi-4 Multimodal",
      "slug": "microsoft-phi-4-multimodal",
      "url": "https://learn.engineering.vips.edu/ai-models/microsoft-phi-4-multimodal",
      "description": "Microsoft's Phi-4 Multimodal is a 5.6B SLM unifying text, vision, and speech in one compact model, tuned for on-device and edge inference.",
      "category": "models",
      "categoryTitle": "AI Models",
      "pillar": "Curiosity",
      "keywords": [
        "Phi-4 Multimodal",
        "Microsoft",
        "SLM",
        "multimodal",
        "speech",
        "vision",
        "on-device"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Phi-4",
      "slug": "microsoft-phi-4",
      "url": "https://learn.engineering.vips.edu/ai-models/microsoft-phi-4",
      "description": "Phi-4 is Microsoft Research's 14B open-weights model focused on reasoning — trained with a synthetic-data-heavy recipe that punches far above its weight class on math, logic, and coding benchmarks.",
      "category": "models",
      "categoryTitle": "AI Models",
      "pillar": "Curiosity",
      "keywords": [
        "Phi-4",
        "Microsoft",
        "open weights",
        "small language model",
        "synthetic data",
        "reasoning"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Midjourney v6.1",
      "slug": "midjourney-v6-1",
      "url": "https://learn.engineering.vips.edu/ai-models/midjourney-v6-1",
      "description": "Midjourney v6.1 is the premier artistic text-to-image model — exceptional aesthetic quality accessed through Discord and the Midjourney web app rather than a public API.",
      "category": "models",
      "categoryTitle": "AI Models",
      "pillar": "Curiosity",
      "keywords": [
        "Midjourney v6.1",
        "Midjourney",
        "text to image",
        "artistic",
        "Discord",
        "concept art"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "MiniMax Hailuo",
      "slug": "minimax-hailuo-ai",
      "url": "https://learn.engineering.vips.edu/ai-models/minimax-hailuo-ai",
      "description": "Hailuo is MiniMax's text- and image-to-video model — a diffusion-transformer that became a viral favourite for fluid motion, realistic physics, and cinematic camera work.",
      "category": "models",
      "categoryTitle": "AI Models",
      "pillar": "Curiosity",
      "keywords": [
        "Hailuo",
        "MiniMax",
        "text-to-video",
        "image-to-video",
        "video generation"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Mixtral 8x22B",
      "slug": "mistral-8x22b",
      "url": "https://learn.engineering.vips.edu/ai-models/mistral-8x22b",
      "description": "Mixtral 8x22B is Mistral's flagship open-weights Mixture-of-Experts model — 141B total, 39B active per token, Apache 2.0 licensed with strong multilingual and coding ability.",
      "category": "models",
      "categoryTitle": "AI Models",
      "pillar": "Curiosity",
      "keywords": [
        "Mixtral 8x22B",
        "Mistral AI",
        "open weights",
        "MoE",
        "Apache 2.0"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Mistral Codestral 22B",
      "slug": "mistral-codestral-22b",
      "url": "https://learn.engineering.vips.edu/ai-models/mistral-codestral-22b",
      "description": "Codestral 22B is Mistral AI's open-weight code LLM — 22B parameters across 80+ programming languages with strong HumanEval and fill-in-the-middle for IDE autocomplete.",
      "category": "models",
      "categoryTitle": "AI Models",
      "pillar": "Curiosity",
      "keywords": [
        "Codestral",
        "Mistral AI",
        "code LLM",
        "fill in the middle",
        "HumanEval",
        "open weights"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Codestral",
      "slug": "mistral-codestral",
      "url": "https://learn.engineering.vips.edu/ai-models/mistral-codestral",
      "description": "Codestral is Mistral AI's code-specialized open-weights model — trained on 80+ programming languages with strong fill-in-the-middle support, shipped under the Mistral Non-Production License.",
      "category": "models",
      "categoryTitle": "AI Models",
      "pillar": "Curiosity",
      "keywords": [
        "Codestral",
        "Mistral AI",
        "code LLM",
        "open weights",
        "FIM",
        "coding assistant"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Mistral Large 3",
      "slug": "mistral-large-3",
      "url": "https://learn.engineering.vips.edu/ai-models/mistral-large-3",
      "description": "Mistral Large 3 is Mistral AI's European flagship — strong multilingual reasoning, function calling, and data-sovereignty-friendly deployment through Mistral La Plateforme and Azure.",
      "category": "models",
      "categoryTitle": "AI Models",
      "pillar": "Curiosity",
      "keywords": [
        "Mistral Large 3",
        "Mistral AI",
        "European",
        "GDPR",
        "function calling",
        "multilingual"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Mathstral 7B",
      "slug": "mistral-mathstral-7b",
      "url": "https://learn.engineering.vips.edu/ai-models/mistral-mathstral-7b",
      "description": "Mathstral 7B is Mistral AI's open-weights math specialist — a 7B Mistral fine-tune aligned with Project Numina to solve Olympiad-style problems with chain-of-thought.",
      "category": "models",
      "categoryTitle": "AI Models",
      "pillar": "Curiosity",
      "keywords": [
        "Mathstral",
        "Mistral",
        "math",
        "STEM",
        "open weights",
        "Project Numina"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Mistral Embed",
      "slug": "mistral-mistral-embed",
      "url": "https://learn.engineering.vips.edu/ai-models/mistral-mistral-embed",
      "description": "Mistral Embed is Mistral AI's general-purpose text embedding model — 1024 dimensions, strong English and French quality, served from la Plateforme alongside Mistral's LLMs.",
      "category": "models",
      "categoryTitle": "AI Models",
      "pillar": "Curiosity",
      "keywords": [
        "Mistral Embed",
        "Mistral AI",
        "embeddings",
        "RAG",
        "la Plateforme",
        "EU hosting"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Mistral NeMo 12B",
      "slug": "mistral-nemo-12b",
      "url": "https://learn.engineering.vips.edu/ai-models/mistral-nemo-12b",
      "description": "Mistral NeMo 12B is a 12B open-weights model co-developed by Mistral and NVIDIA — Apache 2.0 licensed, multilingual, with 128K context for its size class.",
      "category": "models",
      "categoryTitle": "AI Models",
      "pillar": "Curiosity",
      "keywords": [
        "Mistral NeMo",
        "NVIDIA",
        "open weights",
        "12B",
        "multilingual",
        "Apache 2.0"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Pixtral 12B",
      "slug": "mistral-pixtral-12b",
      "url": "https://learn.engineering.vips.edu/ai-models/mistral-pixtral-12b",
      "description": "Pixtral 12B is Mistral AI's first open-weights vision-language model — a 12B parameter multimodal transformer capable of image captioning, document VQA, and chart reasoning.",
      "category": "models",
      "categoryTitle": "AI Models",
      "pillar": "Curiosity",
      "keywords": [
        "Pixtral",
        "Mistral",
        "vision-language",
        "multimodal",
        "open weights"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Mistral Small 24B",
      "slug": "mistral-small-24b",
      "url": "https://learn.engineering.vips.edu/ai-models/mistral-small-24b",
      "description": "Mistral Small 24B is Mistral AI's early-2025 open-weights mid-size model — a 24-billion-parameter dense decoder designed for strong reasoning per dollar on single-GPU servers.",
      "category": "models",
      "categoryTitle": "AI Models",
      "pillar": "Curiosity",
      "keywords": [
        "Mistral Small",
        "Mistral",
        "open weights",
        "mid-size LLM",
        "24B"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Mistral Small 3",
      "slug": "mistral-small-3",
      "url": "https://learn.engineering.vips.edu/ai-models/mistral-small-3",
      "description": "Mistral Small 3 is a 24B open-weights model from Mistral AI — Apache 2.0 licensed, optimized for low-latency inference on a single GPU, and competitive with larger Llama variants.",
      "category": "models",
      "categoryTitle": "AI Models",
      "pillar": "Curiosity",
      "keywords": [
        "Mistral Small 3",
        "Mistral AI",
        "open weights",
        "Apache 2.0",
        "low latency"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "mxbai-rerank-large-v1",
      "slug": "mixedbread-mxbai-rerank-large-v1",
      "url": "https://learn.engineering.vips.edu/ai-models/mixedbread-mxbai-rerank-large-v1",
      "description": "mxbai-rerank-large-v1 is mixedbread.ai's open cross-encoder reranking model — state-of-the-art open reranker on BEIR, Apache 2.0 licensed, drop-in replacement for Cohere Rerank.",
      "category": "models",
      "categoryTitle": "AI Models",
      "pillar": "Curiosity",
      "keywords": [
        "mxbai-rerank",
        "mixedbread",
        "reranker",
        "open source",
        "RAG",
        "BEIR"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Molmo 72B",
      "slug": "molmo-molmo-72b",
      "url": "https://learn.engineering.vips.edu/ai-models/molmo-molmo-72b",
      "description": "Allen AI's Molmo 72B is an open-weight multimodal LLM trained on the fully open PixMo dataset, rivalling closed VLMs on visual reasoning.",
      "category": "models",
      "categoryTitle": "AI Models",
      "pillar": "Curiosity",
      "keywords": [
        "Molmo 72B",
        "Allen AI",
        "Ai2",
        "open VLM",
        "PixMo",
        "multimodal",
        "vision"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Kimi K2",
      "slug": "moonshot-kimi-k2",
      "url": "https://learn.engineering.vips.edu/ai-models/moonshot-kimi-k2",
      "description": "Moonshot AI's Kimi K2 is a trillion-parameter MoE model with ultra-long context, strong Chinese/English reasoning, and agentic coding.",
      "category": "models",
      "categoryTitle": "AI Models",
      "pillar": "Curiosity",
      "keywords": [
        "Kimi K2",
        "Moonshot AI",
        "MoE",
        "long context",
        "agentic",
        "Chinese LLM",
        "open weights"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "MPT-30B",
      "slug": "mosaic-mpt-30b",
      "url": "https://learn.engineering.vips.edu/ai-models/mosaic-mpt-30b",
      "description": "MosaicML's MPT-30B is a 2023 open-weight 30-billion-parameter transformer with 8k context, an early commercial-licence LLM still used as a baseline.",
      "category": "models",
      "categoryTitle": "AI Models",
      "pillar": "Curiosity",
      "keywords": [
        "MPT-30B",
        "MosaicML",
        "open weights",
        "Apache",
        "transformer",
        "FlashAttention"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Nomic Embed Text v2",
      "slug": "nomic-embed-text-v2",
      "url": "https://learn.engineering.vips.edu/ai-models/nomic-embed-text-v2",
      "description": "Nomic Embed Text v2 is an open-weight, fully-auditable multilingual embedding model with Matryoshka support and long-context retrieval — a transparent alternative to closed APIs.",
      "category": "models",
      "categoryTitle": "AI Models",
      "pillar": "Curiosity",
      "keywords": [
        "Nomic Embed",
        "open-source embeddings",
        "multilingual",
        "Matryoshka",
        "reproducible"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "NVIDIA Cosmos",
      "slug": "nvidia-cosmos-world-model",
      "url": "https://learn.engineering.vips.edu/ai-models/nvidia-cosmos-world-model",
      "description": "NVIDIA Cosmos is a family of world foundation models that generate physics-aware video futures for training and evaluating physical-AI agents — robots, autonomous vehicles, and simulators.",
      "category": "models",
      "categoryTitle": "AI Models",
      "pillar": "Curiosity",
      "keywords": [
        "Cosmos",
        "NVIDIA",
        "world model",
        "physical AI",
        "robotics",
        "autonomous vehicles"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Llama 3.1 Nemotron 70B Instruct",
      "slug": "nvidia-nemotron-70b-instruct",
      "url": "https://learn.engineering.vips.edu/ai-models/nvidia-nemotron-70b-instruct",
      "description": "Nemotron 70B Instruct is NVIDIA's fine-tune of Llama 3.1 70B with reward-model-driven post-training — open-weights, and notably strong on LMSYS Arena versus the Llama 3.1 70B base.",
      "category": "models",
      "categoryTitle": "AI Models",
      "pillar": "Curiosity",
      "keywords": [
        "Nemotron 70B",
        "NVIDIA",
        "Llama 3.1 fine-tune",
        "open weights",
        "RLHF"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Nemotron Mini 4B Instruct",
      "slug": "nvidia-nemotron-mini-4b",
      "url": "https://learn.engineering.vips.edu/ai-models/nvidia-nemotron-mini-4b",
      "description": "Nemotron Mini 4B Instruct is NVIDIA's compact open-weights LLM tuned for on-device chat — a 4-billion-parameter Minitron-derived model optimised for low-latency RTX GPUs.",
      "category": "models",
      "categoryTitle": "AI Models",
      "pillar": "Curiosity",
      "keywords": [
        "Nemotron Mini",
        "NVIDIA",
        "on-device",
        "4B",
        "Minitron",
        "ACE"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Nemotron Ultra 253B",
      "slug": "nvidia-nemotron-ultra-253b",
      "url": "https://learn.engineering.vips.edu/ai-models/nvidia-nemotron-ultra-253b",
      "description": "Nemotron Ultra 253B is NVIDIA's top-tier open-weights reasoning LLM — a 253B Llama-family model tuned for enterprise reasoning, math, and code.",
      "category": "models",
      "categoryTitle": "AI Models",
      "pillar": "Curiosity",
      "keywords": [
        "Nemotron Ultra",
        "NVIDIA",
        "Llama Nemotron",
        "reasoning",
        "open weights"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "NV-Embed v2",
      "slug": "nvidia-nv-embed-v2",
      "url": "https://learn.engineering.vips.edu/ai-models/nvidia-nv-embed-v2",
      "description": "NV-Embed v2 is NVIDIA's open-weights English embedding model — a Mistral 7B fine-tune that topped the MTEB leaderboard with leading retrieval, classification, and STS scores.",
      "category": "models",
      "categoryTitle": "AI Models",
      "pillar": "Curiosity",
      "keywords": [
        "NV-Embed",
        "NVIDIA",
        "embeddings",
        "MTEB",
        "retrieval"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "text-embedding-ada-002 (legacy)",
      "slug": "openai-ada-002",
      "url": "https://learn.engineering.vips.edu/ai-models/openai-ada-002",
      "description": "text-embedding-ada-002 is OpenAI's 2022 text-embedding model — a 1536-dim dense embedder that became the de facto default for early RAG systems. Now superseded by text-embedding-3-small/large.",
      "category": "models",
      "categoryTitle": "AI Models",
      "pillar": "Curiosity",
      "keywords": [
        "ada-002",
        "OpenAI",
        "embeddings",
        "legacy",
        "RAG"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "ChatGPT 4o Canvas",
      "slug": "openai-chatgpt-4o-canvas",
      "url": "https://learn.engineering.vips.edu/ai-models/openai-chatgpt-4o-canvas",
      "description": "ChatGPT 4o Canvas is OpenAI's side-by-side writing and coding surface — a GPT-4o variant tuned for inline edits, structured document drafting, and collaborative code review in the ChatGPT app.",
      "category": "models",
      "categoryTitle": "AI Models",
      "pillar": "Curiosity",
      "keywords": [
        "ChatGPT Canvas",
        "GPT-4o",
        "OpenAI",
        "writing",
        "collaborative editing"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "DALL·E 2",
      "slug": "openai-dall-e-2",
      "url": "https://learn.engineering.vips.edu/ai-models/openai-dall-e-2",
      "description": "DALL·E 2 is OpenAI's 2022 text-to-image diffusion model that popularised prompt-based image generation with unCLIP — a CLIP-guided prior plus cascaded diffusion decoder.",
      "category": "models",
      "categoryTitle": "AI Models",
      "pillar": "Curiosity",
      "keywords": [
        "DALL-E 2",
        "DALL·E 2",
        "OpenAI",
        "text-to-image",
        "diffusion",
        "unCLIP"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "OpenAI DALL·E 3",
      "slug": "openai-dall-e-3",
      "url": "https://learn.engineering.vips.edu/ai-models/openai-dall-e-3",
      "description": "DALL·E 3 is OpenAI's text-to-image model integrated into ChatGPT and the OpenAI API — known for strong prompt adherence, readable text, and SDXL-era quality.",
      "category": "models",
      "categoryTitle": "AI Models",
      "pillar": "Curiosity",
      "keywords": [
        "DALL-E 3",
        "OpenAI",
        "text to image",
        "diffusion",
        "ChatGPT images",
        "image generation"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "GPT-3.5 Turbo",
      "slug": "openai-gpt-3-5-turbo",
      "url": "https://learn.engineering.vips.edu/ai-models/openai-gpt-3-5-turbo",
      "description": "GPT-3.5 Turbo is OpenAI's original production workhorse from the ChatGPT era — a fast, cheap 16K-context model that powered most LLM apps built between 2023 and 2024.",
      "category": "models",
      "categoryTitle": "AI Models",
      "pillar": "Curiosity",
      "keywords": [
        "GPT-3.5 Turbo",
        "OpenAI",
        "ChatGPT",
        "legacy",
        "16K context"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "GPT-4.1",
      "slug": "openai-gpt-4-1",
      "url": "https://learn.engineering.vips.edu/ai-models/openai-gpt-4-1",
      "description": "GPT-4.1 is OpenAI's April 2025 refresh of GPT-4 — a 1M-context, instruction-following model built for coding, long-document work, and agent pipelines at lower cost than GPT-4o.",
      "category": "models",
      "categoryTitle": "AI Models",
      "pillar": "Curiosity",
      "keywords": [
        "GPT-4.1",
        "OpenAI",
        "LLM",
        "coding",
        "1M context",
        "instruction following",
        "API"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "GPT-4 Turbo",
      "slug": "openai-gpt-4-turbo",
      "url": "https://learn.engineering.vips.edu/ai-models/openai-gpt-4-turbo",
      "description": "GPT-4 Turbo is OpenAI's late-2023 flagship — a 128K-context GPT-4 variant with cheaper pricing, JSON mode, and vision input. Still widely used in legacy enterprise stacks.",
      "category": "models",
      "categoryTitle": "AI Models",
      "pillar": "Curiosity",
      "keywords": [
        "GPT-4 Turbo",
        "OpenAI",
        "128K context",
        "JSON mode",
        "function calling",
        "legacy"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "GPT-4o Vision",
      "slug": "openai-gpt-4o-vision",
      "url": "https://learn.engineering.vips.edu/ai-models/openai-gpt-4o-vision",
      "description": "GPT-4o's native vision capability lets the omni-modal model read charts, screenshots, handwriting, and documents — the workhorse VLM behind ChatGPT's image-understanding features.",
      "category": "models",
      "categoryTitle": "AI Models",
      "pillar": "Curiosity",
      "keywords": [
        "GPT-4o",
        "vision",
        "VLM",
        "OpenAI",
        "multimodal",
        "OCR"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "GPT-4o",
      "slug": "openai-gpt-4o",
      "url": "https://learn.engineering.vips.edu/ai-models/openai-gpt-4o",
      "description": "GPT-4o is OpenAI's 2024 omni-modal flagship — a single model that natively handles text, vision, and audio with ~320ms voice latency and strong reasoning at lower cost than GPT-4 Turbo.",
      "category": "models",
      "categoryTitle": "AI Models",
      "pillar": "Curiosity",
      "keywords": [
        "GPT-4o",
        "OpenAI",
        "multimodal",
        "omni",
        "voice AI",
        "ChatGPT",
        "vision model"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "GPT-5 mini",
      "slug": "openai-gpt-5-mini",
      "url": "https://learn.engineering.vips.edu/ai-models/openai-gpt-5-mini",
      "description": "GPT-5 mini is OpenAI's cost-efficient tier of the GPT-5 family — a unified reasoning-and-chat model that trades a small amount of quality for 5x lower price and faster responses.",
      "category": "models",
      "categoryTitle": "AI Models",
      "pillar": "Curiosity",
      "keywords": [
        "GPT-5 mini",
        "OpenAI",
        "LLM",
        "reasoning",
        "agents",
        "cost-efficient",
        "unified model"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "GPT-5 nano",
      "slug": "openai-gpt-5-nano",
      "url": "https://learn.engineering.vips.edu/ai-models/openai-gpt-5-nano",
      "description": "GPT-5 nano is OpenAI's cheapest and fastest GPT-5 tier — built for ultra-low-latency classification, routing, and high-volume workloads where quality-per-dollar trumps frontier reasoning.",
      "category": "models",
      "categoryTitle": "AI Models",
      "pillar": "Curiosity",
      "keywords": [
        "GPT-5 nano",
        "OpenAI",
        "LLM",
        "low latency",
        "classification",
        "cheap AI",
        "routing"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "GPT-5 Thinking",
      "slug": "openai-gpt-5-thinking",
      "url": "https://learn.engineering.vips.edu/ai-models/openai-gpt-5-thinking",
      "description": "GPT-5 Thinking is OpenAI's flagship deliberate-reasoning mode — a variant of GPT-5 that spends extra inference tokens on hard math, code, and agent planning.",
      "category": "models",
      "categoryTitle": "AI Models",
      "pillar": "Curiosity",
      "keywords": [
        "GPT-5",
        "GPT-5 Thinking",
        "OpenAI",
        "reasoning",
        "chain of thought",
        "agentic"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "GPT-5",
      "slug": "openai-gpt-5",
      "url": "https://learn.engineering.vips.edu/ai-models/openai-gpt-5",
      "description": "GPT-5 is OpenAI's 2026 flagship multimodal LLM — native audio/vision, unified reasoning modes, and deep ChatGPT + API integration. The default general-purpose model for most teams.",
      "category": "models",
      "categoryTitle": "AI Models",
      "pillar": "Curiosity",
      "keywords": [
        "GPT-5",
        "OpenAI",
        "LLM",
        "multimodal",
        "Responses API",
        "ChatGPT"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "GPT Realtime",
      "slug": "openai-gpt-realtime",
      "url": "https://learn.engineering.vips.edu/ai-models/openai-gpt-realtime",
      "description": "GPT Realtime is OpenAI's low-latency speech-to-speech model for voice agents — direct audio in, audio out, ~300ms turn-taking, function calling, and interruptions supported over WebRTC.",
      "category": "models",
      "categoryTitle": "AI Models",
      "pillar": "Curiosity",
      "keywords": [
        "GPT Realtime",
        "OpenAI",
        "voice AI",
        "speech to speech",
        "WebRTC",
        "voice agent",
        "Realtime API"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "OpenAI o1 Pro",
      "slug": "openai-o1-pro",
      "url": "https://learn.engineering.vips.edu/ai-models/openai-o1-pro",
      "description": "OpenAI o1 Pro is the top-tier variant of the o1 reasoning series — a slower, more deliberate thinking model that spends additional inference compute on hard math, science, and coding problems.",
      "category": "models",
      "categoryTitle": "AI Models",
      "pillar": "Curiosity",
      "keywords": [
        "o1 Pro",
        "OpenAI",
        "reasoning",
        "chain of thought",
        "math",
        "ChatGPT Pro"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "OpenAI o1",
      "slug": "openai-o1",
      "url": "https://learn.engineering.vips.edu/ai-models/openai-o1",
      "description": "OpenAI o1 is the September 2024 reasoning model that launched the \"thinking model\" era — trained with reinforcement learning to produce long internal chains of thought before answering.",
      "category": "models",
      "categoryTitle": "AI Models",
      "pillar": "Curiosity",
      "keywords": [
        "OpenAI o1",
        "reasoning model",
        "chain of thought",
        "thinking model",
        "AIME",
        "Codeforces",
        "RL"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "OpenAI o3",
      "slug": "openai-o3",
      "url": "https://learn.engineering.vips.edu/ai-models/openai-o3",
      "description": "OpenAI o3 is the April 2025 successor to o1 — a reasoning model with tool use, vision, and dramatically better scores on ARC-AGI, SWE-bench, and graduate-level science benchmarks.",
      "category": "models",
      "categoryTitle": "AI Models",
      "pillar": "Curiosity",
      "keywords": [
        "OpenAI o3",
        "reasoning model",
        "ARC-AGI",
        "tool use",
        "SWE-bench",
        "thinking model"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "OpenAI o4-mini",
      "slug": "openai-o4-mini",
      "url": "https://learn.engineering.vips.edu/ai-models/openai-o4-mini",
      "description": "o4-mini is OpenAI's small reasoning model — a fast, cheap thinking model that matches or beats o3 on many math and coding benchmarks at a fraction of the cost.",
      "category": "models",
      "categoryTitle": "AI Models",
      "pillar": "Curiosity",
      "keywords": [
        "o4-mini",
        "OpenAI",
        "reasoning model",
        "AIME",
        "thinking model",
        "small model"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "OpenAI Sora",
      "slug": "openai-sora",
      "url": "https://learn.engineering.vips.edu/ai-models/openai-sora",
      "description": "Sora is OpenAI's text-to-video model — generates up to 20-second 1080p clips from prompts, reference images, or remix edits, served through sora.com for ChatGPT Plus users.",
      "category": "models",
      "categoryTitle": "AI Models",
      "pillar": "Curiosity",
      "keywords": [
        "Sora",
        "OpenAI",
        "text to video",
        "video generation",
        "storyboarding"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "OpenAI text-embedding-3-large",
      "slug": "openai-text-embedding-3-large",
      "url": "https://learn.engineering.vips.edu/ai-models/openai-text-embedding-3-large",
      "description": "OpenAI text-embedding-3-large is a 3072-dim retrieval embedding model with Matryoshka support — top MTEB scores and the default choice for production RAG on the OpenAI stack.",
      "category": "models",
      "categoryTitle": "AI Models",
      "pillar": "Curiosity",
      "keywords": [
        "text-embedding-3-large",
        "OpenAI",
        "embeddings",
        "MTEB",
        "RAG",
        "Matryoshka"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "OpenAI text-embedding-3-small",
      "slug": "openai-text-embedding-3-small",
      "url": "https://learn.engineering.vips.edu/ai-models/openai-text-embedding-3-small",
      "description": "OpenAI text-embedding-3-small is a 1536-dim embedding model optimised for throughput — the cheap default for large-scale RAG ingestion on the OpenAI API.",
      "category": "models",
      "categoryTitle": "AI Models",
      "pillar": "Curiosity",
      "keywords": [
        "text-embedding-3-small",
        "OpenAI",
        "embeddings",
        "RAG",
        "cheap embeddings",
        "MTEB"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "OpenAI TTS-1-HD",
      "slug": "openai-tts-hd",
      "url": "https://learn.engineering.vips.edu/ai-models/openai-tts-hd",
      "description": "OpenAI TTS-1-HD is OpenAI's high-fidelity text-to-speech model — six built-in voices for audiobooks, voice agents, and low-latency speech UX on the OpenAI API.",
      "category": "models",
      "categoryTitle": "AI Models",
      "pillar": "Curiosity",
      "keywords": [
        "OpenAI TTS",
        "tts-1-hd",
        "text to speech",
        "audiobooks",
        "voice agents"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "OpenAI Whisper v3 (large-v3)",
      "slug": "openai-whisper-v3",
      "url": "https://learn.engineering.vips.edu/ai-models/openai-whisper-v3",
      "description": "Whisper large-v3 is OpenAI's open-weight speech-to-text model — 99 languages with strong WER on accented speech; a default for open-source transcription pipelines.",
      "category": "models",
      "categoryTitle": "AI Models",
      "pillar": "Curiosity",
      "keywords": [
        "Whisper",
        "OpenAI",
        "speech to text",
        "ASR",
        "open source",
        "transcription"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "InternVL 2.5",
      "slug": "opengvlab-intern-vl-2-5",
      "url": "https://learn.engineering.vips.edu/ai-models/opengvlab-intern-vl-2-5",
      "description": "InternVL 2.5 is OpenGVLab's open multimodal model family (1B–78B) matching GPT-4o on MMMU through scaled training, test-time scaling, and long-chain reasoning.",
      "category": "models",
      "categoryTitle": "AI Models",
      "pillar": "Curiosity",
      "keywords": [
        "InternVL 2.5",
        "OpenGVLab",
        "open VLM",
        "MMMU",
        "multimodal"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Physical Intelligence π0",
      "slug": "physical-intelligence-pi0",
      "url": "https://learn.engineering.vips.edu/ai-models/physical-intelligence-pi0",
      "description": "π0 (pi-zero) is Physical Intelligence's generalist robot foundation model — a flow-matching vision-language-action policy trained on diverse multi-embodiment data for dexterous manipulation.",
      "category": "models",
      "categoryTitle": "AI Models",
      "pillar": "Curiosity",
      "keywords": [
        "pi0",
        "π0",
        "Physical Intelligence",
        "robot foundation model",
        "flow matching",
        "VLA"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Pika 2.0",
      "slug": "pika-labs-pika-2-0",
      "url": "https://learn.engineering.vips.edu/ai-models/pika-labs-pika-2-0",
      "description": "Pika 2.0 is Pika Labs' text-to-video model with a signature 'Scene Ingredients' feature for compositing characters, objects, and locations across shots.",
      "category": "models",
      "categoryTitle": "AI Models",
      "pillar": "Curiosity",
      "keywords": [
        "Pika",
        "Pika 2.0",
        "Pika Labs",
        "text-to-video",
        "Scene Ingredients"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Qodo Gen 1",
      "slug": "qodo-qodo-gen-1",
      "url": "https://learn.engineering.vips.edu/ai-models/qodo-qodo-gen-1",
      "description": "Qodo Gen 1, from Qodo (formerly CodiumAI), is a specialised code-generation and test-writing LLM tuned for IDE-integrated review and unit-test synthesis.",
      "category": "models",
      "categoryTitle": "AI Models",
      "pillar": "Curiosity",
      "keywords": [
        "Qodo",
        "CodiumAI",
        "code generation",
        "unit tests",
        "IDE",
        "PR review"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Qwen 2.5 3B",
      "slug": "qwen-qwen-2-5-3b",
      "url": "https://learn.engineering.vips.edu/ai-models/qwen-qwen-2-5-3b",
      "description": "Qwen 2.5 3B is Alibaba's compact open small language model — a 3B-parameter LLM with 128k context, tool-use training, and multilingual coverage in 29 languages.",
      "category": "models",
      "categoryTitle": "AI Models",
      "pillar": "Curiosity",
      "keywords": [
        "Qwen 2.5",
        "3B",
        "Alibaba",
        "small language model",
        "open weights"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Recraft V3",
      "slug": "recraft-recraft-v3",
      "url": "https://learn.engineering.vips.edu/ai-models/recraft-recraft-v3",
      "description": "Recraft V3 is a closed text-to-image model known for industry-leading text rendering and vector-style outputs — the model that topped Artificial Analysis's image leaderboard on launch.",
      "category": "models",
      "categoryTitle": "AI Models",
      "pillar": "Curiosity",
      "keywords": [
        "Recraft V3",
        "Recraft",
        "text-to-image",
        "typography",
        "vector"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Reka Core",
      "slug": "reka-reka-core",
      "url": "https://learn.engineering.vips.edu/ai-models/reka-reka-core",
      "description": "Reka AI's Reka Core is a 2024 frontier-tier multimodal LLM with image, video, and audio understanding plus 128k context and multilingual coverage.",
      "category": "models",
      "categoryTitle": "AI Models",
      "pillar": "Curiosity",
      "keywords": [
        "Reka Core",
        "Reka AI",
        "multimodal",
        "video LLM",
        "audio LLM",
        "128k"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Reka Flash 3",
      "slug": "reka-reka-flash-3",
      "url": "https://learn.engineering.vips.edu/ai-models/reka-reka-flash-3",
      "description": "Reka AI's Reka Flash 3 is a 21B open-weight reasoning LLM released in 2025 with 32k context and strong performance-per-dollar for enterprise use.",
      "category": "models",
      "categoryTitle": "AI Models",
      "pillar": "Curiosity",
      "keywords": [
        "Reka Flash 3",
        "Reka AI",
        "open weights",
        "reasoning",
        "21B",
        "Apache 2.0"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Reka Vision",
      "slug": "reka-reka-vision",
      "url": "https://learn.engineering.vips.edu/ai-models/reka-reka-vision",
      "description": "Reka AI's Reka Vision is a multimodal product for enterprise video and image understanding, built on the Reka Core/Flash models with retrieval-grade search.",
      "category": "models",
      "categoryTitle": "AI Models",
      "pillar": "Curiosity",
      "keywords": [
        "Reka Vision",
        "Reka AI",
        "video understanding",
        "multimodal",
        "visual search",
        "enterprise"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Replit Code v3",
      "slug": "replit-replit-code-v3",
      "url": "https://learn.engineering.vips.edu/ai-models/replit-replit-code-v3",
      "description": "Replit Code v3 is Replit's in-house code LLM powering Replit Agent and Ghostwriter, tuned for cloud-IDE completions and full-stack app synthesis.",
      "category": "models",
      "categoryTitle": "AI Models",
      "pillar": "Curiosity",
      "keywords": [
        "Replit Code v3",
        "Replit Agent",
        "Ghostwriter",
        "code LLM",
        "cloud IDE"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Resemble Rapid Voice Cloning",
      "slug": "resemble-ai-rapid-voice-cloning",
      "url": "https://learn.engineering.vips.edu/ai-models/resemble-ai-rapid-voice-cloning",
      "description": "Resemble AI's Rapid Voice Cloning creates a high-fidelity custom voice from 10 seconds of reference audio, paired with a watermarking stack for responsible synthetic speech.",
      "category": "models",
      "categoryTitle": "AI Models",
      "pillar": "Curiosity",
      "keywords": [
        "Resemble AI",
        "voice cloning",
        "TTS",
        "watermarking",
        "deepfake detection"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Runway Gen-3 Alpha",
      "slug": "runway-gen-3-alpha",
      "url": "https://learn.engineering.vips.edu/ai-models/runway-gen-3-alpha",
      "description": "Runway Gen-3 Alpha is Runway's flagship video generator for filmmakers — 10-second clips with strong character consistency and a polished editing UI.",
      "category": "models",
      "categoryTitle": "AI Models",
      "pillar": "Curiosity",
      "keywords": [
        "Runway Gen-3",
        "Runway",
        "text to video",
        "image to video",
        "filmmaking"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "AI Scientist v2",
      "slug": "sakana-ai-scientist-v2",
      "url": "https://learn.engineering.vips.edu/ai-models/sakana-ai-scientist-v2",
      "description": "Sakana AI's AI Scientist v2 is an autonomous research agent that generates, runs, and writes up machine-learning experiments end-to-end.",
      "category": "models",
      "categoryTitle": "AI Models",
      "pillar": "Curiosity",
      "keywords": [
        "AI Scientist",
        "Sakana AI",
        "autonomous research",
        "agent",
        "ML paper generation"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Sakana Evolutionary Model Merge",
      "slug": "sakana-sakana-evolutionary-optimization",
      "url": "https://learn.engineering.vips.edu/ai-models/sakana-sakana-evolutionary-optimization",
      "description": "Sakana AI's Evolutionary Model Merge is a research system that uses evolutionary algorithms to combine open-weights LLMs — automatically discovering high-performing merged checkpoints.",
      "category": "models",
      "categoryTitle": "AI Models",
      "pillar": "Curiosity",
      "keywords": [
        "Sakana AI",
        "evolutionary optimization",
        "model merging",
        "EvoLLM"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "SFR-Embedding-Mistral",
      "slug": "salesforce-sfr-embedding-mistral",
      "url": "https://learn.engineering.vips.edu/ai-models/salesforce-sfr-embedding-mistral",
      "description": "SFR-Embedding-Mistral is Salesforce Research's open-weights English embedding model — a Mistral 7B fine-tune that led the MTEB leaderboard at release.",
      "category": "models",
      "categoryTitle": "AI Models",
      "pillar": "Curiosity",
      "keywords": [
        "SFR-Embedding",
        "Salesforce",
        "embeddings",
        "Mistral",
        "MTEB"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "all-mpnet-base-v2",
      "slug": "sbert-all-mpnet-base-v2",
      "url": "https://learn.engineering.vips.edu/ai-models/sbert-all-mpnet-base-v2",
      "description": "all-mpnet-base-v2 is sentence-transformers' most widely used open English embedding model — a 110M MPNet fine-tune that has been the default RAG encoder for years.",
      "category": "models",
      "categoryTitle": "AI Models",
      "pillar": "Curiosity",
      "keywords": [
        "sentence-transformers",
        "MPNet",
        "embeddings",
        "RAG",
        "open weights"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Shengshu Vidu",
      "slug": "shengshu-vidu",
      "url": "https://learn.engineering.vips.edu/ai-models/shengshu-vidu",
      "description": "Vidu is Shengshu Technology and Tsinghua's text- and image-to-video model, based on the U-ViT diffusion-transformer — the first Chinese Sora-class public video generator.",
      "category": "models",
      "categoryTitle": "AI Models",
      "pillar": "Curiosity",
      "keywords": [
        "Vidu",
        "Shengshu",
        "U-ViT",
        "text-to-video",
        "China",
        "video generation"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Skywork-o1-Open",
      "slug": "skywork-skywork-o1-open",
      "url": "https://learn.engineering.vips.edu/ai-models/skywork-skywork-o1-open",
      "description": "Skywork's Skywork-o1-Open is an open-weight reasoning model family (8B/32B) reproducing o1-style chain-of-thought with strong math and code performance.",
      "category": "models",
      "categoryTitle": "AI Models",
      "pillar": "Curiosity",
      "keywords": [
        "Skywork-o1",
        "Skywork",
        "Kunlun Tech",
        "reasoning",
        "open weights",
        "o1-style",
        "PRM"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Japanese Stable LM 2",
      "slug": "stability-japanese-stable-lm-2",
      "url": "https://learn.engineering.vips.edu/ai-models/stability-japanese-stable-lm-2",
      "description": "Japanese Stable LM 2 is Stability AI Japan's open-weights Japanese-language LLM — a 1.6B model built from the Stable LM 2 backbone and specialised for Japanese.",
      "category": "models",
      "categoryTitle": "AI Models",
      "pillar": "Curiosity",
      "keywords": [
        "Japanese Stable LM",
        "Stability AI Japan",
        "Japanese",
        "open weights"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Stable Diffusion XL 1.0",
      "slug": "stability-sdxl-1-0",
      "url": "https://learn.engineering.vips.edu/ai-models/stability-sdxl-1-0",
      "description": "SDXL 1.0 is Stability AI's July 2023 open-weights text-to-image diffusion model — a 2.6B-parameter U-Net with a refiner, widely used as the default open image generator.",
      "category": "models",
      "categoryTitle": "AI Models",
      "pillar": "Curiosity",
      "keywords": [
        "SDXL",
        "Stable Diffusion XL",
        "Stability AI",
        "open source",
        "text-to-image",
        "diffusion"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Stable Audio 2",
      "slug": "stability-stable-audio-2",
      "url": "https://learn.engineering.vips.edu/ai-models/stability-stable-audio-2",
      "description": "Stable Audio 2 is Stability AI's text-to-audio model — generates full-length (up to 3-minute) music and sound-effect tracks from text prompts with optional audio-to-audio conditioning.",
      "category": "models",
      "categoryTitle": "AI Models",
      "pillar": "Curiosity",
      "keywords": [
        "Stable Audio",
        "Stability AI",
        "text-to-audio",
        "music generation",
        "sound effects"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Stable Cascade",
      "slug": "stability-stable-cascade",
      "url": "https://learn.engineering.vips.edu/ai-models/stability-stable-cascade",
      "description": "Stable Cascade is Stability AI's three-stage cascaded text-to-image model based on the Würstchen architecture — efficient high-resolution generation in a tiny latent space.",
      "category": "models",
      "categoryTitle": "AI Models",
      "pillar": "Curiosity",
      "keywords": [
        "Stable Cascade",
        "Würstchen",
        "Stability AI",
        "diffusion",
        "open source"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Stable Code 3B",
      "slug": "stability-stable-code-3b",
      "url": "https://learn.engineering.vips.edu/ai-models/stability-stable-code-3b",
      "description": "Stability AI's Stable Code 3B is a tiny 3-billion-parameter code LLM with FIM support, strong for offline IDE completions on commodity hardware.",
      "category": "models",
      "categoryTitle": "AI Models",
      "pillar": "Curiosity",
      "keywords": [
        "Stable Code 3B",
        "Stability AI",
        "small LLM",
        "FIM",
        "local code",
        "IDE"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Stable Diffusion 2.1",
      "slug": "stability-stable-diffusion-2-1",
      "url": "https://learn.engineering.vips.edu/ai-models/stability-stable-diffusion-2-1",
      "description": "Stable Diffusion 2.1 is Stability AI's late-2022 text-to-image latent diffusion model — a 768x768 successor to SD 1.5 with OpenCLIP H/14 conditioning. Now a legacy baseline.",
      "category": "models",
      "categoryTitle": "AI Models",
      "pillar": "Curiosity",
      "keywords": [
        "Stable Diffusion 2.1",
        "Stability AI",
        "text-to-image",
        "diffusion",
        "legacy"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Stable Diffusion 3.5 Large",
      "slug": "stability-stable-diffusion-3-5-large",
      "url": "https://learn.engineering.vips.edu/ai-models/stability-stable-diffusion-3-5-large",
      "description": "Stable Diffusion 3.5 Large is Stability AI's 8B-parameter MMDiT text-to-image model — open weights for research and community use with strong prompt adherence and typography.",
      "category": "models",
      "categoryTitle": "AI Models",
      "pillar": "Curiosity",
      "keywords": [
        "Stable Diffusion 3.5",
        "Stability AI",
        "MMDiT",
        "text to image",
        "open weights",
        "ComfyUI"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Stable Video Diffusion",
      "slug": "stability-stable-video-diffusion",
      "url": "https://learn.engineering.vips.edu/ai-models/stability-stable-video-diffusion",
      "description": "Stable Video Diffusion is Stability AI's image-to-video latent diffusion model — generates short, coherent video clips from a single still image using a Stable Diffusion backbone.",
      "category": "models",
      "categoryTitle": "AI Models",
      "pillar": "Curiosity",
      "keywords": [
        "Stable Video Diffusion",
        "Stability AI",
        "video generation",
        "open weights",
        "diffusion"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Stable LM 2 1.6B",
      "slug": "stabilityai-stable-lm-2-1-6b",
      "url": "https://learn.engineering.vips.edu/ai-models/stabilityai-stable-lm-2-1-6b",
      "description": "Stability AI's Stable LM 2 1.6B is a tiny multilingual open-weight LLM trained on 2T tokens, strong for its size with 4k context.",
      "category": "models",
      "categoryTitle": "AI Models",
      "pillar": "Curiosity",
      "keywords": [
        "Stable LM 2",
        "Stability AI",
        "tiny LLM",
        "multilingual",
        "edge"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "OpenVLA",
      "slug": "stanford-openvla",
      "url": "https://learn.engineering.vips.edu/ai-models/stanford-openvla",
      "description": "OpenVLA is a 7B-parameter open-source vision-language-action model trained on the Open X-Embodiment dataset — a permissively licensed robot foundation model for manipulation research.",
      "category": "models",
      "categoryTitle": "AI Models",
      "pillar": "Curiosity",
      "keywords": [
        "OpenVLA",
        "open source",
        "vision-language-action",
        "Open X-Embodiment",
        "robotics"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Suno v3.5",
      "slug": "suno-suno-v3-5",
      "url": "https://learn.engineering.vips.edu/ai-models/suno-suno-v3-5",
      "description": "Suno v3.5 is Suno AI's 2024 music-generation model — produces full songs with vocals, lyrics, and production up to four minutes from a single text prompt.",
      "category": "models",
      "categoryTitle": "AI Models",
      "pillar": "Curiosity",
      "keywords": [
        "Suno",
        "music generation",
        "AI music",
        "text-to-music"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Hunyuan-Large",
      "slug": "tencent-hunyuan-large",
      "url": "https://learn.engineering.vips.edu/ai-models/tencent-hunyuan-large",
      "description": "Tencent's Hunyuan-Large is a 389B-parameter open-weight MoE model (52B active) with 256k context, strong on Chinese tasks and math reasoning.",
      "category": "models",
      "categoryTitle": "AI Models",
      "pillar": "Curiosity",
      "keywords": [
        "Hunyuan-Large",
        "Tencent",
        "MoE",
        "open weights",
        "Chinese LLM",
        "256k context"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "TinyLlama 1.1B",
      "slug": "tinyllama-1-1b",
      "url": "https://learn.engineering.vips.edu/ai-models/tinyllama-1-1b",
      "description": "TinyLlama is an open community effort to pretrain a 1.1B-parameter Llama-architecture model on 3T tokens — a compact, hackable, edge-friendly LLM.",
      "category": "models",
      "categoryTitle": "AI Models",
      "pillar": "Curiosity",
      "keywords": [
        "TinyLlama",
        "1.1B",
        "Llama",
        "open source",
        "tiny LLM",
        "community"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Udio v1.5",
      "slug": "udio-udio-v1-5",
      "url": "https://learn.engineering.vips.edu/ai-models/udio-udio-v1-5",
      "description": "Udio v1.5 is Udio's music-generation model from the ex-DeepMind team — text-to-music with rich audio fidelity, long-form generation, and detailed lyric control.",
      "category": "models",
      "categoryTitle": "AI Models",
      "pillar": "Curiosity",
      "keywords": [
        "Udio",
        "music generation",
        "AI music",
        "text-to-music"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "VILA 1.5 40B",
      "slug": "vila-vila-1-5-40b",
      "url": "https://learn.engineering.vips.edu/ai-models/vila-vila-1-5-40b",
      "description": "NVIDIA's VILA 1.5 40B is an open-weight visual language model with multi-image and video support, strong on in-context learning for visual tasks.",
      "category": "models",
      "categoryTitle": "AI Models",
      "pillar": "Curiosity",
      "keywords": [
        "VILA 1.5",
        "NVIDIA",
        "open VLM",
        "visual language",
        "multi-image",
        "video"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Voyage AI voyage-3",
      "slug": "voyage-ai-voyage-3",
      "url": "https://learn.engineering.vips.edu/ai-models/voyage-ai-voyage-3",
      "description": "Voyage AI voyage-3 is a retrieval-first embedding model family — voyage-3 and voyage-3-lite — built for RAG, with domain-specialised variants for code, law, and finance.",
      "category": "models",
      "categoryTitle": "AI Models",
      "pillar": "Curiosity",
      "keywords": [
        "Voyage AI",
        "voyage-3",
        "embeddings",
        "RAG",
        "Anthropic",
        "domain embeddings"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Grok 1.5",
      "slug": "xai-grok-1-5",
      "url": "https://learn.engineering.vips.edu/ai-models/xai-grok-1-5",
      "description": "Grok 1.5 is xAI's March 2024 upgrade over Grok-1, extending context to 128k and significantly improving reasoning, math, and code performance.",
      "category": "models",
      "categoryTitle": "AI Models",
      "pillar": "Curiosity",
      "keywords": [
        "Grok 1.5",
        "xAI",
        "LLM",
        "long context",
        "reasoning"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Grok 2 Vision",
      "slug": "xai-grok-2-vision",
      "url": "https://learn.engineering.vips.edu/ai-models/xai-grok-2-vision",
      "description": "Grok 2 Vision is xAI's 2024 multimodal LLM adding image understanding to the Grok line, with 32k context and competitive pricing for visual Q&A.",
      "category": "models",
      "categoryTitle": "AI Models",
      "pillar": "Curiosity",
      "keywords": [
        "Grok 2 Vision",
        "xAI",
        "vision LLM",
        "multimodal",
        "image understanding"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Grok 2",
      "slug": "xai-grok-2",
      "url": "https://learn.engineering.vips.edu/ai-models/xai-grok-2",
      "description": "Grok 2 is xAI's second-generation chat model — a frontier-tier LLM with image understanding and X (Twitter) real-time retrieval, released August 2024.",
      "category": "models",
      "categoryTitle": "AI Models",
      "pillar": "Curiosity",
      "keywords": [
        "Grok 2",
        "xAI",
        "LLM",
        "real-time search",
        "X",
        "Twitter"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Grok 3",
      "slug": "xai-grok-3",
      "url": "https://learn.engineering.vips.edu/ai-models/xai-grok-3",
      "description": "Grok 3 is xAI's 2025 flagship LLM, known for its 'Think' reasoning mode and live X integration. 128k context, strong on math and coding.",
      "category": "models",
      "categoryTitle": "AI Models",
      "pillar": "Curiosity",
      "keywords": [
        "Grok 3",
        "xAI",
        "Think mode",
        "reasoning",
        "X integration"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Grok 4",
      "slug": "xai-grok-4",
      "url": "https://learn.engineering.vips.edu/ai-models/xai-grok-4",
      "description": "xAI's Grok 4 is Elon Musk's flagship reasoning LLM for 2026, with native tool use, a 256k context, and real-time X (Twitter) grounding via Grok-Search.",
      "category": "models",
      "categoryTitle": "AI Models",
      "pillar": "Curiosity",
      "keywords": [
        "Grok 4",
        "xAI",
        "Elon Musk",
        "LLM",
        "real-time",
        "X platform",
        "Think mode"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "GLM-4 Plus",
      "slug": "zhipu-glm-4-plus",
      "url": "https://learn.engineering.vips.edu/ai-models/zhipu-glm-4-plus",
      "description": "Zhipu AI's GLM-4 Plus is a Chinese flagship LLM with 128k context, strong on bilingual (Chinese/English) tasks, reasoning, and tool use.",
      "category": "models",
      "categoryTitle": "AI Models",
      "pillar": "Curiosity",
      "keywords": [
        "GLM-4 Plus",
        "Zhipu AI",
        "ChatGLM",
        "Chinese LLM",
        "Tsinghua",
        "bilingual"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "A2A Protocol vs Anthropic MCP",
      "slug": "a2a-protocol-vs-anthropic-mcp",
      "url": "https://learn.engineering.vips.edu/compare/a2a-protocol-vs-anthropic-mcp",
      "description": "A2A (Agent-to-Agent) is Google's protocol for agents talking to other agents; MCP is Anthropic's protocol for LLMs consuming tools, resources, and prompts. Complementary, not competitive — use both.",
      "category": "comparisons",
      "categoryTitle": "Model & Tool Comparisons",
      "pillar": "Capability",
      "keywords": [
        "A2A protocol",
        "MCP",
        "Model Context Protocol",
        "agent communication",
        "agentic AI"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Agent Memory Patterns vs RAG",
      "slug": "agent-memory-patterns-vs-rag",
      "url": "https://learn.engineering.vips.edu/compare/agent-memory-patterns-vs-rag",
      "description": "RAG pulls relevant context from a corpus at inference time; agent memory patterns maintain evolving per-agent or per-user state across sessions. Different problems, often used together in real systems.",
      "category": "comparisons",
      "categoryTitle": "Model & Tool Comparisons",
      "pillar": "Creativity",
      "keywords": [
        "agent memory",
        "RAG",
        "retrieval augmented generation",
        "long term memory",
        "LLM state"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Agent Memory vs Long Context",
      "slug": "agent-memory-vs-long-context",
      "url": "https://learn.engineering.vips.edu/compare/agent-memory-vs-long-context",
      "description": "Agent memory stores, retrieves, and curates facts across sessions; long context stuffs everything into a single model call. Memory scales across time; long context scales within a turn.",
      "category": "comparisons",
      "categoryTitle": "Model & Tool Comparisons",
      "pillar": "Capability",
      "keywords": [
        "agent memory",
        "long context",
        "LLM memory",
        "context window",
        "Mem0",
        "Zep"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Aider vs Continue.dev",
      "slug": "aider-vs-continue-dev",
      "url": "https://learn.engineering.vips.edu/compare/aider-vs-continue-dev",
      "description": "Aider is a terminal-first coding assistant with git-commit discipline; Continue.dev is an IDE-native open-source coding assistant for VS Code and JetBrains. Pick by whether you live in the terminal or the IDE.",
      "category": "comparisons",
      "categoryTitle": "Model & Tool Comparisons",
      "pillar": "Capability",
      "keywords": [
        "Aider",
        "Continue.dev",
        "AI coding assistant",
        "open source Copilot",
        "pair programming"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Aider vs Cursor",
      "slug": "aider-vs-cursor",
      "url": "https://learn.engineering.vips.edu/compare/aider-vs-cursor",
      "description": "Aider is an open-source AI pair-programmer that runs in your terminal. Cursor is a proprietary AI-first IDE (VS Code fork). Pick by workflow: terminal vs IDE.",
      "category": "comparisons",
      "categoryTitle": "Model & Tool Comparisons",
      "pillar": "Capability",
      "keywords": [
        "Aider vs Cursor",
        "AI coding tools",
        "pair programmer",
        "AI IDE",
        "terminal AI"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Qwen 2.5 72B vs Llama 3.3 70B",
      "slug": "alibaba-qwen-2-5-72b-vs-meta-llama-3-3-70b",
      "url": "https://learn.engineering.vips.edu/compare/alibaba-qwen-2-5-72b-vs-meta-llama-3-3-70b",
      "description": "Qwen 2.5 72B and Llama 3.3 70B are the two dominant open-weight 70B-class models. Qwen wins on math, Chinese, and multilingual; Llama on English and ecosystem.",
      "category": "comparisons",
      "categoryTitle": "Model & Tool Comparisons",
      "pillar": "Capability",
      "keywords": [
        "Qwen 2.5 72B",
        "Llama 3.3 70B",
        "open-weight LLM",
        "LLM comparison",
        "self-hosted"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Qwen 2.5 Coder 32B vs DeepSeek Coder V2",
      "slug": "alibaba-qwen-2-5-coder-32b-vs-deepseek-coder-v2",
      "url": "https://learn.engineering.vips.edu/compare/alibaba-qwen-2-5-coder-32b-vs-deepseek-coder-v2",
      "description": "Both are leading open-weights code models. Qwen 2.5 Coder 32B is denser and strong at single-file completion; DeepSeek Coder V2 is MoE, longer context, stronger on repo-scale reasoning.",
      "category": "comparisons",
      "categoryTitle": "Model & Tool Comparisons",
      "pillar": "Capability",
      "keywords": [
        "Qwen 2.5 Coder",
        "DeepSeek Coder V2",
        "open coding LLM",
        "code completion",
        "FIM"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Qwen 3 vs QwQ-32B",
      "slug": "alibaba-qwen-3-vs-alibaba-qwq-32b",
      "url": "https://learn.engineering.vips.edu/compare/alibaba-qwen-3-vs-alibaba-qwq-32b",
      "description": "Qwen 3 is the general-purpose family covering chat, code, and agents; QwQ-32B is the reasoning-specialised 32B model with visible chain-of-thought. Pick by whether you need a fleet or a deep thinker.",
      "category": "comparisons",
      "categoryTitle": "Model & Tool Comparisons",
      "pillar": "Capability",
      "keywords": [
        "Qwen 3",
        "QwQ-32B",
        "reasoning models",
        "open-weight LLM",
        "Alibaba"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Qwen 3 vs DeepSeek V3",
      "slug": "alibaba-qwen-3-vs-deepseek-v3",
      "url": "https://learn.engineering.vips.edu/compare/alibaba-qwen-3-vs-deepseek-v3",
      "description": "Qwen 3 and DeepSeek V3 are the two leading open-weights Chinese frontier LLMs as of 2026-04. Qwen 3 wins breadth and multilinguality; DeepSeek V3 wins on reasoning and MoE efficiency.",
      "category": "comparisons",
      "categoryTitle": "Model & Tool Comparisons",
      "pillar": "Capability",
      "keywords": [
        "Qwen vs DeepSeek",
        "Qwen 3",
        "DeepSeek V3",
        "Chinese LLM",
        "open-weights"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Alibaba Qwen 3 vs Meta Llama 3.3 70B",
      "slug": "alibaba-qwen-3-vs-meta-llama-3-3-70b",
      "url": "https://learn.engineering.vips.edu/compare/alibaba-qwen-3-vs-meta-llama-3-3-70b",
      "description": "Qwen 3 wins on multilingual (esp. Chinese and Asian languages), code, and a wider size ladder; Llama 3.3 70B wins on English instruction following, ecosystem tooling, and licensing clarity for Western enterprises.",
      "category": "comparisons",
      "categoryTitle": "Model & Tool Comparisons",
      "pillar": "Capability",
      "keywords": [
        "Qwen 3",
        "Llama 3.3 70B",
        "open-weight LLM",
        "multilingual LLM",
        "model comparison"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "QwQ-32B vs DeepSeek R1 (open reasoning)",
      "slug": "alibaba-qwq-32b-vs-deepseek-r1",
      "url": "https://learn.engineering.vips.edu/compare/alibaba-qwq-32b-vs-deepseek-r1",
      "description": "QwQ-32B and DeepSeek R1 are the leading open-weight reasoning models. QwQ is smaller and easier to self-host; R1 is larger and more capable but needs serious hardware.",
      "category": "comparisons",
      "categoryTitle": "Model & Tool Comparisons",
      "pillar": "Capability",
      "keywords": [
        "QwQ-32B",
        "DeepSeek R1",
        "open reasoning model",
        "self-hosted LLM",
        "LLM comparison"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Claude 3.5 Haiku vs Claude 3.5 Sonnet",
      "slug": "anthropic-claude-3-5-haiku-vs-anthropic-claude-3-5-sonnet",
      "url": "https://learn.engineering.vips.edu/compare/anthropic-claude-3-5-haiku-vs-anthropic-claude-3-5-sonnet",
      "description": "Claude 3.5 Haiku wins on latency and cost for high-volume tasks; Claude 3.5 Sonnet wins on reasoning depth, coding, and complex tool use. Most teams route by task complexity between the two.",
      "category": "comparisons",
      "categoryTitle": "Model & Tool Comparisons",
      "pillar": "Capability",
      "keywords": [
        "Claude 3.5 Haiku",
        "Claude 3.5 Sonnet",
        "Anthropic",
        "model routing",
        "cost optimisation"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Claude 3.5 Sonnet vs GPT-4o",
      "slug": "anthropic-claude-3-5-sonnet-vs-openai-gpt-4o",
      "url": "https://learn.engineering.vips.edu/compare/anthropic-claude-3-5-sonnet-vs-openai-gpt-4o",
      "description": "Claude 3.5 Sonnet and GPT-4o defined the mid-2024 mid-tier model landscape. Claude wins on coding and reasoning; GPT-4o wins on voice and ecosystem. Both are now legacy.",
      "category": "comparisons",
      "categoryTitle": "Model & Tool Comparisons",
      "pillar": "Capability",
      "keywords": [
        "Claude 3.5 Sonnet",
        "GPT-4o",
        "legacy LLM",
        "LLM comparison",
        "coding agent"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Claude 3 Haiku vs Claude 3.5 Haiku",
      "slug": "anthropic-claude-3-haiku-vs-anthropic-claude-3-5-haiku",
      "url": "https://learn.engineering.vips.edu/compare/anthropic-claude-3-haiku-vs-anthropic-claude-3-5-haiku",
      "description": "Claude 3 Haiku (2024) is Anthropic's original cheapest and fastest Claude tier; Claude 3.5 Haiku is the refreshed model that delivers near-Sonnet-class reasoning in the same Haiku latency envelope at slightly higher cost.",
      "category": "comparisons",
      "categoryTitle": "Model & Tool Comparisons",
      "pillar": "Capability",
      "keywords": [
        "Claude 3 Haiku",
        "Claude 3.5 Haiku",
        "small LLM",
        "Anthropic",
        "cheap model"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Claude 3 Opus vs GPT-4o",
      "slug": "anthropic-claude-3-opus-vs-openai-gpt-4o",
      "url": "https://learn.engineering.vips.edu/compare/anthropic-claude-3-opus-vs-openai-gpt-4o",
      "description": "Two 2024-era flagship models, now both legacy. Claude 3 Opus was the writing and reasoning leader; GPT-4o added native multimodal. Use this page to decide legacy migrations, not new builds.",
      "category": "comparisons",
      "categoryTitle": "Model & Tool Comparisons",
      "pillar": "Capability",
      "keywords": [
        "Claude 3 Opus",
        "GPT-4o",
        "legacy LLM comparison",
        "Claude vs GPT",
        "2024 flagship models"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Claude Haiku 4.5 vs Gemini 2.5 Flash",
      "slug": "anthropic-claude-haiku-4-5-vs-google-gemini-2-5-flash",
      "url": "https://learn.engineering.vips.edu/compare/anthropic-claude-haiku-4-5-vs-google-gemini-2-5-flash",
      "description": "Claude Haiku 4.5 and Gemini 2.5 Flash are the dominant cheap-and-fast models. Haiku wins on agent reliability; Flash wins on long context and price.",
      "category": "comparisons",
      "categoryTitle": "Model & Tool Comparisons",
      "pillar": "Capability",
      "keywords": [
        "Claude Haiku 4.5",
        "Gemini 2.5 Flash",
        "cheap LLM",
        "LLM comparison",
        "RAG"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Claude Haiku 4.5 vs Mistral Small 3",
      "slug": "anthropic-claude-haiku-4-5-vs-mistral-small-3",
      "url": "https://learn.engineering.vips.edu/compare/anthropic-claude-haiku-4-5-vs-mistral-small-3",
      "description": "Claude Haiku 4.5 is Anthropic's low-latency tier with frontier-adjacent quality. Mistral Small 3 is a dense 24B open-weights model that's fast, cheap, and self-hostable. Pick by open-weights need.",
      "category": "comparisons",
      "categoryTitle": "Model & Tool Comparisons",
      "pillar": "Capability",
      "keywords": [
        "Claude Haiku vs Mistral",
        "Mistral Small 3",
        "small LLM",
        "open-weights",
        "cheap LLM"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Claude Haiku 4.5 vs GPT-4o",
      "slug": "anthropic-claude-haiku-4-5-vs-openai-gpt-4o",
      "url": "https://learn.engineering.vips.edu/compare/anthropic-claude-haiku-4-5-vs-openai-gpt-4o",
      "description": "Claude Haiku 4.5 is Anthropic's current small-model workhorse; GPT-4o is OpenAI's 2024 flagship, now mid-tier. Haiku 4.5 is cheaper, faster, and newer on agent tasks.",
      "category": "comparisons",
      "categoryTitle": "Model & Tool Comparisons",
      "pillar": "Capability",
      "keywords": [
        "Claude Haiku 4.5",
        "GPT-4o",
        "small LLM comparison",
        "cheap LLM",
        "agent LLM"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Claude Haiku 4.5 vs GPT-5 nano",
      "slug": "anthropic-claude-haiku-4-5-vs-openai-gpt-5-nano",
      "url": "https://learn.engineering.vips.edu/compare/anthropic-claude-haiku-4-5-vs-openai-gpt-5-nano",
      "description": "Claude Haiku 4.5 and GPT-5 nano are the cheapest frontier-family models. Haiku wins on quality and tool calls; nano wins on raw latency and cost-per-million. Both are fine for high-volume workloads.",
      "category": "comparisons",
      "categoryTitle": "Model & Tool Comparisons",
      "pillar": "Capability",
      "keywords": [
        "Claude Haiku 4.5",
        "GPT-5 nano",
        "small LLM",
        "cheap LLM",
        "LLM comparison"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Claude Opus 4.7 vs DeepSeek-Coder V2 (for coding)",
      "slug": "anthropic-claude-opus-4-7-vs-deepseek-coder-v2",
      "url": "https://learn.engineering.vips.edu/compare/anthropic-claude-opus-4-7-vs-deepseek-coder-v2",
      "description": "Claude Opus 4.7 is the premium coding-agent model; DeepSeek-Coder V2 is a strong open-weight coding specialist. Opus wins on agent reliability; DeepSeek-Coder wins on cost and self-hosting.",
      "category": "comparisons",
      "categoryTitle": "Model & Tool Comparisons",
      "pillar": "Capability",
      "keywords": [
        "Claude Opus 4.7",
        "DeepSeek-Coder V2",
        "coding model",
        "open-weight coder",
        "LLM comparison"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Claude Opus 4.7 vs OpenAI o1",
      "slug": "anthropic-claude-opus-4-7-vs-openai-o1",
      "url": "https://learn.engineering.vips.edu/compare/anthropic-claude-opus-4-7-vs-openai-o1",
      "description": "Claude Opus 4.7 is a general-purpose frontier model with strong agentic reasoning; OpenAI o1 is a reasoning-specialised model with deep deliberative chain-of-thought. Pick by workload shape — agents vs single-shot hard problems.",
      "category": "comparisons",
      "categoryTitle": "Model & Tool Comparisons",
      "pillar": "Capability",
      "keywords": [
        "Claude vs o1",
        "OpenAI o1",
        "reasoning models",
        "deliberative reasoning",
        "LLM comparison"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Claude Opus 4.7 vs OpenAI o3",
      "slug": "anthropic-claude-opus-4-7-vs-openai-o3",
      "url": "https://learn.engineering.vips.edu/compare/anthropic-claude-opus-4-7-vs-openai-o3",
      "description": "Claude Opus 4.7 is the strongest general-purpose agent model; o3 is a dedicated reasoning model. Opus wins on tool use and breadth; o3 wins on hard math and verified-solution problems.",
      "category": "comparisons",
      "categoryTitle": "Model & Tool Comparisons",
      "pillar": "Capability",
      "keywords": [
        "Claude Opus 4.7",
        "OpenAI o3",
        "reasoning model",
        "LLM comparison",
        "coding agent"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Claude Sonnet 4.6 vs Claude 3.5 Sonnet",
      "slug": "anthropic-claude-sonnet-4-6-vs-anthropic-claude-3-5-sonnet",
      "url": "https://learn.engineering.vips.edu/compare/anthropic-claude-sonnet-4-6-vs-anthropic-claude-3-5-sonnet",
      "description": "Claude Sonnet 4.6 is the 2025/26 production workhorse with stronger coding and longer context; Claude 3.5 Sonnet (June 2024) is the earlier generation that set the original Sonnet bar and still appears in many existing pipelines.",
      "category": "comparisons",
      "categoryTitle": "Model & Tool Comparisons",
      "pillar": "Capability",
      "keywords": [
        "Claude Sonnet 4.6",
        "Claude 3.5 Sonnet",
        "Claude upgrade",
        "LLM comparison",
        "Anthropic"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Claude Sonnet 4.6 vs DeepSeek V3",
      "slug": "anthropic-claude-sonnet-4-6-vs-deepseek-v3",
      "url": "https://learn.engineering.vips.edu/compare/anthropic-claude-sonnet-4-6-vs-deepseek-v3",
      "description": "Claude Sonnet 4.6 wins on tool-use reliability, reasoning polish, and enterprise support; DeepSeek V3 wins on raw cost per token, open weights, and self-hostable deployment. Use this to pick by workload.",
      "category": "comparisons",
      "categoryTitle": "Model & Tool Comparisons",
      "pillar": "Capability",
      "keywords": [
        "Claude Sonnet 4.6",
        "DeepSeek V3",
        "open weights",
        "LLM comparison",
        "cost per token"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Claude Sonnet 4.6 vs Gemini 2.5 Flash",
      "slug": "anthropic-claude-sonnet-4-6-vs-google-gemini-2-5-flash",
      "url": "https://learn.engineering.vips.edu/compare/anthropic-claude-sonnet-4-6-vs-google-gemini-2-5-flash",
      "description": "Claude Sonnet 4.6 is Anthropic's mid-tier workhorse; Gemini 2.5 Flash is Google's fast mid-tier. Sonnet wins on coding and tool-use; Flash wins on multimodal breadth and cost.",
      "category": "comparisons",
      "categoryTitle": "Model & Tool Comparisons",
      "pillar": "Capability",
      "keywords": [
        "Claude Sonnet 4.6",
        "Gemini 2.5 Flash",
        "mid-tier LLM",
        "Anthropic vs Google"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Claude Sonnet 4.6 vs Gemini 2.5 Pro",
      "slug": "anthropic-claude-sonnet-4-6-vs-google-gemini-2-5-pro",
      "url": "https://learn.engineering.vips.edu/compare/anthropic-claude-sonnet-4-6-vs-google-gemini-2-5-pro",
      "description": "Claude Sonnet 4.6 and Gemini 2.5 Pro are the workhorse pro-tier models. Sonnet wins on coding agents; Gemini wins on native multimodal and grounded search.",
      "category": "comparisons",
      "categoryTitle": "Model & Tool Comparisons",
      "pillar": "Capability",
      "keywords": [
        "Claude Sonnet 4.6",
        "Gemini 2.5 Pro",
        "LLM comparison",
        "pro-tier model",
        "agent"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Claude Sonnet 4.6 vs GPT-5 mini",
      "slug": "anthropic-claude-sonnet-4-6-vs-openai-gpt-5-mini",
      "url": "https://learn.engineering.vips.edu/compare/anthropic-claude-sonnet-4-6-vs-openai-gpt-5-mini",
      "description": "Claude Sonnet 4.6 and GPT-5 mini are the workhorse mid-tier models of 2026. Sonnet wins on agent reliability and coding; GPT-5 mini wins on price, latency, and ecosystem breadth.",
      "category": "comparisons",
      "categoryTitle": "Model & Tool Comparisons",
      "pillar": "Capability",
      "keywords": [
        "Claude Sonnet 4.6",
        "GPT-5 mini",
        "mid-tier LLM",
        "LLM comparison",
        "coding agent"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Arize Phoenix vs Langfuse",
      "slug": "arize-phoenix-vs-langfuse",
      "url": "https://learn.engineering.vips.edu/compare/arize-phoenix-vs-langfuse",
      "description": "Arize Phoenix is an open-source OpenTelemetry-native LLM observability tool that runs locally or as part of Arize AX; Langfuse is a self-hostable or cloud LLM observability and evaluation platform built around traces, sessions, and prompt experiments.",
      "category": "comparisons",
      "categoryTitle": "Model & Tool Comparisons",
      "pillar": "Capability",
      "keywords": [
        "Arize Phoenix",
        "Langfuse",
        "LLM observability",
        "tracing",
        "evaluation"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "AutoGen vs CrewAI",
      "slug": "autogen-vs-crewai",
      "url": "https://learn.engineering.vips.edu/compare/autogen-vs-crewai",
      "description": "AutoGen is the research-grade multi-agent framework with flexible conversation patterns; CrewAI is the role-based, opinionated framework that ships faster to production. Pick by whether you need research flexibility or role-based simplicity.",
      "category": "comparisons",
      "categoryTitle": "Model & Tool Comparisons",
      "pillar": "Capability",
      "keywords": [
        "AutoGen",
        "CrewAI",
        "multi-agent framework",
        "agent orchestration",
        "agentic AI"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "AutoGen vs LangGraph",
      "slug": "autogen-vs-langgraph",
      "url": "https://learn.engineering.vips.edu/compare/autogen-vs-langgraph",
      "description": "AutoGen (Microsoft) and LangGraph (LangChain) are leading multi-agent frameworks. AutoGen emphasizes conversational agent teams; LangGraph emphasizes explicit state graphs.",
      "category": "comparisons",
      "categoryTitle": "Model & Tool Comparisons",
      "pillar": "Capability",
      "keywords": [
        "AutoGen",
        "LangGraph",
        "multi-agent framework",
        "LangChain",
        "agent framework"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Axolotl vs TorchTune",
      "slug": "axolotl-vs-torchtune",
      "url": "https://learn.engineering.vips.edu/compare/axolotl-vs-torchtune",
      "description": "Axolotl and TorchTune are both open-source LLM fine-tuning libraries. Axolotl is YAML-config-first and community-driven; TorchTune is PyTorch-first from the PyTorch team. Pick by workflow preference.",
      "category": "comparisons",
      "categoryTitle": "Model & Tool Comparisons",
      "pillar": "Capability",
      "keywords": [
        "Axolotl vs TorchTune",
        "LLM fine-tuning",
        "LoRA",
        "QLoRA",
        "PyTorch"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Axolotl vs Unsloth",
      "slug": "axolotl-vs-unsloth",
      "url": "https://learn.engineering.vips.edu/compare/axolotl-vs-unsloth",
      "description": "Axolotl is a configuration-driven fine-tuning framework with the widest technique coverage; Unsloth is a speed- and memory-optimised library that lets you fine-tune on smaller GPUs. Pick by whether you're chasing flexibility or efficiency.",
      "category": "comparisons",
      "categoryTitle": "Model & Tool Comparisons",
      "pillar": "Capability",
      "keywords": [
        "Axolotl",
        "Unsloth",
        "LLM fine-tuning",
        "LoRA",
        "QLoRA"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "BAML vs Outlines",
      "slug": "baml-vs-outlines",
      "url": "https://learn.engineering.vips.edu/compare/baml-vs-outlines",
      "description": "BAML is a schema-first language and compiler for structured LLM outputs; Outlines is a Python library that constrains token generation to regex, JSON schema, or grammars. Pick by deployment model.",
      "category": "comparisons",
      "categoryTitle": "Model & Tool Comparisons",
      "pillar": "Capability",
      "keywords": [
        "BAML",
        "Outlines",
        "structured outputs",
        "LLM",
        "constrained decoding"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "BentoML vs Ray Serve (LLM)",
      "slug": "bentoml-vs-ray-serve-llm",
      "url": "https://learn.engineering.vips.edu/compare/bentoml-vs-ray-serve-llm",
      "description": "BentoML is a Python-first model-serving framework with strong LLM support via OpenLLM; Ray Serve is the serving layer of the Ray ecosystem, designed for scale-out composition of LLMs, retrievers, and agent tools.",
      "category": "comparisons",
      "categoryTitle": "Model & Tool Comparisons",
      "pillar": "Capability",
      "keywords": [
        "BentoML",
        "Ray Serve",
        "LLM serving",
        "OpenLLM",
        "model deployment"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "BGE-M3 vs Jina Embeddings v3",
      "slug": "bge-m3-vs-jina-embeddings-v3",
      "url": "https://learn.engineering.vips.edu/compare/bge-m3-vs-jina-embeddings-v3",
      "description": "BGE-M3 (BAAI) and Jina Embeddings v3 are two leading open-weights multilingual embedding models. BGE-M3 supports dense/sparse/multi-vector; Jina v3 has strong task-specific LoRAs.",
      "category": "comparisons",
      "categoryTitle": "Model & Tool Comparisons",
      "pillar": "Capability",
      "keywords": [
        "BGE-M3 vs Jina",
        "embedding models",
        "multilingual embeddings",
        "hybrid search",
        "RAG"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "BGE-M3 vs Voyage-3",
      "slug": "bge-m3-vs-voyage-3",
      "url": "https://learn.engineering.vips.edu/compare/bge-m3-vs-voyage-3",
      "description": "BGE-M3 is the open-weight multi-functional embedding model with dense, sparse, and multi-vector retrieval in one; Voyage-3 is a closed-API embedding with top English and code retrieval quality. Pick by self-host vs managed trade-off.",
      "category": "comparisons",
      "categoryTitle": "Model & Tool Comparisons",
      "pillar": "Capability",
      "keywords": [
        "BGE-M3",
        "Voyage-3",
        "embeddings",
        "RAG",
        "hybrid search"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Flux 1 Pro vs Midjourney v6.1",
      "slug": "black-forest-flux-1-pro-vs-midjourney-v6-1",
      "url": "https://learn.engineering.vips.edu/compare/black-forest-flux-1-pro-vs-midjourney-v6-1",
      "description": "Flux 1 Pro is Black Forest Labs' API / self-hostable flagship. Midjourney v6.1 is the aesthetic-favourite Discord / web product. Pick by whether you need API access.",
      "category": "comparisons",
      "categoryTitle": "Model & Tool Comparisons",
      "pillar": "Capability",
      "keywords": [
        "Flux vs Midjourney",
        "Flux 1 Pro",
        "Midjourney v6.1",
        "image generation",
        "AI art"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Braintrust vs LangSmith",
      "slug": "braintrust-vs-langsmith",
      "url": "https://learn.engineering.vips.edu/compare/braintrust-vs-langsmith",
      "description": "Braintrust is an eval-first observability platform with strong offline testing; LangSmith is the LangChain-native tracing and evaluation stack. Pick by whether your stack is LangChain-centric or framework-agnostic.",
      "category": "comparisons",
      "categoryTitle": "Model & Tool Comparisons",
      "pillar": "Capability",
      "keywords": [
        "Braintrust",
        "LangSmith",
        "LLM observability",
        "LLM evaluation",
        "tracing"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "CAMEL-AI vs CrewAI",
      "slug": "camel-ai-vs-crewai",
      "url": "https://learn.engineering.vips.edu/compare/camel-ai-vs-crewai",
      "description": "CAMEL-AI is a research-focused Python framework for role-playing multi-agent simulations; CrewAI is a production-oriented framework for orchestrating collaborative agents with explicit roles, tasks, and tools.",
      "category": "comparisons",
      "categoryTitle": "Model & Tool Comparisons",
      "pillar": "Capability",
      "keywords": [
        "CAMEL-AI",
        "CrewAI",
        "multi-agent framework",
        "agent orchestration",
        "LLM agents"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Cartesia Sonic vs Deepgram Aura",
      "slug": "cartesia-sonic-vs-deepgram-aura",
      "url": "https://learn.engineering.vips.edu/compare/cartesia-sonic-vs-deepgram-aura",
      "description": "Cartesia Sonic and Deepgram Aura are two low-latency real-time TTS APIs designed for voice agents. Pick by latency target and voice quality needs.",
      "category": "comparisons",
      "categoryTitle": "Model & Tool Comparisons",
      "pillar": "Capability",
      "keywords": [
        "Cartesia Sonic vs Deepgram Aura",
        "real-time TTS",
        "voice agents",
        "streaming TTS",
        "low latency TTS"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Chain-of-Thought vs ReAct Pattern",
      "slug": "chain-of-thought-vs-react-pattern",
      "url": "https://learn.engineering.vips.edu/compare/chain-of-thought-vs-react-pattern",
      "description": "Chain-of-Thought makes the model think step-by-step; ReAct interleaves thinking with tool use. CoT is for pure reasoning; ReAct is for agents that need to act.",
      "category": "comparisons",
      "categoryTitle": "Model & Tool Comparisons",
      "pillar": "Capability",
      "keywords": [
        "chain of thought",
        "ReAct",
        "agent pattern",
        "LLM prompting",
        "LangChain"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Chain-of-Thought vs Tree-of-Thoughts",
      "slug": "chain-of-thought-vs-tree-of-thoughts",
      "url": "https://learn.engineering.vips.edu/compare/chain-of-thought-vs-tree-of-thoughts",
      "description": "Chain-of-Thought makes a model reason step by step in a single sequence. Tree-of-Thoughts explores multiple reasoning branches and chooses the best. Pick by problem shape.",
      "category": "comparisons",
      "categoryTitle": "Model & Tool Comparisons",
      "pillar": "Creativity",
      "keywords": [
        "CoT vs ToT",
        "chain of thought",
        "tree of thoughts",
        "reasoning prompting",
        "LLM reasoning"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Chroma vs Qdrant",
      "slug": "chroma-vs-qdrant",
      "url": "https://learn.engineering.vips.edu/compare/chroma-vs-qdrant",
      "description": "Chroma is a developer-first embedded vector database ideal for prototypes; Qdrant is a production-grade vector search engine with stronger filtering, scalability, and self-hosted maturity. Pick by deployment scale.",
      "category": "comparisons",
      "categoryTitle": "Model & Tool Comparisons",
      "pillar": "Capability",
      "keywords": [
        "Chroma",
        "Qdrant",
        "vector database",
        "RAG",
        "similarity search"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Claude Opus 4.7 vs Gemini 2.5 Pro",
      "slug": "claude-opus-4-7-vs-google-gemini-2-5-pro",
      "url": "https://learn.engineering.vips.edu/compare/claude-opus-4-7-vs-google-gemini-2-5-pro",
      "description": "Claude Opus 4.7 leads on coding agents and tool reliability; Gemini 2.5 Pro leads on context size (2M), video understanding, and Google Workspace integration.",
      "category": "comparisons",
      "categoryTitle": "Model & Tool Comparisons",
      "pillar": "Capability",
      "keywords": [
        "Claude vs Gemini",
        "Gemini 2.5 Pro",
        "long context",
        "video understanding"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Claude Opus 4.7 vs GPT-5",
      "slug": "claude-opus-4-7-vs-gpt-5",
      "url": "https://learn.engineering.vips.edu/compare/claude-opus-4-7-vs-gpt-5",
      "description": "Claude Opus 4.7 wins for long-horizon coding agents and tool reliability; GPT-5 wins for multimodal (esp. audio), ecosystem breadth, and general-purpose latency. Pick by workload.",
      "category": "comparisons",
      "categoryTitle": "Model & Tool Comparisons",
      "pillar": "Capability",
      "keywords": [
        "Claude vs GPT",
        "Claude Opus 4.7",
        "GPT-5",
        "LLM comparison",
        "coding agent"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Closed API vs Self-Hosted LLM",
      "slug": "closed-api-vs-self-hosted-llm",
      "url": "https://learn.engineering.vips.edu/compare/closed-api-vs-self-hosted-llm",
      "description": "Closed APIs (OpenAI, Anthropic, Google) give you the best models with zero ops; self-hosted LLMs give you data control, cost predictability at scale, and customisation. Pick by your constraints, not your ideology.",
      "category": "comparisons",
      "categoryTitle": "Model & Tool Comparisons",
      "pillar": "Capability",
      "keywords": [
        "closed LLM",
        "self-hosted LLM",
        "open weights",
        "data sovereignty",
        "LLM cost"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Cohere Embed v3 vs OpenAI text-embedding-3-large",
      "slug": "cohere-embed-v3-vs-openai-text-embedding-3-large",
      "url": "https://learn.engineering.vips.edu/compare/cohere-embed-v3-vs-openai-text-embedding-3-large",
      "description": "Cohere Embed v3 offers strong multilingual quality with compression-aware embeddings; OpenAI text-embedding-3-large leads on English retrieval quality with flexible dimensionality. Pick by language mix and ecosystem.",
      "category": "comparisons",
      "categoryTitle": "Model & Tool Comparisons",
      "pillar": "Capability",
      "keywords": [
        "Cohere Embed v3",
        "text-embedding-3-large",
        "embeddings",
        "RAG",
        "multilingual"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Cohere Rerank 3 vs Jina Reranker v2",
      "slug": "cohere-rerank-3-vs-jina-reranker-v2",
      "url": "https://learn.engineering.vips.edu/compare/cohere-rerank-3-vs-jina-reranker-v2",
      "description": "Cohere Rerank 3 and Jina Reranker v2 are two leading API cross-encoder rerankers. Cohere leads on benchmark quality; Jina leads on latency and self-hostable options.",
      "category": "comparisons",
      "categoryTitle": "Model & Tool Comparisons",
      "pillar": "Capability",
      "keywords": [
        "Cohere Rerank vs Jina",
        "reranker",
        "cross-encoder",
        "RAG rerank",
        "retrieval"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Constitutional AI vs RLHF",
      "slug": "constitutional-ai-vs-rlhf",
      "url": "https://learn.engineering.vips.edu/compare/constitutional-ai-vs-rlhf",
      "description": "RLHF trains models on human preference labels. Constitutional AI uses a written constitution plus AI self-critique (RLAIF). Pick by scale and alignment philosophy.",
      "category": "comparisons",
      "categoryTitle": "Model & Tool Comparisons",
      "pillar": "Creativity",
      "keywords": [
        "Constitutional AI vs RLHF",
        "RLHF",
        "Constitutional AI",
        "alignment",
        "RLAIF"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "CrewAI vs LangGraph",
      "slug": "crewai-vs-langgraph",
      "url": "https://learn.engineering.vips.edu/compare/crewai-vs-langgraph",
      "description": "CrewAI emphasizes role-based agent teams with a high-level API; LangGraph emphasizes explicit state graphs. CrewAI is easier to start; LangGraph is more powerful.",
      "category": "comparisons",
      "categoryTitle": "Model & Tool Comparisons",
      "pillar": "Capability",
      "keywords": [
        "CrewAI",
        "LangGraph",
        "agent framework",
        "multi-agent",
        "LLM orchestration"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "DeepEval vs Giskard",
      "slug": "deepeval-vs-giskard",
      "url": "https://learn.engineering.vips.edu/compare/deepeval-vs-giskard",
      "description": "DeepEval is an open-source LLM evaluation framework (pytest-style). Giskard is a broader ML testing and scanning platform with LLM features. Pick by whether you're LLM-only or broader ML.",
      "category": "comparisons",
      "categoryTitle": "Model & Tool Comparisons",
      "pillar": "Capability",
      "keywords": [
        "DeepEval vs Giskard",
        "LLM evaluation",
        "LLM testing",
        "RAG evaluation",
        "AI testing"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Deepgram Nova-3 vs OpenAI Whisper v3",
      "slug": "deepgram-nova-3-vs-openai-whisper-v3",
      "url": "https://learn.engineering.vips.edu/compare/deepgram-nova-3-vs-openai-whisper-v3",
      "description": "Deepgram Nova-3 wins on real-time streaming latency, speaker diarisation, and noisy-audio accuracy; Whisper v3 wins on multilingual coverage and open-source self-hosting. Pick by latency and language needs.",
      "category": "comparisons",
      "categoryTitle": "Model & Tool Comparisons",
      "pillar": "Capability",
      "keywords": [
        "Deepgram Nova-3",
        "Whisper v3",
        "speech to text",
        "ASR",
        "real-time transcription"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "DeepSeek Coder V2 vs Mistral Codestral",
      "slug": "deepseek-coder-v2-vs-mistral-codestral",
      "url": "https://learn.engineering.vips.edu/compare/deepseek-coder-v2-vs-mistral-codestral",
      "description": "Two open-weights coding specialists. DeepSeek Coder V2 is MoE, 128k context, multi-file strong. Codestral is dense, fast, tuned for IDE completion across 80+ languages.",
      "category": "comparisons",
      "categoryTitle": "Model & Tool Comparisons",
      "pillar": "Capability",
      "keywords": [
        "DeepSeek Coder V2",
        "Mistral Codestral",
        "open code LLM",
        "IDE completion"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "DeepSeek R1 vs OpenAI o1",
      "slug": "deepseek-r1-vs-openai-o1",
      "url": "https://learn.engineering.vips.edu/compare/deepseek-r1-vs-openai-o1",
      "description": "DeepSeek R1 and OpenAI o1 are reasoning-first models. R1 is open-weight and dramatically cheaper; o1 is the closed-source original, with broader ecosystem support.",
      "category": "comparisons",
      "categoryTitle": "Model & Tool Comparisons",
      "pillar": "Capability",
      "keywords": [
        "DeepSeek R1",
        "OpenAI o1",
        "reasoning model",
        "open-weight LLM",
        "LLM comparison"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "DeepSeek R1 vs OpenAI o3",
      "slug": "deepseek-r1-vs-openai-o3",
      "url": "https://learn.engineering.vips.edu/compare/deepseek-r1-vs-openai-o3",
      "description": "DeepSeek R1 is the leading open-weights reasoning model; OpenAI o3 is the closed frontier. o3 leads on hardest reasoning; R1 is available for self-hosting and is 10-20x cheaper via API.",
      "category": "comparisons",
      "categoryTitle": "Model & Tool Comparisons",
      "pillar": "Capability",
      "keywords": [
        "DeepSeek R1",
        "OpenAI o3",
        "reasoning LLM",
        "open vs closed",
        "o1 o3 comparison"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "DeepSeek V3 vs Llama 3.1 405B",
      "slug": "deepseek-v3-vs-meta-llama-3-1-405b",
      "url": "https://learn.engineering.vips.edu/compare/deepseek-v3-vs-meta-llama-3-1-405b",
      "description": "DeepSeek V3 and Llama 3.1 405B are the two landmark open-weight dense/MoE models. V3 is more efficient and stronger at coding; 405B has simpler deployment and a larger ecosystem.",
      "category": "comparisons",
      "categoryTitle": "Model & Tool Comparisons",
      "pillar": "Capability",
      "keywords": [
        "DeepSeek V3",
        "Llama 3.1 405B",
        "open-weight LLM",
        "MoE",
        "LLM comparison"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "DeepSpeed vs HuggingFace Accelerate",
      "slug": "deepspeed-vs-accelerate-huggingface",
      "url": "https://learn.engineering.vips.edu/compare/deepspeed-vs-accelerate-huggingface",
      "description": "DeepSpeed is Microsoft's high-performance distributed training engine with ZeRO sharding and offload; HuggingFace Accelerate is a lightweight wrapper that makes any PyTorch training loop run across devices, often using DeepSpeed or FSDP under the hood.",
      "category": "comparisons",
      "categoryTitle": "Model & Tool Comparisons",
      "pillar": "Capability",
      "keywords": [
        "DeepSpeed",
        "Accelerate",
        "distributed training",
        "ZeRO",
        "fine-tuning"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Dify vs Flowise",
      "slug": "dify-vs-flowise",
      "url": "https://learn.engineering.vips.edu/compare/dify-vs-flowise",
      "description": "Dify and Flowise are visual LLM app builders. Dify is an opinionated LLMOps platform with RAG, agents, and eval built in; Flowise is a LangChain-native node editor with more flexibility.",
      "category": "comparisons",
      "categoryTitle": "Model & Tool Comparisons",
      "pillar": "Capability",
      "keywords": [
        "Dify",
        "Flowise",
        "visual LLM builder",
        "no-code AI",
        "LLMOps"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Dify vs Langflow",
      "slug": "dify-vs-langflow",
      "url": "https://learn.engineering.vips.edu/compare/dify-vs-langflow",
      "description": "Dify is an opinionated LLMOps platform; Langflow is a LangChain-native visual IDE backed by DataStax. Dify wins on ops breadth; Langflow wins on code-centric extensibility.",
      "category": "comparisons",
      "categoryTitle": "Model & Tool Comparisons",
      "pillar": "Capability",
      "keywords": [
        "Dify",
        "Langflow",
        "visual LLM builder",
        "LangChain",
        "LLMOps"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Distillation vs Quantization",
      "slug": "distillation-vs-quantization",
      "url": "https://learn.engineering.vips.edu/compare/distillation-vs-quantization",
      "description": "Distillation trains a smaller student model to mimic a larger teacher; quantization reduces the precision of existing weights. Distillation trades extra training cost for a smaller model; quantization trades a small accuracy loss for cheaper inference.",
      "category": "comparisons",
      "categoryTitle": "Model & Tool Comparisons",
      "pillar": "Capability",
      "keywords": [
        "distillation",
        "quantization",
        "LLM compression",
        "model optimization",
        "inference cost"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "DSPy vs LangChain",
      "slug": "dspy-vs-langchain",
      "url": "https://learn.engineering.vips.edu/compare/dspy-vs-langchain",
      "description": "DSPy is a prompt-programming framework that compiles prompts from training data; LangChain is a general LLM orchestration library with tools, memory, and agents. Use DSPy for optimised pipelines; LangChain for general application plumbing.",
      "category": "comparisons",
      "categoryTitle": "Model & Tool Comparisons",
      "pillar": "Capability",
      "keywords": [
        "DSPy",
        "LangChain",
        "prompt optimization",
        "LLM framework",
        "agent framework"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "DSPy vs TextGrad",
      "slug": "dspy-vs-textgrad",
      "url": "https://learn.engineering.vips.edu/compare/dspy-vs-textgrad",
      "description": "DSPy compiles prompt programs and optimizes them against metrics. TextGrad applies 'textual gradient' optimization across LLM modules. Both automate prompt-and-module tuning — pick by approach.",
      "category": "comparisons",
      "categoryTitle": "Model & Tool Comparisons",
      "pillar": "Capability",
      "keywords": [
        "DSPy vs TextGrad",
        "prompt optimization",
        "prompt compilation",
        "LLM programming"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Elasticsearch vs Weaviate",
      "slug": "elasticsearch-vs-weaviate",
      "url": "https://learn.engineering.vips.edu/compare/elasticsearch-vs-weaviate",
      "description": "Elasticsearch is the mature keyword and full-text search engine that recently added vector search; Weaviate is a vector-first database with strong hybrid search and built-in AI modules. Pick by which search mode is primary.",
      "category": "comparisons",
      "categoryTitle": "Model & Tool Comparisons",
      "pillar": "Capability",
      "keywords": [
        "Elasticsearch",
        "Weaviate",
        "hybrid search",
        "vector database",
        "search engine"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "ElevenLabs Multilingual v2 vs OpenAI TTS-HD",
      "slug": "elevenlabs-multilingual-v2-vs-openai-tts-hd",
      "url": "https://learn.engineering.vips.edu/compare/elevenlabs-multilingual-v2-vs-openai-tts-hd",
      "description": "ElevenLabs Multilingual v2 and OpenAI TTS-HD are the two mainstream API text-to-speech models. ElevenLabs leads on voice quality and cloning; OpenAI leads on price and ecosystem.",
      "category": "comparisons",
      "categoryTitle": "Model & Tool Comparisons",
      "pillar": "Capability",
      "keywords": [
        "ElevenLabs vs OpenAI TTS",
        "text to speech",
        "voice AI",
        "ElevenLabs Multilingual v2",
        "OpenAI TTS-HD"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Few-Shot Prompting vs Fine-Tuning",
      "slug": "few-shot-prompting-vs-fine-tuning",
      "url": "https://learn.engineering.vips.edu/compare/few-shot-prompting-vs-fine-tuning",
      "description": "Few-shot prompting teaches a model at inference time via examples. Fine-tuning updates weights on your data. Pick by volume, task stability, and cost structure.",
      "category": "comparisons",
      "categoryTitle": "Model & Tool Comparisons",
      "pillar": "Creativity",
      "keywords": [
        "few-shot vs fine-tuning",
        "few-shot prompting",
        "fine-tuning LLM",
        "prompt engineering"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Fine-Tuning vs Retrieval-Augmented Generation (RAG)",
      "slug": "fine-tuning-vs-retrieval-augmented-generation",
      "url": "https://learn.engineering.vips.edu/compare/fine-tuning-vs-retrieval-augmented-generation",
      "description": "Fine-tuning bakes knowledge into model weights; RAG retrieves it at inference time. Use RAG for facts that change; fine-tune for behavior, format, and style.",
      "category": "comparisons",
      "categoryTitle": "Model & Tool Comparisons",
      "pillar": "Capability",
      "keywords": [
        "RAG",
        "fine-tuning",
        "LLM architecture",
        "retrieval augmented generation",
        "LoRA"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Firecrawl vs Jina Reader",
      "slug": "firecrawl-vs-jina-reader",
      "url": "https://learn.engineering.vips.edu/compare/firecrawl-vs-jina-reader",
      "description": "Firecrawl and Jina Reader turn web pages into LLM-ready Markdown. Firecrawl is crawl-first with a JS-heavy renderer; Jina Reader is fast single-URL fetch with a free public endpoint.",
      "category": "comparisons",
      "categoryTitle": "Model & Tool Comparisons",
      "pillar": "Capability",
      "keywords": [
        "Firecrawl",
        "Jina Reader",
        "web scraping",
        "LLM ingestion",
        "RAG"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Flowise vs Langflow",
      "slug": "flowise-vs-langflow",
      "url": "https://learn.engineering.vips.edu/compare/flowise-vs-langflow",
      "description": "Both are visual LangChain app builders. Flowise is Node.js/TypeScript-native; Langflow is Python-native, backed by DataStax. Pick by runtime preference and ecosystem.",
      "category": "comparisons",
      "categoryTitle": "Model & Tool Comparisons",
      "pillar": "Capability",
      "keywords": [
        "Flowise",
        "Langflow",
        "visual LangChain",
        "no-code LLM"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Full Fine-Tuning vs LoRA",
      "slug": "full-fine-tuning-vs-lora",
      "url": "https://learn.engineering.vips.edu/compare/full-fine-tuning-vs-lora",
      "description": "Full fine-tuning updates every parameter; LoRA updates only small adapter matrices. LoRA is cheaper and composable; full fine-tuning is stronger when done right.",
      "category": "comparisons",
      "categoryTitle": "Model & Tool Comparisons",
      "pillar": "Capability",
      "keywords": [
        "LoRA",
        "full fine-tuning",
        "PEFT",
        "parameter-efficient fine-tuning",
        "LLM training"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Function Calling vs MCP Tools",
      "slug": "function-calling-vs-mcp-tools",
      "url": "https://learn.engineering.vips.edu/compare/function-calling-vs-mcp-tools",
      "description": "Function calling is a per-provider API that lets the model call JSON-schema-described tools; MCP (Model Context Protocol) is an Anthropic-authored open standard for connecting any client to any tool or data server over a uniform protocol.",
      "category": "comparisons",
      "categoryTitle": "Model & Tool Comparisons",
      "pillar": "Creativity",
      "keywords": [
        "function calling",
        "MCP",
        "Model Context Protocol",
        "tool use",
        "LLM agents"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Gemini 1.5 Flash vs Gemini 1.5 Pro",
      "slug": "google-gemini-1-5-flash-vs-google-gemini-1-5-pro",
      "url": "https://learn.engineering.vips.edu/compare/google-gemini-1-5-flash-vs-google-gemini-1-5-pro",
      "description": "Gemini 1.5 Flash wins on cost and latency for high-volume tasks; Gemini 1.5 Pro wins on reasoning, long-context depth, and multimodal fidelity. Route by task complexity within the same family.",
      "category": "comparisons",
      "categoryTitle": "Model & Tool Comparisons",
      "pillar": "Capability",
      "keywords": [
        "Gemini 1.5 Flash",
        "Gemini 1.5 Pro",
        "Google",
        "long context",
        "multimodal"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Gemini 1.5 Pro vs Gemini 2.5 Pro",
      "slug": "google-gemini-1-5-pro-vs-google-gemini-2-5-pro",
      "url": "https://learn.engineering.vips.edu/compare/google-gemini-1-5-pro-vs-google-gemini-2-5-pro",
      "description": "Gemini 1.5 Pro pioneered 1M-token context; Gemini 2.5 Pro extends that with stronger reasoning, faster latency, and 2M context. 2.5 Pro is a strict upgrade for new work.",
      "category": "comparisons",
      "categoryTitle": "Model & Tool Comparisons",
      "pillar": "Capability",
      "keywords": [
        "Gemini 1.5 Pro",
        "Gemini 2.5 Pro",
        "long context LLM",
        "Google AI",
        "LLM comparison"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Gemini 1.5 Pro vs GPT-4o",
      "slug": "google-gemini-1-5-pro-vs-openai-gpt-4o",
      "url": "https://learn.engineering.vips.edu/compare/google-gemini-1-5-pro-vs-openai-gpt-4o",
      "description": "Two 2024-era flagships, both legacy. Gemini 1.5 Pro led on long context (up to 2M) and video; GPT-4o led on reasoning and ecosystem. Use this page to plan migration.",
      "category": "comparisons",
      "categoryTitle": "Model & Tool Comparisons",
      "pillar": "Capability",
      "keywords": [
        "Gemini 1.5 Pro",
        "GPT-4o",
        "legacy flagship LLM",
        "long context",
        "migration"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Gemini 2.0 Flash vs Gemini 2.5 Flash",
      "slug": "google-gemini-2-0-flash-vs-google-gemini-2-5-flash",
      "url": "https://learn.engineering.vips.edu/compare/google-gemini-2-0-flash-vs-google-gemini-2-5-flash",
      "description": "Gemini 2.0 Flash was Google's 2024-era fast mid-tier model; 2.5 Flash adds a thinking budget, stronger reasoning, better multimodal grounding, and longer context at a similar price point.",
      "category": "comparisons",
      "categoryTitle": "Model & Tool Comparisons",
      "pillar": "Capability",
      "keywords": [
        "Gemini 2.5 Flash",
        "Gemini 2.0 Flash",
        "Google AI",
        "thinking budget",
        "LLM comparison"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Gemini 2.0 Flash vs GPT-4o",
      "slug": "google-gemini-2-0-flash-vs-openai-gpt-4o",
      "url": "https://learn.engineering.vips.edu/compare/google-gemini-2-0-flash-vs-openai-gpt-4o",
      "description": "Two 2024-era multimodal workhorses, both now legacy. Gemini 2.0 Flash was Google's cheap fast model; GPT-4o was OpenAI's native-multimodal flagship. Use this to plan migration.",
      "category": "comparisons",
      "categoryTitle": "Model & Tool Comparisons",
      "pillar": "Capability",
      "keywords": [
        "Gemini 2.0 Flash",
        "GPT-4o",
        "legacy multimodal LLM",
        "migration"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Gemini 2.5 Flash vs GPT-5 mini",
      "slug": "google-gemini-2-5-flash-vs-openai-gpt-5-mini",
      "url": "https://learn.engineering.vips.edu/compare/google-gemini-2-5-flash-vs-openai-gpt-5-mini",
      "description": "Gemini 2.5 Flash and GPT-5 mini are the two dominant cheap mid-tier models. Flash wins on price and context length; GPT-5 mini wins on quality and ecosystem depth.",
      "category": "comparisons",
      "categoryTitle": "Model & Tool Comparisons",
      "pillar": "Capability",
      "keywords": [
        "Gemini 2.5 Flash",
        "GPT-5 mini",
        "cheap LLM",
        "LLM comparison",
        "RAG"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Gemini 2.5 Flash vs GPT-5 Nano",
      "slug": "google-gemini-2-5-flash-vs-openai-gpt-5-nano",
      "url": "https://learn.engineering.vips.edu/compare/google-gemini-2-5-flash-vs-openai-gpt-5-nano",
      "description": "Two fast, cheap workhorses: Gemini 2.5 Flash (Google) vs GPT-5 Nano (OpenAI). Flash wins on multimodal and long context; Nano wins on reasoning per dollar and structured outputs.",
      "category": "comparisons",
      "categoryTitle": "Model & Tool Comparisons",
      "pillar": "Capability",
      "keywords": [
        "Gemini 2.5 Flash",
        "GPT-5 Nano",
        "cheap LLM",
        "fast LLM comparison"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Gemini 2.5 Pro vs Llama 3.1 405B",
      "slug": "google-gemini-2-5-pro-vs-meta-llama-3-1-405b",
      "url": "https://learn.engineering.vips.edu/compare/google-gemini-2-5-pro-vs-meta-llama-3-1-405b",
      "description": "Gemini 2.5 Pro is a closed frontier model with huge context and native multimodality. Llama 3.1 405B is the largest open-weight Meta model — strong, downloadable, and self-hostable. Pick by open-weights need.",
      "category": "comparisons",
      "categoryTitle": "Model & Tool Comparisons",
      "pillar": "Capability",
      "keywords": [
        "Gemini vs Llama",
        "Gemini 2.5 Pro",
        "Llama 3.1 405B",
        "open-weights LLM",
        "self-hosted AI"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Gemini 2.5 Pro vs OpenAI o3",
      "slug": "google-gemini-2-5-pro-vs-openai-o3",
      "url": "https://learn.engineering.vips.edu/compare/google-gemini-2-5-pro-vs-openai-o3",
      "description": "Gemini 2.5 Pro wins on long-context reasoning, multimodal breadth, and cost; o3 wins on deep chain-of-thought reasoning, math, and tool-use under hard problems. Both are reasoning models — pick by whether you need context or depth.",
      "category": "comparisons",
      "categoryTitle": "Model & Tool Comparisons",
      "pillar": "Capability",
      "keywords": [
        "Gemini 2.5 Pro",
        "OpenAI o3",
        "reasoning models",
        "LLM comparison",
        "long context"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Gemma 2 9B vs Phi-4",
      "slug": "google-gemma-2-9b-vs-microsoft-phi-4",
      "url": "https://learn.engineering.vips.edu/compare/google-gemma-2-9b-vs-microsoft-phi-4",
      "description": "Gemma 2 9B is Google's small open-weights dense model. Phi-4 is Microsoft's 14B synthetic-data-trained small model — known for punching above its weight. Pick by task shape.",
      "category": "comparisons",
      "categoryTitle": "Model & Tool Comparisons",
      "pillar": "Capability",
      "keywords": [
        "Phi-4 vs Gemma",
        "small LLM",
        "local LLM",
        "edge AI",
        "on-device AI"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Gemma 3 27B vs Llama 3.1 8B Instruct",
      "slug": "google-gemma-3-27b-vs-meta-llama-3-1-8b-instruct",
      "url": "https://learn.engineering.vips.edu/compare/google-gemma-3-27b-vs-meta-llama-3-1-8b-instruct",
      "description": "Gemma 3 27B is Google's flagship open-weights mid-size model; Llama 3.1 8B is Meta's small workhorse. Gemma is stronger on quality; Llama is 3x smaller and far cheaper to serve.",
      "category": "comparisons",
      "categoryTitle": "Model & Tool Comparisons",
      "pillar": "Capability",
      "keywords": [
        "Gemma 3",
        "Llama 3.1 8B",
        "open-weights small LLM",
        "self-hosted LLM"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Imagen 3 vs DALL·E 3",
      "slug": "google-imagen-3-vs-openai-dall-e-3",
      "url": "https://learn.engineering.vips.edu/compare/google-imagen-3-vs-openai-dall-e-3",
      "description": "Imagen 3 (Google) and DALL·E 3 (OpenAI) are the two mainstream API image generators. Imagen 3 leads on photorealism and text rendering; DALL·E 3 leads on prompt following and ecosystem.",
      "category": "comparisons",
      "categoryTitle": "Model & Tool Comparisons",
      "pillar": "Capability",
      "keywords": [
        "Imagen 3 vs DALL-E 3",
        "image generation",
        "text to image",
        "Google Imagen",
        "DALL-E"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Google Imagen 3 vs Stable Diffusion 3.5 Large",
      "slug": "google-imagen-3-vs-stability-stable-diffusion-3-5-large",
      "url": "https://learn.engineering.vips.edu/compare/google-imagen-3-vs-stability-stable-diffusion-3-5-large",
      "description": "Google Imagen 3 is a closed-API text-to-image model with high photorealism and strong prompt adherence; Stable Diffusion 3.5 Large is Stability AI's open-weights 8B MM-DiT model tuned for self-hosted creative pipelines.",
      "category": "comparisons",
      "categoryTitle": "Model & Tool Comparisons",
      "pillar": "Capability",
      "keywords": [
        "Imagen 3",
        "Stable Diffusion 3.5",
        "text to image",
        "open weights image model",
        "image generation"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Veo 3 vs Sora",
      "slug": "google-veo-3-vs-openai-sora",
      "url": "https://learn.engineering.vips.edu/compare/google-veo-3-vs-openai-sora",
      "description": "Google Veo 3 and OpenAI Sora are the two most capable generalist text-to-video models. Veo 3 leads on motion realism and duration; Sora leads on prompt following and ecosystem.",
      "category": "comparisons",
      "categoryTitle": "Model & Tool Comparisons",
      "pillar": "Capability",
      "keywords": [
        "Veo 3 vs Sora",
        "text to video",
        "AI video",
        "Google Veo",
        "OpenAI Sora"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "GPT Engineer vs Open Interpreter",
      "slug": "gpt-engineer-vs-open-interpreter",
      "url": "https://learn.engineering.vips.edu/compare/gpt-engineer-vs-open-interpreter",
      "description": "GPT Engineer scaffolds whole projects from a natural-language spec; Open Interpreter is a local shell-like agent that runs code on your machine to accomplish tasks step by step.",
      "category": "comparisons",
      "categoryTitle": "Model & Tool Comparisons",
      "pillar": "Capability",
      "keywords": [
        "GPT Engineer",
        "Open Interpreter",
        "coding agent",
        "code generation",
        "open source"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Groq vs Together AI",
      "slug": "groq-vs-together-ai",
      "url": "https://learn.engineering.vips.edu/compare/groq-vs-together-ai",
      "description": "Groq and Together AI both host open-weights LLMs behind an API. Groq specializes in ultra-low-latency inference on LPU hardware; Together AI offers the broadest model catalogue on GPUs.",
      "category": "comparisons",
      "categoryTitle": "Model & Tool Comparisons",
      "pillar": "Capability",
      "keywords": [
        "Groq vs Together",
        "LLM inference",
        "LPU",
        "open-weights hosting",
        "fast LLM"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Guidance vs Outlines",
      "slug": "guidance-vs-outlines",
      "url": "https://learn.engineering.vips.edu/compare/guidance-vs-outlines",
      "description": "Guidance (Microsoft) and Outlines are both libraries for constrained generation — forcing LLM output to conform to schemas, regex, or grammars. Pick by model backend and language.",
      "category": "comparisons",
      "categoryTitle": "Model & Tool Comparisons",
      "pillar": "Capability",
      "keywords": [
        "Guidance vs Outlines",
        "constrained generation",
        "structured outputs",
        "JSON mode",
        "grammar"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Haystack vs LlamaIndex",
      "slug": "haystack-vs-llamaindex",
      "url": "https://learn.engineering.vips.edu/compare/haystack-vs-llamaindex",
      "description": "Haystack is a pipeline-oriented RAG framework with strong production defaults; LlamaIndex is a data-ingestion-first framework with the largest connector catalogue. Pick by whether you start from pipelines or from data.",
      "category": "comparisons",
      "categoryTitle": "Model & Tool Comparisons",
      "pillar": "Capability",
      "keywords": [
        "Haystack",
        "LlamaIndex",
        "RAG framework",
        "retrieval",
        "LLM orchestration"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Haystack vs R2R",
      "slug": "haystack-vs-r2r",
      "url": "https://learn.engineering.vips.edu/compare/haystack-vs-r2r",
      "description": "Haystack is a mature Python framework from deepset for building RAG, search, and agent pipelines with composable components; R2R is a newer, opinionated production RAG engine with built-in ingestion, GraphRAG, and evaluation.",
      "category": "comparisons",
      "categoryTitle": "Model & Tool Comparisons",
      "pillar": "Capability",
      "keywords": [
        "Haystack",
        "R2R",
        "RAG framework",
        "retrieval augmented generation",
        "GraphRAG"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Helicone vs Langfuse",
      "slug": "helicone-vs-langfuse",
      "url": "https://learn.engineering.vips.edu/compare/helicone-vs-langfuse",
      "description": "Helicone is a proxy-based LLM observability platform that requires zero SDK changes; Langfuse is an OpenTelemetry-native platform with deeper tracing and self-hosted maturity. Pick by how much integration you're willing to do.",
      "category": "comparisons",
      "categoryTitle": "Model & Tool Comparisons",
      "pillar": "Capability",
      "keywords": [
        "Helicone",
        "Langfuse",
        "LLM observability",
        "tracing",
        "self-hosted"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Hybrid Search vs Vector Search",
      "slug": "hybrid-search-vs-vector-search",
      "url": "https://learn.engineering.vips.edu/compare/hybrid-search-vs-vector-search",
      "description": "Vector search uses dense embeddings; hybrid search blends vector with keyword (BM25) for better precision on rare terms and exact matches. Most production RAG should use hybrid.",
      "category": "comparisons",
      "categoryTitle": "Model & Tool Comparisons",
      "pillar": "Creativity",
      "keywords": [
        "hybrid search vs vector search",
        "hybrid search",
        "BM25",
        "dense retrieval",
        "RAG"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "In-Context Learning vs Fine-Tuning",
      "slug": "in-context-learning-vs-fine-tuning",
      "url": "https://learn.engineering.vips.edu/compare/in-context-learning-vs-fine-tuning",
      "description": "In-context learning adapts model behaviour by putting examples and instructions into the prompt; fine-tuning adapts the model's parameters to a specific dataset or style and persists across requests.",
      "category": "comparisons",
      "categoryTitle": "Model & Tool Comparisons",
      "pillar": "Creativity",
      "keywords": [
        "in context learning",
        "fine tuning",
        "LoRA",
        "few shot",
        "LLM customisation"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Instructor vs Pydantic AI",
      "slug": "instructor-vs-pydantic-ai",
      "url": "https://learn.engineering.vips.edu/compare/instructor-vs-pydantic-ai",
      "description": "Instructor is a thin library that patches LLM clients for typed, validated outputs; Pydantic AI is a full agent framework built on the same Pydantic foundation. Pick by whether you need a wrapper or a framework.",
      "category": "comparisons",
      "categoryTitle": "Model & Tool Comparisons",
      "pillar": "Capability",
      "keywords": [
        "Instructor",
        "Pydantic AI",
        "structured outputs",
        "LLM framework",
        "typed agents"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Jina Embeddings v3 vs Voyage AI voyage-3",
      "slug": "jina-embeddings-v3-vs-voyage-ai-voyage-3",
      "url": "https://learn.engineering.vips.edu/compare/jina-embeddings-v3-vs-voyage-ai-voyage-3",
      "description": "Jina Embeddings v3 is an open-weights multilingual embedding model with task-specific LoRA adapters; voyage-3 is Voyage AI's closed-API general-purpose model optimised for retrieval quality across English and code.",
      "category": "comparisons",
      "categoryTitle": "Model & Tool Comparisons",
      "pillar": "Capability",
      "keywords": [
        "Jina embeddings",
        "Voyage AI",
        "voyage-3",
        "embedding model",
        "semantic search"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "LanceDB vs pgvector",
      "slug": "lancedb-vs-pgvector",
      "url": "https://learn.engineering.vips.edu/compare/lancedb-vs-pgvector",
      "description": "LanceDB is an embedded, columnar vector database; pgvector is a Postgres extension. LanceDB wins on analytical + vector scale; pgvector wins on simplicity and SQL integration.",
      "category": "comparisons",
      "categoryTitle": "Model & Tool Comparisons",
      "pillar": "Capability",
      "keywords": [
        "LanceDB",
        "pgvector",
        "vector database",
        "Postgres",
        "embedded DB"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "LangChain vs LlamaIndex",
      "slug": "langchain-vs-llamaindex",
      "url": "https://learn.engineering.vips.edu/compare/langchain-vs-llamaindex",
      "description": "LangChain is the general agent & orchestration framework; LlamaIndex is the retrieval-over-your-data framework. They often coexist — RAG layer in LlamaIndex, agent layer in LangChain.",
      "category": "comparisons",
      "categoryTitle": "Model & Tool Comparisons",
      "pillar": "Capability",
      "keywords": [
        "LangChain",
        "LlamaIndex",
        "RAG",
        "orchestration",
        "agents",
        "framework"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Langfuse vs LangSmith",
      "slug": "langfuse-vs-langsmith",
      "url": "https://learn.engineering.vips.edu/compare/langfuse-vs-langsmith",
      "description": "Langfuse and LangSmith are the two leading LLM observability tools. LangSmith is the first-party LangChain option; Langfuse is open-source and framework-agnostic.",
      "category": "comparisons",
      "categoryTitle": "Model & Tool Comparisons",
      "pillar": "Capability",
      "keywords": [
        "Langfuse",
        "LangSmith",
        "LLM observability",
        "LLM tracing",
        "LLM evals"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "LangGraph vs OpenAI Agents SDK",
      "slug": "langgraph-vs-openai-agents-sdk",
      "url": "https://learn.engineering.vips.edu/compare/langgraph-vs-openai-agents-sdk",
      "description": "LangGraph is a provider-agnostic agent state-graph framework; the OpenAI Agents SDK is OpenAI's first-party orchestration layer. LangGraph wins on portability; the SDK wins on OpenAI-native integration.",
      "category": "comparisons",
      "categoryTitle": "Model & Tool Comparisons",
      "pillar": "Capability",
      "keywords": [
        "LangGraph",
        "OpenAI Agents SDK",
        "agent framework",
        "Responses API",
        "Assistants API"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "LiteLLM vs OpenRouter",
      "slug": "litellm-vs-openrouter",
      "url": "https://learn.engineering.vips.edu/compare/litellm-vs-openrouter",
      "description": "LiteLLM is an open-source Python library / proxy that unifies LLM APIs. OpenRouter is a hosted service that routes across 200+ models with one key. Pick by whether you want a library or a service.",
      "category": "comparisons",
      "categoryTitle": "Model & Tool Comparisons",
      "pillar": "Capability",
      "keywords": [
        "LiteLLM vs OpenRouter",
        "LLM gateway",
        "LLM routing",
        "multi-model",
        "LLM proxy"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "LiteLLM vs Portkey",
      "slug": "litellm-vs-portkey",
      "url": "https://learn.engineering.vips.edu/compare/litellm-vs-portkey",
      "description": "LiteLLM is an open-source (self-hosted) LLM gateway. Portkey is a managed AI gateway with observability and guardrails. Pick by whether you want to self-host.",
      "category": "comparisons",
      "categoryTitle": "Model & Tool Comparisons",
      "pillar": "Capability",
      "keywords": [
        "LiteLLM vs Portkey",
        "AI gateway",
        "LLM observability",
        "LLM proxy",
        "multi-model routing"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "LitGPT vs Axolotl",
      "slug": "litgpt-vs-axolotl",
      "url": "https://learn.engineering.vips.edu/compare/litgpt-vs-axolotl",
      "description": "LitGPT is a PyTorch Lightning-native LLM training framework; Axolotl is a YAML-config fine-tuning toolkit. LitGPT for control and from-scratch training; Axolotl for config-first adapter finetuning.",
      "category": "comparisons",
      "categoryTitle": "Model & Tool Comparisons",
      "pillar": "Capability",
      "keywords": [
        "LitGPT",
        "Axolotl",
        "LLM fine-tuning",
        "LoRA",
        "PyTorch Lightning"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Marker vs Unstructured.io",
      "slug": "marker-pdf-vs-unstructured-io",
      "url": "https://learn.engineering.vips.edu/compare/marker-pdf-vs-unstructured-io",
      "description": "Marker is a fast GPU-friendly PDF-to-Markdown converter focused on high-fidelity text, tables, and math; Unstructured.io is a broader document-ingestion platform that parses PDFs, Office files, HTML, images, and more into structured elements for RAG.",
      "category": "comparisons",
      "categoryTitle": "Model & Tool Comparisons",
      "pillar": "Capability",
      "keywords": [
        "Marker PDF",
        "Unstructured.io",
        "document parsing",
        "PDF to markdown",
        "RAG ingestion"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Marvin vs Pydantic AI",
      "slug": "marvin-vs-pydantic-ai",
      "url": "https://learn.engineering.vips.edu/compare/marvin-vs-pydantic-ai",
      "description": "Marvin is a high-level AI toolkit for Python that uses Pydantic under the hood. Pydantic AI is an agent framework from the Pydantic team. Both prioritize type-safe structured outputs.",
      "category": "comparisons",
      "categoryTitle": "Model & Tool Comparisons",
      "pillar": "Capability",
      "keywords": [
        "Marvin vs Pydantic AI",
        "structured outputs",
        "Python LLM framework",
        "agent framework"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "MCP Server vs OpenAI Function Calling",
      "slug": "mcp-server-vs-openai-function-calling",
      "url": "https://learn.engineering.vips.edu/compare/mcp-server-vs-openai-function-calling",
      "description": "MCP (Model Context Protocol) standardizes tool definitions across models. OpenAI function calling is vendor-specific and defined per request. Pick by ecosystem portability need.",
      "category": "comparisons",
      "categoryTitle": "Model & Tool Comparisons",
      "pillar": "Creativity",
      "keywords": [
        "MCP vs function calling",
        "Model Context Protocol",
        "function calling",
        "LLM tools"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "MCP vs A2A Protocol",
      "slug": "mcp-vs-a2a-protocol",
      "url": "https://learn.engineering.vips.edu/compare/mcp-vs-a2a-protocol",
      "description": "MCP (Anthropic) standardizes how LLMs call tools and data sources; A2A (Google) standardizes how agents talk to other agents. They solve adjacent, not overlapping, problems.",
      "category": "comparisons",
      "categoryTitle": "Model & Tool Comparisons",
      "pillar": "Capability",
      "keywords": [
        "MCP",
        "A2A",
        "agent protocol",
        "Model Context Protocol",
        "agent interoperability"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "MCP vs OpenAPI Tools",
      "slug": "mcp-vs-openapi-tools",
      "url": "https://learn.engineering.vips.edu/compare/mcp-vs-openapi-tools",
      "description": "MCP is a purpose-built protocol for exposing tools, resources, and prompts to LLMs; OpenAPI tools reuse your existing HTTP API spec. Pick by whether you're designing for AI-first or bolting AI onto existing services.",
      "category": "comparisons",
      "categoryTitle": "Model & Tool Comparisons",
      "pillar": "Capability",
      "keywords": [
        "MCP",
        "OpenAPI",
        "AI tools",
        "function calling",
        "Model Context Protocol"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Meilisearch vs Elasticsearch",
      "slug": "meilisearch-vs-elasticsearch",
      "url": "https://learn.engineering.vips.edu/compare/meilisearch-vs-elasticsearch",
      "description": "Meilisearch is a Rust-based typo-tolerant search engine built for instant search with minimal configuration; Elasticsearch is a battle-tested distributed search and analytics engine with deep configurability, vector support, and a massive ecosystem.",
      "category": "comparisons",
      "categoryTitle": "Model & Tool Comparisons",
      "pillar": "Capability",
      "keywords": [
        "Meilisearch",
        "Elasticsearch",
        "search engine",
        "instant search",
        "vector search"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Llama 3.1 405B vs Llama 3.3 70B",
      "slug": "meta-llama-3-1-405b-vs-meta-llama-3-3-70b",
      "url": "https://learn.engineering.vips.edu/compare/meta-llama-3-1-405b-vs-meta-llama-3-3-70b",
      "description": "Llama 3.1 405B is Meta's 2024 flagship dense open model; Llama 3.3 70B is the 2024-end update that delivers near-405B quality in a 70B frame using improved instruction tuning.",
      "category": "comparisons",
      "categoryTitle": "Model & Tool Comparisons",
      "pillar": "Capability",
      "keywords": [
        "Llama 3.1 405B",
        "Llama 3.3 70B",
        "Meta Llama",
        "open weights",
        "GPU serving"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Llama 3.1 8B Instruct vs Phi-3.5-mini",
      "slug": "meta-llama-3-1-8b-instruct-vs-microsoft-phi-3-5-mini",
      "url": "https://learn.engineering.vips.edu/compare/meta-llama-3-1-8b-instruct-vs-microsoft-phi-3-5-mini",
      "description": "Llama 3.1 8B wins on ecosystem, general chat, and tool use; Phi-3.5-mini wins on density per parameter (3.8B) and on-device / edge deployment. Pick by deployment envelope.",
      "category": "comparisons",
      "categoryTitle": "Model & Tool Comparisons",
      "pillar": "Capability",
      "keywords": [
        "Llama 3.1 8B",
        "Phi-3.5-mini",
        "small LLM",
        "edge LLM",
        "open-weight"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Llama 3.1 8B Instruct vs Phi-4 (edge / small)",
      "slug": "meta-llama-3-1-8b-instruct-vs-microsoft-phi-4",
      "url": "https://learn.engineering.vips.edu/compare/meta-llama-3-1-8b-instruct-vs-microsoft-phi-4",
      "description": "Llama 3.1 8B and Microsoft Phi-4 (14B) are the top small models for edge and on-device use. Phi-4 wins on reasoning benchmarks; Llama wins on ecosystem and multilingual.",
      "category": "comparisons",
      "categoryTitle": "Model & Tool Comparisons",
      "pillar": "Capability",
      "keywords": [
        "Llama 3.1 8B",
        "Phi-4",
        "small LLM",
        "edge AI",
        "on-device LLM"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Llama 3.3 70B vs Mistral Large 3",
      "slug": "meta-llama-3-3-70b-vs-mistral-large-3",
      "url": "https://learn.engineering.vips.edu/compare/meta-llama-3-3-70b-vs-mistral-large-3",
      "description": "Llama 3.3 70B and Mistral Large 3 are the strongest open/semi-open models in their weight class. Llama is open-weight; Mistral Large is stronger on reasoning but closed.",
      "category": "comparisons",
      "categoryTitle": "Model & Tool Comparisons",
      "pillar": "Capability",
      "keywords": [
        "Llama 3.3 70B",
        "Mistral Large 3",
        "open LLM",
        "EU AI",
        "LLM comparison"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Llama 4 Maverick vs Llama 4 Scout",
      "slug": "meta-llama-4-maverick-vs-meta-llama-4-scout",
      "url": "https://learn.engineering.vips.edu/compare/meta-llama-4-maverick-vs-meta-llama-4-scout",
      "description": "Llama 4 Maverick is Meta's larger MoE model aimed at quality; Llama 4 Scout is the lighter MoE aimed at massive context and edge-ready deployment. Pick by context length and latency needs.",
      "category": "comparisons",
      "categoryTitle": "Model & Tool Comparisons",
      "pillar": "Capability",
      "keywords": [
        "Llama 4 Maverick",
        "Llama 4 Scout",
        "Llama 4",
        "Meta LLM",
        "MoE"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Llama Guard 3 vs OpenAI Moderation",
      "slug": "meta-llama-guard-3-vs-openai-moderation",
      "url": "https://learn.engineering.vips.edu/compare/meta-llama-guard-3-vs-openai-moderation",
      "description": "Llama Guard 3 is an open-weights safety classifier for LLM inputs/outputs. OpenAI Moderation is a free API endpoint. Pick by self-hosting need and taxonomy fit.",
      "category": "comparisons",
      "categoryTitle": "Model & Tool Comparisons",
      "pillar": "Capability",
      "keywords": [
        "Llama Guard vs OpenAI Moderation",
        "content moderation",
        "LLM safety",
        "safety classifier"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Microsoft Phi-4 vs Phi-3.5-mini",
      "slug": "microsoft-phi-4-vs-microsoft-phi-3-5-mini",
      "url": "https://learn.engineering.vips.edu/compare/microsoft-phi-4-vs-microsoft-phi-3-5-mini",
      "description": "Phi-4 is Microsoft's 14B reasoning-focused small model; Phi-3.5-mini is the 3.8B edge-ready model in the Phi family. Both prioritise data quality over size, but serve very different latency and hardware envelopes.",
      "category": "comparisons",
      "categoryTitle": "Model & Tool Comparisons",
      "pillar": "Capability",
      "keywords": [
        "Phi-4",
        "Phi-3.5-mini",
        "small language model",
        "edge LLM",
        "Microsoft Phi"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Phi-4 vs Mistral NeMo 12B",
      "slug": "microsoft-phi-4-vs-mistral-nemo-12b",
      "url": "https://learn.engineering.vips.edu/compare/microsoft-phi-4-vs-mistral-nemo-12b",
      "description": "Microsoft Phi-4 (14B) and Mistral NeMo 12B are two high-quality open-weights small models. Phi-4 leads on reasoning and math; NeMo leads on multilingual and tool use.",
      "category": "comparisons",
      "categoryTitle": "Model & Tool Comparisons",
      "pillar": "Capability",
      "keywords": [
        "Phi-4",
        "Mistral NeMo",
        "small open LLM",
        "reasoning small model"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Microsoft Phi-4 vs Mistral Small 3",
      "slug": "microsoft-phi-4-vs-mistral-small-3",
      "url": "https://learn.engineering.vips.edu/compare/microsoft-phi-4-vs-mistral-small-3",
      "description": "Phi-4 wins on reasoning density per parameter (14B that punches like a 30B); Mistral Small 3 wins on speed, permissive license, and strong general chat. Both fit on a single consumer GPU.",
      "category": "comparisons",
      "categoryTitle": "Model & Tool Comparisons",
      "pillar": "Capability",
      "keywords": [
        "Phi-4",
        "Mistral Small 3",
        "small LLM",
        "reasoning models",
        "single-GPU deployment"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Milvus vs Qdrant",
      "slug": "milvus-vs-qdrant",
      "url": "https://learn.engineering.vips.edu/compare/milvus-vs-qdrant",
      "description": "Milvus is a horizontally scalable vector database built for billion-vector deployments; Qdrant is a Rust-based engine with strong single-node performance and simpler operations. Pick by scale and ops appetite.",
      "category": "comparisons",
      "categoryTitle": "Model & Tool Comparisons",
      "pillar": "Capability",
      "keywords": [
        "Milvus",
        "Qdrant",
        "vector database",
        "billion-vector",
        "RAG"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Milvus vs Weaviate",
      "slug": "milvus-vs-weaviate",
      "url": "https://learn.engineering.vips.edu/compare/milvus-vs-weaviate",
      "description": "Milvus (Zilliz) is a purpose-built distributed vector database; Weaviate is a modular vector DB with a rich module ecosystem. Milvus for massive scale; Weaviate for hybrid search and modules.",
      "category": "comparisons",
      "categoryTitle": "Model & Tool Comparisons",
      "pillar": "Capability",
      "keywords": [
        "Milvus",
        "Weaviate",
        "vector database",
        "hybrid search"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Mixtral 8x22B vs Llama 3.1 70B Instruct",
      "slug": "mistral-8x22b-vs-meta-llama-3-1-70b-instruct",
      "url": "https://learn.engineering.vips.edu/compare/mistral-8x22b-vs-meta-llama-3-1-70b-instruct",
      "description": "Mixtral 8x22B (MoE) and Llama 3.1 70B Instruct (dense) are two shapes of open-weight mid-tier model. Mixtral is cheaper per token; Llama is simpler to serve and better at English.",
      "category": "comparisons",
      "categoryTitle": "Model & Tool Comparisons",
      "pillar": "Capability",
      "keywords": [
        "Mixtral 8x22B",
        "Llama 3.1 70B",
        "MoE vs dense",
        "open-weight LLM",
        "LLM comparison"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Mistral Small 3 vs Mistral Nemo 12B",
      "slug": "mistral-small-3-vs-mistral-nemo-12b",
      "url": "https://learn.engineering.vips.edu/compare/mistral-small-3-vs-mistral-nemo-12b",
      "description": "Mistral Small 3 (24B, Jan 2025) is a dense efficiency-focused model with strong reasoning per parameter; Mistral Nemo 12B (with NVIDIA, 2024) is a smaller Apache-2.0 model tuned for 128k context and multilingual use.",
      "category": "comparisons",
      "categoryTitle": "Model & Tool Comparisons",
      "pillar": "Capability",
      "keywords": [
        "Mistral Small 3",
        "Mistral Nemo",
        "open weights",
        "self-hosted LLM",
        "Apache 2.0"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "mxbai-rerank-large-v1 vs bge-reranker-v2-m3",
      "slug": "mixedbread-mxbai-rerank-large-v1-vs-bge-reranker-v2-m3",
      "url": "https://learn.engineering.vips.edu/compare/mixedbread-mxbai-rerank-large-v1-vs-bge-reranker-v2-m3",
      "description": "mxbai-rerank-large-v1 from Mixedbread AI is an Apache-2.0 cross-encoder optimised for English retrieval reranking; BGE reranker v2 M3 from BAAI is a multilingual cross-encoder with broad language coverage.",
      "category": "comparisons",
      "categoryTitle": "Model & Tool Comparisons",
      "pillar": "Capability",
      "keywords": [
        "mxbai-rerank",
        "bge-reranker",
        "reranker",
        "cross encoder",
        "RAG"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "MLflow LLM Evaluate vs Promptfoo",
      "slug": "mlflow-llm-evaluate-vs-promptfoo",
      "url": "https://learn.engineering.vips.edu/compare/mlflow-llm-evaluate-vs-promptfoo",
      "description": "MLflow LLM Evaluate is an enterprise MLflow-integrated LLM evaluator; Promptfoo is a dev-friendly CLI/YAML LLM eval tool. MLflow for ops-heavy teams; Promptfoo for fast iteration.",
      "category": "comparisons",
      "categoryTitle": "Model & Tool Comparisons",
      "pillar": "Capability",
      "keywords": [
        "MLflow LLM Evaluate",
        "Promptfoo",
        "LLM eval",
        "prompt testing"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Modal vs RunPod",
      "slug": "modal-vs-runpod",
      "url": "https://learn.engineering.vips.edu/compare/modal-vs-runpod",
      "description": "Modal and RunPod both provide serverless and dedicated GPU infrastructure for AI workloads. Modal prioritizes developer experience; RunPod prioritizes raw cost per GPU hour.",
      "category": "comparisons",
      "categoryTitle": "Model & Tool Comparisons",
      "pillar": "Capability",
      "keywords": [
        "Modal vs RunPod",
        "GPU cloud",
        "serverless GPU",
        "AI infrastructure",
        "fine-tuning"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "NVIDIA NeMo Guardrails vs LLM Guard",
      "slug": "nvidia-nemo-guardrails-vs-llm-guard",
      "url": "https://learn.engineering.vips.edu/compare/nvidia-nemo-guardrails-vs-llm-guard",
      "description": "NeMo Guardrails uses Colang DSL for programmable dialogue rails; LLM Guard is a Python middleware with pre- and post-scanners for prompts and outputs. Rails vs scanners.",
      "category": "comparisons",
      "categoryTitle": "Model & Tool Comparisons",
      "pillar": "Capability",
      "keywords": [
        "NeMo Guardrails",
        "LLM Guard",
        "LLM safety",
        "AI guardrails",
        "prompt injection"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Ollama vs vLLM",
      "slug": "ollama-vs-vllm",
      "url": "https://learn.engineering.vips.edu/compare/ollama-vs-vllm",
      "description": "Ollama and vLLM are both used to run open-weight LLMs. Ollama is for local/dev use; vLLM is for production serving with batching and high throughput.",
      "category": "comparisons",
      "categoryTitle": "Model & Tool Comparisons",
      "pillar": "Capability",
      "keywords": [
        "Ollama",
        "vLLM",
        "LLM inference",
        "self-hosted LLM",
        "model serving"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Open-Weights vs Closed API",
      "slug": "open-weights-vs-closed-api",
      "url": "https://learn.engineering.vips.edu/compare/open-weights-vs-closed-api",
      "description": "Open-weights models (Llama, Qwen, DeepSeek) you can self-host; closed APIs (Claude, GPT, Gemini) you can only call. Open for control and data; closed for frontier quality and ops.",
      "category": "comparisons",
      "categoryTitle": "Model & Tool Comparisons",
      "pillar": "Capability",
      "keywords": [
        "open-weights LLM",
        "closed API",
        "self-hosted LLM",
        "Llama vs Claude"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "OpenAI Agents SDK vs Swarm",
      "slug": "openai-agents-sdk-vs-openai-swarm-framework",
      "url": "https://learn.engineering.vips.edu/compare/openai-agents-sdk-vs-openai-swarm-framework",
      "description": "The OpenAI Agents SDK is the production-supported successor; Swarm was an educational prototype. Use Agents SDK for anything going to production, and study Swarm only to understand the hand-off pattern.",
      "category": "comparisons",
      "categoryTitle": "Model & Tool Comparisons",
      "pillar": "Capability",
      "keywords": [
        "OpenAI Agents SDK",
        "Swarm",
        "agent framework",
        "OpenAI",
        "multi-agent"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "GPT-4.1 vs GPT-4o",
      "slug": "openai-gpt-4-1-vs-openai-gpt-4o",
      "url": "https://learn.engineering.vips.edu/compare/openai-gpt-4-1-vs-openai-gpt-4o",
      "description": "GPT-4.1 wins on coding, instruction following, and long-context reliability; GPT-4o wins on native multimodal breadth (voice, vision) and interactive latency. Pick by whether your product is agent-like or chat-like.",
      "category": "comparisons",
      "categoryTitle": "Model & Tool Comparisons",
      "pillar": "Capability",
      "keywords": [
        "GPT-4.1",
        "GPT-4o",
        "OpenAI",
        "LLM comparison",
        "multimodal"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "GPT-4o vs Gemini 2.0 Flash",
      "slug": "openai-gpt-4o-vs-google-gemini-2-0-flash",
      "url": "https://learn.engineering.vips.edu/compare/openai-gpt-4o-vs-google-gemini-2-0-flash",
      "description": "GPT-4o and Gemini 2.0 Flash were the workhorse multimodal models of 2024-2025. Both remain in wide use. GPT-4o wins on voice and ecosystem; Flash wins on cost and long context.",
      "category": "comparisons",
      "categoryTitle": "Model & Tool Comparisons",
      "pillar": "Capability",
      "keywords": [
        "GPT-4o",
        "Gemini 2.0 Flash",
        "multimodal LLM",
        "voice AI",
        "LLM comparison"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "GPT-5 Nano vs GPT-5 Mini",
      "slug": "openai-gpt-5-nano-vs-openai-gpt-5-mini",
      "url": "https://learn.engineering.vips.edu/compare/openai-gpt-5-nano-vs-openai-gpt-5-mini",
      "description": "GPT-5 Nano is OpenAI's cheapest and fastest GPT-5 family tier for high-volume simple tasks; GPT-5 Mini is the mid-tier balance of reasoning and latency for everyday production use.",
      "category": "comparisons",
      "categoryTitle": "Model & Tool Comparisons",
      "pillar": "Capability",
      "keywords": [
        "GPT-5 Nano",
        "GPT-5 Mini",
        "OpenAI",
        "small LLM",
        "cheap inference"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "GPT-5 vs Grok 4",
      "slug": "openai-gpt-5-vs-xai-grok-4",
      "url": "https://learn.engineering.vips.edu/compare/openai-gpt-5-vs-xai-grok-4",
      "description": "GPT-5 leads on ecosystem, multimodal breadth, and enterprise maturity. Grok 4 competes on reasoning and has unique X (Twitter) data access. Pick by whether you need real-time social data or enterprise tooling.",
      "category": "comparisons",
      "categoryTitle": "Model & Tool Comparisons",
      "pillar": "Capability",
      "keywords": [
        "GPT-5 vs Grok",
        "Grok 4",
        "xAI",
        "LLM comparison",
        "OpenAI"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "OpenAI o1 vs o3",
      "slug": "openai-o1-vs-openai-o3",
      "url": "https://learn.engineering.vips.edu/compare/openai-o1-vs-openai-o3",
      "description": "OpenAI o1 vs o3: two generations of the same reasoning-model line. o3 is stronger across the board; o1 remains cheaper and is still fine for many deliberation tasks.",
      "category": "comparisons",
      "categoryTitle": "Model & Tool Comparisons",
      "pillar": "Capability",
      "keywords": [
        "OpenAI o1",
        "OpenAI o3",
        "reasoning model",
        "test-time compute",
        "LLM comparison"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Perplexity Sonar vs You.com Smart",
      "slug": "perplexity-sonar-vs-you-com-smart",
      "url": "https://learn.engineering.vips.edu/compare/perplexity-sonar-vs-you-com-smart",
      "description": "Perplexity Sonar and You.com Smart are answer-engine APIs that combine web search with LLM synthesis. Sonar has stronger citations and latency; Smart has broader mode flexibility.",
      "category": "comparisons",
      "categoryTitle": "Model & Tool Comparisons",
      "pillar": "Capability",
      "keywords": [
        "Perplexity Sonar vs You.com",
        "answer engine",
        "web-grounded LLM",
        "citations",
        "RAG search"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "pgvector vs Qdrant",
      "slug": "pgvector-vs-qdrant",
      "url": "https://learn.engineering.vips.edu/compare/pgvector-vs-qdrant",
      "description": "pgvector brings vector search into Postgres so your embeddings live next to your data; Qdrant is a dedicated vector search engine with stronger pure-vector performance. Pick by whether you value data locality or specialised throughput.",
      "category": "comparisons",
      "categoryTitle": "Model & Tool Comparisons",
      "pillar": "Capability",
      "keywords": [
        "pgvector",
        "Qdrant",
        "vector database",
        "Postgres",
        "RAG infrastructure"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Pinecone vs Qdrant",
      "slug": "pinecone-vs-qdrant",
      "url": "https://learn.engineering.vips.edu/compare/pinecone-vs-qdrant",
      "description": "Pinecone is a fully managed vector database; Qdrant is open-source (self-host or managed cloud). Pinecone wins on zero-ops; Qdrant wins on cost and flexibility.",
      "category": "comparisons",
      "categoryTitle": "Model & Tool Comparisons",
      "pillar": "Capability",
      "keywords": [
        "Pinecone",
        "Qdrant",
        "vector database",
        "RAG",
        "semantic search"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Pinecone vs Weaviate",
      "slug": "pinecone-vs-weaviate",
      "url": "https://learn.engineering.vips.edu/compare/pinecone-vs-weaviate",
      "description": "Pinecone is a fully managed serverless vector database with zero ops; Weaviate is a feature-rich vector database available as managed or self-hosted with built-in modules. Pick by whether you want hands-off or more control.",
      "category": "comparisons",
      "categoryTitle": "Model & Tool Comparisons",
      "pillar": "Capability",
      "keywords": [
        "Pinecone",
        "Weaviate",
        "vector database",
        "RAG",
        "managed vector DB"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Prompt Caching vs RAG",
      "slug": "prompt-caching-vs-rag",
      "url": "https://learn.engineering.vips.edu/compare/prompt-caching-vs-rag",
      "description": "Prompt caching reuses expensive prefix computation; RAG retrieves relevant chunks at inference time. They solve different problems and often work together.",
      "category": "comparisons",
      "categoryTitle": "Model & Tool Comparisons",
      "pillar": "Capability",
      "keywords": [
        "prompt caching",
        "RAG",
        "LLM optimization",
        "Anthropic caching",
        "inference cost"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Prompt Engineering vs Fine-Tuning",
      "slug": "prompt-engineering-vs-fine-tuning",
      "url": "https://learn.engineering.vips.edu/compare/prompt-engineering-vs-fine-tuning",
      "description": "Prompt engineering shapes model behaviour via input; fine-tuning modifies weights. Prompt engineering for fast iteration and broad tasks; fine-tuning for style, format, or large corpora.",
      "category": "comparisons",
      "categoryTitle": "Model & Tool Comparisons",
      "pillar": "Capability",
      "keywords": [
        "prompt engineering",
        "fine-tuning",
        "LLM adaptation",
        "LoRA"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "PromptBench vs Promptfoo",
      "slug": "promptbench-vs-promptfoo",
      "url": "https://learn.engineering.vips.edu/compare/promptbench-vs-promptfoo",
      "description": "PromptBench is a Microsoft Research benchmark harness for evaluating LLM robustness across tasks and adversarial prompts; Promptfoo is a developer-focused CLI and CI tool for regression-testing prompts, datasets, and models in production workflows.",
      "category": "comparisons",
      "categoryTitle": "Model & Tool Comparisons",
      "pillar": "Capability",
      "keywords": [
        "PromptBench",
        "Promptfoo",
        "LLM evaluation",
        "prompt testing",
        "regression test"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "ReAct vs Reflexion",
      "slug": "react-pattern-vs-reflexion-pattern",
      "url": "https://learn.engineering.vips.edu/compare/react-pattern-vs-reflexion-pattern",
      "description": "ReAct interleaves reasoning with tool actions. Reflexion adds a self-critique loop that improves across attempts. Pick by whether you need multi-attempt learning.",
      "category": "comparisons",
      "categoryTitle": "Model & Tool Comparisons",
      "pillar": "Creativity",
      "keywords": [
        "ReAct vs Reflexion",
        "agent patterns",
        "LLM agents",
        "self-reflection",
        "tool use"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Retrieval-Augmented Generation vs Prompt Caching",
      "slug": "retrieval-augmented-generation-vs-prompt-caching",
      "url": "https://learn.engineering.vips.edu/compare/retrieval-augmented-generation-vs-prompt-caching",
      "description": "RAG selectively retrieves relevant context into prompts; prompt caching reuses prefix tokens across requests. RAG for large corpora; caching for stable, frequently-repeated contexts.",
      "category": "comparisons",
      "categoryTitle": "Model & Tool Comparisons",
      "pillar": "Capability",
      "keywords": [
        "RAG",
        "prompt caching",
        "retrieval augmented generation",
        "LLM cost"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Runway Gen-3 Alpha vs OpenAI Sora",
      "slug": "runway-gen-3-alpha-vs-openai-sora",
      "url": "https://learn.engineering.vips.edu/compare/runway-gen-3-alpha-vs-openai-sora",
      "description": "Runway Gen-3 Alpha is a production-tuned text/image-to-video model used heavily by creative studios; OpenAI Sora is a closed frontier video model with longer, more physically consistent clips.",
      "category": "comparisons",
      "categoryTitle": "Model & Tool Comparisons",
      "pillar": "Capability",
      "keywords": [
        "Sora",
        "Runway Gen-3",
        "text to video",
        "AI video",
        "video generation"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "sentence-transformers vs txtai",
      "slug": "sentence-transformers-vs-txtai",
      "url": "https://learn.engineering.vips.edu/compare/sentence-transformers-vs-txtai",
      "description": "sentence-transformers is the standard Python library for embedding models. txtai is a broader semantic-search + pipeline framework. Pick by whether you need a library or a platform.",
      "category": "comparisons",
      "categoryTitle": "Model & Tool Comparisons",
      "pillar": "Capability",
      "keywords": [
        "sentence-transformers vs txtai",
        "Python embeddings",
        "semantic search",
        "vector search"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "SGLang vs vLLM",
      "slug": "sglang-vs-vllm",
      "url": "https://learn.engineering.vips.edu/compare/sglang-vs-vllm",
      "description": "SGLang and vLLM are both open-source LLM inference servers for high-throughput serving. vLLM is the most widely deployed; SGLang is catching up fast on MoE and structured-generation throughput.",
      "category": "comparisons",
      "categoryTitle": "Model & Tool Comparisons",
      "pillar": "Capability",
      "keywords": [
        "SGLang vs vLLM",
        "LLM inference",
        "LLM serving",
        "MoE inference",
        "vLLM"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "stdio vs SSE (MCP transport)",
      "slug": "sse-mcp-transport-vs-stdio",
      "url": "https://learn.engineering.vips.edu/compare/sse-mcp-transport-vs-stdio",
      "description": "MCP supports two primary transports: stdio (local process) and SSE/HTTP (remote). stdio wins for local tools; SSE wins for remote and multi-client services.",
      "category": "comparisons",
      "categoryTitle": "Model & Tool Comparisons",
      "pillar": "Capability",
      "keywords": [
        "MCP",
        "Model Context Protocol",
        "stdio transport",
        "SSE transport",
        "streamable HTTP"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "TensorRT-LLM vs vLLM",
      "slug": "tensorrt-llm-vs-vllm",
      "url": "https://learn.engineering.vips.edu/compare/tensorrt-llm-vs-vllm",
      "description": "TensorRT-LLM is NVIDIA's AOT-compiled inference library for absolute best GPU performance. vLLM is the community open-source server. Pick by whether you need NVIDIA-specific peak performance.",
      "category": "comparisons",
      "categoryTitle": "Model & Tool Comparisons",
      "pillar": "Capability",
      "keywords": [
        "TensorRT-LLM vs vLLM",
        "LLM inference",
        "NVIDIA LLM",
        "LLM serving",
        "GPU optimization"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "TRL vs Unsloth",
      "slug": "trl-vs-unsloth",
      "url": "https://learn.engineering.vips.edu/compare/trl-vs-unsloth",
      "description": "TRL (Hugging Face) is the canonical SFT/RLHF/DPO trainer library. Unsloth is a 2x-faster, memory-efficient single-GPU fine-tuner. Pick by scale and speed needs.",
      "category": "comparisons",
      "categoryTitle": "Model & Tool Comparisons",
      "pillar": "Capability",
      "keywords": [
        "TRL vs Unsloth",
        "fine-tuning",
        "LoRA",
        "QLoRA",
        "Hugging Face trainer"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Unstructured.io vs LlamaParse",
      "slug": "unstructured-io-vs-llama-parse",
      "url": "https://learn.engineering.vips.edu/compare/unstructured-io-vs-llama-parse",
      "description": "Unstructured.io and LlamaParse extract LLM-ready text from messy documents. Unstructured is format-broad and self-hostable; LlamaParse uses LLM-based parsing for stronger tables.",
      "category": "comparisons",
      "categoryTitle": "Model & Tool Comparisons",
      "pillar": "Capability",
      "keywords": [
        "Unstructured.io",
        "LlamaParse",
        "document parsing",
        "RAG ingestion",
        "PDF extraction"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "MCP 1Password Server",
      "slug": "mcp-1password-server",
      "url": "https://learn.engineering.vips.edu/mcp/mcp-1password-server",
      "description": "A community MCP server that exposes the 1Password CLI and Connect API — read items, fetch secrets, list vaults — to Claude Desktop under strict scoping and with no secret leakage to the model.",
      "category": "mcp",
      "categoryTitle": "Model Context Protocol",
      "pillar": "Creativity",
      "keywords": [
        "MCP 1Password",
        "secrets manager",
        "service account",
        "op CLI"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "MCP Adobe XD Server",
      "slug": "mcp-adobe-xd-server",
      "url": "https://learn.engineering.vips.edu/mcp/mcp-adobe-xd-server",
      "description": "Community MCP server that reads Adobe XD cloud documents and design tokens via the Creative Cloud APIs — useful for LLM-driven design-system documentation and code-handoff.",
      "category": "mcp",
      "categoryTitle": "Model Context Protocol",
      "pillar": "Creativity",
      "keywords": [
        "MCP Adobe XD",
        "XD MCP server",
        "Adobe Creative Cloud MCP",
        "design handoff"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Aider as an MCP-Compatible Client",
      "slug": "mcp-aider-client",
      "url": "https://learn.engineering.vips.edu/mcp/mcp-aider-client",
      "description": "Aider is a terminal-based AI pair programmer — recent releases add Model Context Protocol client support, letting you pull in MCP servers alongside Aider's native git-aware editing.",
      "category": "mcp",
      "categoryTitle": "Model Context Protocol",
      "pillar": "Creativity",
      "keywords": [
        "Aider",
        "CLI coding agent",
        "MCP client",
        "git-aware"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "MCP Airbyte Server",
      "slug": "mcp-airbyte-server",
      "url": "https://learn.engineering.vips.edu/mcp/mcp-airbyte-server",
      "description": "Community MCP server for Airbyte — exposes connectors, connections, and sync jobs so Claude can inspect data pipelines, trigger syncs, and troubleshoot failures.",
      "category": "mcp",
      "categoryTitle": "Model Context Protocol",
      "pillar": "Creativity",
      "keywords": [
        "MCP Airbyte",
        "Airbyte MCP server",
        "data pipeline AI",
        "ELT MCP"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "MCP Airtable Server",
      "slug": "mcp-airtable-server",
      "url": "https://learn.engineering.vips.edu/mcp/mcp-airtable-server",
      "description": "MCP server for Airtable — list bases, read records, create and update rows, and run formula searches. A widely-used community server built on the Airtable Web API.",
      "category": "mcp",
      "categoryTitle": "Model Context Protocol",
      "pillar": "Creativity",
      "keywords": [
        "MCP Airtable",
        "Airtable MCP",
        "no-code database MCP"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "MCP + Anthropic SDK Integration",
      "slug": "mcp-anthropic-sdk-integration",
      "url": "https://learn.engineering.vips.edu/mcp/mcp-anthropic-sdk-integration",
      "description": "How to wire MCP servers into apps built with the official Anthropic Python and TypeScript SDKs — passing MCP tool definitions to Claude and handling tool calls end-to-end.",
      "category": "mcp",
      "categoryTitle": "Model Context Protocol",
      "pillar": "Creativity",
      "keywords": [
        "Anthropic SDK MCP",
        "Claude SDK MCP integration",
        "MCP Python SDK",
        "MCP TypeScript SDK"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "MCP Anytype Server",
      "slug": "mcp-anytype-server",
      "url": "https://learn.engineering.vips.edu/mcp/mcp-anytype-server",
      "description": "Community MCP server for Anytype — the local-first knowledge OS — exposing spaces, objects, and types so Claude can reason about a user's private graph.",
      "category": "mcp",
      "categoryTitle": "Model Context Protocol",
      "pillar": "Creativity",
      "keywords": [
        "MCP Anytype",
        "Anytype MCP server",
        "local-first AI",
        "personal knowledge graph"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "MCP Apache NiFi Server",
      "slug": "mcp-apache-nifi-server",
      "url": "https://learn.engineering.vips.edu/mcp/mcp-apache-nifi-server",
      "description": "Community MCP server for Apache NiFi — lets Claude list process groups, inspect flow files, start/stop processors, and troubleshoot dataflows via the NiFi REST API.",
      "category": "mcp",
      "categoryTitle": "Model Context Protocol",
      "pillar": "Creativity",
      "keywords": [
        "MCP NiFi",
        "Apache NiFi MCP",
        "dataflow MCP",
        "streaming integration"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "MCP Apple Notes Server",
      "slug": "mcp-apple-notes-server",
      "url": "https://learn.engineering.vips.edu/mcp/mcp-apple-notes-server",
      "description": "A community MCP server that exposes Apple Notes on macOS — list notes, read full content, create or append notes — to Claude Desktop via AppleScript bridging over stdio transport.",
      "category": "mcp",
      "categoryTitle": "Model Context Protocol",
      "pillar": "Creativity",
      "keywords": [
        "MCP Apple Notes",
        "macOS",
        "AppleScript",
        "personal knowledge"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "MCP AWS Server",
      "slug": "mcp-aws-server",
      "url": "https://learn.engineering.vips.edu/mcp/mcp-aws-server",
      "description": "AWS publishes a suite of official MCP servers covering Bedrock, CloudWatch, S3, CDK, and more. Together they let LLM clients operate across AWS resources through scoped IAM credentials.",
      "category": "mcp",
      "categoryTitle": "Model Context Protocol",
      "pillar": "Creativity",
      "keywords": [
        "MCP AWS",
        "awslabs MCP",
        "AWS MCP server",
        "Bedrock MCP"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "MCP Azure Server",
      "slug": "mcp-azure-server",
      "url": "https://learn.engineering.vips.edu/mcp/mcp-azure-server",
      "description": "Microsoft ships official MCP servers for Azure — including an Azure MCP server for the core control plane, plus focused servers for Cosmos DB, Azure DevOps, and AI Foundry.",
      "category": "mcp",
      "categoryTitle": "Model Context Protocol",
      "pillar": "Creativity",
      "keywords": [
        "MCP Azure",
        "Azure MCP server",
        "Cosmos DB MCP",
        "Azure DevOps MCP"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "MCP BigQuery Server",
      "slug": "mcp-bigquery-server",
      "url": "https://learn.engineering.vips.edu/mcp/mcp-bigquery-server",
      "description": "MCP server exposing BigQuery dataset browsing and SQL execution to LLM clients. Most implementations are community-maintained on top of Google's official BigQuery client libraries.",
      "category": "mcp",
      "categoryTitle": "Model Context Protocol",
      "pillar": "Creativity",
      "keywords": [
        "MCP BigQuery",
        "BigQuery MCP",
        "Claude BigQuery",
        "GCP data warehouse MCP"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "MCP Bitbucket Server",
      "slug": "mcp-bitbucket-server",
      "url": "https://learn.engineering.vips.edu/mcp/mcp-bitbucket-server",
      "description": "Community MCP server for Atlassian Bitbucket Cloud (and Bitbucket Data Center) — exposes repositories, pull requests, pipelines, and branch operations for LLM-driven dev workflows.",
      "category": "mcp",
      "categoryTitle": "Model Context Protocol",
      "pillar": "Creativity",
      "keywords": [
        "MCP Bitbucket",
        "Atlassian MCP",
        "PR review MCP"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "MCP Box Server",
      "slug": "mcp-box-server",
      "url": "https://learn.engineering.vips.edu/mcp/mcp-box-server",
      "description": "MCP server for Box — gives Claude scoped access to Box folders, files, metadata, and Box AI Q&A, enabling document search, summarisation, and controlled uploads through the Model Context Protocol.",
      "category": "mcp",
      "categoryTitle": "Model Context Protocol",
      "pillar": "Creativity",
      "keywords": [
        "MCP Box",
        "Box MCP server",
        "Box AI MCP",
        "content cloud MCP"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "MCP Brave Search Server",
      "slug": "mcp-brave-search-server",
      "url": "https://learn.engineering.vips.edu/mcp/mcp-brave-search-server",
      "description": "The Brave Search MCP server gives Claude and other MCP clients a privacy-respecting web search tool, powered by the Brave Search API — no Google dependency, no user tracking.",
      "category": "mcp",
      "categoryTitle": "Model Context Protocol",
      "pillar": "Creativity",
      "keywords": [
        "MCP Brave Search",
        "Brave Search MCP",
        "LLM web search",
        "private search API",
        "Claude Desktop search"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Building an MCP Server in Python (Tutorial)",
      "slug": "mcp-building-python-server-tutorial",
      "url": "https://learn.engineering.vips.edu/mcp/mcp-building-python-server-tutorial",
      "description": "Step-by-step walkthrough of writing a minimal Model Context Protocol server in Python using the official MCP SDK and FastMCP — tools, resources, stdio transport.",
      "category": "mcp",
      "categoryTitle": "Model Context Protocol",
      "pillar": "Creativity",
      "keywords": [
        "MCP Python",
        "FastMCP tutorial",
        "build MCP server",
        "mcp SDK Python"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "MCP Canva Server",
      "slug": "mcp-canva-server",
      "url": "https://learn.engineering.vips.edu/mcp/mcp-canva-server",
      "description": "MCP server for Canva — lets Claude browse brand assets, generate designs from templates, export PDFs and PNGs, and publish content to a Canva team workspace.",
      "category": "mcp",
      "categoryTitle": "Model Context Protocol",
      "pillar": "Creativity",
      "keywords": [
        "MCP Canva",
        "Canva MCP server",
        "Canva Connect API",
        "AI design"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "MCP Cassandra Server",
      "slug": "mcp-cassandra-server",
      "url": "https://learn.engineering.vips.edu/mcp/mcp-cassandra-server",
      "description": "Community MCP server for Apache Cassandra — exposes CQL query execution, keyspace and table introspection, so an LLM client can explore wide-column NoSQL data stored across a Cassandra cluster.",
      "category": "mcp",
      "categoryTitle": "Model Context Protocol",
      "pillar": "Creativity",
      "keywords": [
        "MCP Cassandra",
        "MCP server",
        "CQL",
        "wide-column NoSQL"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "MCP CircleCI Server",
      "slug": "mcp-circleci-server",
      "url": "https://learn.engineering.vips.edu/mcp/mcp-circleci-server",
      "description": "Community MCP server for CircleCI — exposes pipelines, workflows, jobs, and artifacts so LLM clients can inspect build failures, rerun jobs, and help debug CI configs.",
      "category": "mcp",
      "categoryTitle": "Model Context Protocol",
      "pillar": "Creativity",
      "keywords": [
        "MCP CircleCI",
        "CI/CD MCP",
        "build MCP"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Claude Desktop as an MCP Client",
      "slug": "mcp-claude-desktop-client",
      "url": "https://learn.engineering.vips.edu/mcp/mcp-claude-desktop-client",
      "description": "Claude Desktop is Anthropic's reference MCP client — install it on macOS or Windows, edit a single JSON config, and Claude gains access to filesystems, GitHub, Slack, and more.",
      "category": "mcp",
      "categoryTitle": "Model Context Protocol",
      "pillar": "Creativity",
      "keywords": [
        "Claude Desktop",
        "MCP client",
        "claude_desktop_config.json",
        "Anthropic Claude Desktop",
        "MCP setup"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "MCP ClickHouse Server",
      "slug": "mcp-clickhouse-server",
      "url": "https://learn.engineering.vips.edu/mcp/mcp-clickhouse-server",
      "description": "Community MCP server that connects LLM clients to ClickHouse — the columnar OLAP database — for fast analytical SQL over billions of rows with schema introspection and query execution tools.",
      "category": "mcp",
      "categoryTitle": "Model Context Protocol",
      "pillar": "Creativity",
      "keywords": [
        "MCP ClickHouse",
        "MCP server",
        "ClickHouse analytics",
        "OLAP MCP"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Cline (formerly Claude Dev) as an MCP Client",
      "slug": "mcp-cline-client",
      "url": "https://learn.engineering.vips.edu/mcp/mcp-cline-client",
      "description": "Cline is a VS Code extension that turns Claude into a coding agent — it is an MCP client, so every server you add shows up as a tool in Cline's plan-and-act loop.",
      "category": "mcp",
      "categoryTitle": "Model Context Protocol",
      "pillar": "Creativity",
      "keywords": [
        "Cline MCP",
        "Claude Dev",
        "Cline VS Code",
        "VS Code MCP",
        "autonomous coding agent"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "MCP Cloudflare Server",
      "slug": "mcp-cloudflare-server",
      "url": "https://learn.engineering.vips.edu/mcp/mcp-cloudflare-server",
      "description": "The Cloudflare MCP server exposes Workers, KV, R2, D1, and DNS management as MCP tools — letting Claude operate your Cloudflare account through a scoped API token.",
      "category": "mcp",
      "categoryTitle": "Model Context Protocol",
      "pillar": "Creativity",
      "keywords": [
        "MCP Cloudflare",
        "Cloudflare MCP",
        "Workers MCP",
        "D1 MCP",
        "agent devops"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "MCP Confluence Server",
      "slug": "mcp-confluence-server",
      "url": "https://learn.engineering.vips.edu/mcp/mcp-confluence-server",
      "description": "MCP server for Atlassian Confluence — search spaces, read pages, create and update content. Atlassian hosts an official Cloud endpoint; mcp-atlassian covers self-hosted.",
      "category": "mcp",
      "categoryTitle": "Model Context Protocol",
      "pillar": "Creativity",
      "keywords": [
        "MCP Confluence",
        "Atlassian MCP",
        "wiki MCP",
        "knowledge base MCP"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "MCP Consul Server",
      "slug": "mcp-consul-server",
      "url": "https://learn.engineering.vips.edu/mcp/mcp-consul-server",
      "description": "Community MCP server for HashiCorp Consul — exposes service discovery, health checks, and the KV store to MCP clients so LLMs can diagnose service topology and configuration.",
      "category": "mcp",
      "categoryTitle": "Model Context Protocol",
      "pillar": "Creativity",
      "keywords": [
        "MCP Consul",
        "service discovery MCP",
        "HashiCorp"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Continue as an MCP Client",
      "slug": "mcp-continue-client",
      "url": "https://learn.engineering.vips.edu/mcp/mcp-continue-client",
      "description": "Continue.dev is an open-source AI coding assistant for VS Code and JetBrains — and an MCP client. It can spawn MCP servers and expose their tools to chat, agents, and slash commands.",
      "category": "mcp",
      "categoryTitle": "Model Context Protocol",
      "pillar": "Creativity",
      "keywords": [
        "Continue",
        "VS Code",
        "JetBrains",
        "MCP client"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Cursor as an MCP Client",
      "slug": "mcp-cursor-client",
      "url": "https://learn.engineering.vips.edu/mcp/mcp-cursor-client",
      "description": "Cursor, the AI-first code editor, is an MCP client — add MCP servers via ~/.cursor/mcp.json and every agent in Cursor can call GitHub, Figma, Postgres, and more.",
      "category": "mcp",
      "categoryTitle": "Model Context Protocol",
      "pillar": "Creativity",
      "keywords": [
        "Cursor MCP",
        "Cursor client",
        "mcp.json Cursor",
        "AI code editor",
        "Cursor agent"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "MCP Dagster Server",
      "slug": "mcp-dagster-server",
      "url": "https://learn.engineering.vips.edu/mcp/mcp-dagster-server",
      "description": "Community MCP server for Dagster — exposes the asset graph, run history, and ops via the Dagster GraphQL API so Claude can reason about data pipelines and launch backfills.",
      "category": "mcp",
      "categoryTitle": "Model Context Protocol",
      "pillar": "Creativity",
      "keywords": [
        "MCP Dagster",
        "Dagster MCP server",
        "software-defined assets",
        "data orchestration"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "MCP Databricks Server",
      "slug": "mcp-databricks-server",
      "url": "https://learn.engineering.vips.edu/mcp/mcp-databricks-server",
      "description": "MCP server for the Databricks Lakehouse — run SQL, browse Unity Catalog, trigger jobs, and interact with Mosaic AI endpoints through Model Context Protocol tools.",
      "category": "mcp",
      "categoryTitle": "Model Context Protocol",
      "pillar": "Creativity",
      "keywords": [
        "MCP Databricks",
        "Databricks MCP",
        "Unity Catalog MCP",
        "Lakehouse MCP"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "MCP Datadog Server",
      "slug": "mcp-datadog-server",
      "url": "https://learn.engineering.vips.edu/mcp/mcp-datadog-server",
      "description": "A community MCP server that exposes Datadog — metrics, logs, monitors, events, service catalog — to Claude Desktop over stdio, authenticated with API and application keys.",
      "category": "mcp",
      "categoryTitle": "Model Context Protocol",
      "pillar": "Creativity",
      "keywords": [
        "MCP Datadog",
        "observability",
        "logs",
        "monitors"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "MCP dbt Server",
      "slug": "mcp-dbt-server",
      "url": "https://learn.engineering.vips.edu/mcp/mcp-dbt-server",
      "description": "Community MCP server for dbt (data build tool) — exposes model graph, run/test commands, and documentation lookup so LLMs can help author, run, and debug analytics-engineering projects.",
      "category": "mcp",
      "categoryTitle": "Model Context Protocol",
      "pillar": "Creativity",
      "keywords": [
        "MCP dbt",
        "data build tool",
        "analytics engineering"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "MCP Discord Server",
      "slug": "mcp-discord-server",
      "url": "https://learn.engineering.vips.edu/mcp/mcp-discord-server",
      "description": "A community-maintained MCP server that lets LLM clients like Claude Desktop read channels, post messages, and manage Discord guilds through a bot token over stdio transport.",
      "category": "mcp",
      "categoryTitle": "Model Context Protocol",
      "pillar": "Creativity",
      "keywords": [
        "MCP Discord",
        "Discord bot",
        "Claude Desktop",
        "community MCP server"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "MCP Docker Server",
      "slug": "mcp-docker-server",
      "url": "https://learn.engineering.vips.edu/mcp/mcp-docker-server",
      "description": "Community MCP server that lets a client control the local Docker daemon — list containers, run images, stream logs, inspect networks — through Model Context Protocol tools.",
      "category": "mcp",
      "categoryTitle": "Model Context Protocol",
      "pillar": "Creativity",
      "keywords": [
        "MCP Docker",
        "Claude Docker server",
        "MCP container",
        "Docker Engine MCP"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "MCP Dropbox Server",
      "slug": "mcp-dropbox-server",
      "url": "https://learn.engineering.vips.edu/mcp/mcp-dropbox-server",
      "description": "Community MCP server that exposes Dropbox file operations — list, upload, download, share, and search — so Claude and other MCP clients can work against a user's Dropbox storage.",
      "category": "mcp",
      "categoryTitle": "Model Context Protocol",
      "pillar": "Creativity",
      "keywords": [
        "MCP Dropbox",
        "Dropbox MCP server",
        "Dropbox AI integration",
        "cloud storage MCP"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "MCP Apache Druid Server",
      "slug": "mcp-druid-server",
      "url": "https://learn.engineering.vips.edu/mcp/mcp-druid-server",
      "description": "Community MCP server for Apache Druid — the real-time analytics database. Exposes datasource listing, schema introspection, and Druid SQL execution to LLM clients for sub-second queries over event streams.",
      "category": "mcp",
      "categoryTitle": "Model Context Protocol",
      "pillar": "Creativity",
      "keywords": [
        "MCP Druid",
        "Apache Druid",
        "real-time OLAP"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "MCP Dune Analytics Server",
      "slug": "mcp-dune-analytics-server",
      "url": "https://learn.engineering.vips.edu/mcp/mcp-dune-analytics-server",
      "description": "Community MCP server for Dune Analytics — the on-chain analytics platform. Exposes saved queries, executions, and dashboard results to LLM clients working on Web3 research and tokenomics dashboards.",
      "category": "mcp",
      "categoryTitle": "Model Context Protocol",
      "pillar": "Creativity",
      "keywords": [
        "MCP Dune",
        "Dune Analytics",
        "on-chain data MCP"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "MCP DynamoDB Server",
      "slug": "mcp-dynamodb-server",
      "url": "https://learn.engineering.vips.edu/mcp/mcp-dynamodb-server",
      "description": "A community MCP server that exposes AWS DynamoDB — table listing, item get/put, queries, scans — to Claude Desktop over stdio using standard AWS credentials.",
      "category": "mcp",
      "categoryTitle": "Model Context Protocol",
      "pillar": "Creativity",
      "keywords": [
        "MCP DynamoDB",
        "AWS",
        "NoSQL",
        "key-value"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "The MCP Ecosystem in 2026",
      "slug": "mcp-ecosystem-in-2026",
      "url": "https://learn.engineering.vips.edu/mcp/mcp-ecosystem-in-2026",
      "description": "A snapshot of the Model Context Protocol ecosystem as of April 2026 — who adopted it, what changed in the spec, and where it's heading next alongside A2A and other agent protocols.",
      "category": "mcp",
      "categoryTitle": "Model Context Protocol",
      "pillar": "Creativity",
      "keywords": [
        "MCP 2026",
        "MCP ecosystem",
        "agent protocols",
        "A2A vs MCP"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "MCP Elasticsearch Server",
      "slug": "mcp-elasticsearch-server",
      "url": "https://learn.engineering.vips.edu/mcp/mcp-elasticsearch-server",
      "description": "MCP server that lets LLM clients run queries, inspect mappings, and manage indices on an Elasticsearch or OpenSearch cluster. Elastic ships an official implementation alongside community variants.",
      "category": "mcp",
      "categoryTitle": "Model Context Protocol",
      "pillar": "Creativity",
      "keywords": [
        "MCP Elasticsearch",
        "Elastic MCP",
        "OpenSearch MCP",
        "log search MCP"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "MCP Emacs gptel Client",
      "slug": "mcp-emacs-gptel-client",
      "url": "https://learn.engineering.vips.edu/mcp/mcp-emacs-gptel-client",
      "description": "gptel — the popular Emacs LLM client — speaks MCP, letting Emacs users attach MCP servers to any gptel chat buffer for tool use, resource browsing, and context injection.",
      "category": "mcp",
      "categoryTitle": "Model Context Protocol",
      "pillar": "Creativity",
      "keywords": [
        "MCP Emacs",
        "gptel MCP",
        "Emacs LLM",
        "Model Context Protocol Emacs"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "MCP Figma Server",
      "slug": "mcp-figma-server",
      "url": "https://learn.engineering.vips.edu/mcp/mcp-figma-server",
      "description": "The Figma MCP server exposes frames, components, and Dev Mode data from a Figma file so Claude and Cursor can turn designs into code with real selection context.",
      "category": "mcp",
      "categoryTitle": "Model Context Protocol",
      "pillar": "Creativity",
      "keywords": [
        "MCP Figma",
        "Figma MCP server",
        "design to code",
        "Dev Mode MCP",
        "Cursor Figma"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "MCP Filesystem Server",
      "slug": "mcp-filesystem-server",
      "url": "https://learn.engineering.vips.edu/mcp/mcp-filesystem-server",
      "description": "The reference MCP filesystem server from Anthropic — gives LLM clients like Claude Desktop safe, scoped read/write access to local directories over stdio transport.",
      "category": "mcp",
      "categoryTitle": "Model Context Protocol",
      "pillar": "Creativity",
      "keywords": [
        "MCP filesystem",
        "MCP server",
        "Claude Desktop",
        "stdio transport"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "MCP Firebase Server",
      "slug": "mcp-firebase-server",
      "url": "https://learn.engineering.vips.edu/mcp/mcp-firebase-server",
      "description": "A community MCP server that exposes Firebase — Firestore, Realtime Database, Auth users, Cloud Storage — to Claude Desktop via the Admin SDK. Handy for prototyping and ops on Firebase apps.",
      "category": "mcp",
      "categoryTitle": "Model Context Protocol",
      "pillar": "Creativity",
      "keywords": [
        "MCP Firebase",
        "Firestore",
        "Admin SDK",
        "Realtime Database"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "MCP Fivetran Server",
      "slug": "mcp-fivetran-server",
      "url": "https://learn.engineering.vips.edu/mcp/mcp-fivetran-server",
      "description": "Community MCP server for Fivetran — lets Claude list connectors, check sync status, and start resync or rescan jobs via the Fivetran REST API.",
      "category": "mcp",
      "categoryTitle": "Model Context Protocol",
      "pillar": "Creativity",
      "keywords": [
        "MCP Fivetran",
        "Fivetran MCP server",
        "data pipeline MCP",
        "analytics engineering"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "MCP Fleak Client",
      "slug": "mcp-fleak-client",
      "url": "https://learn.engineering.vips.edu/mcp/mcp-fleak-client",
      "description": "Fleak — the low-code AI workflow builder — functions as an MCP client, letting teams compose MCP tools into serverless pipelines that Claude or other LLMs can call on demand.",
      "category": "mcp",
      "categoryTitle": "Model Context Protocol",
      "pillar": "Creativity",
      "keywords": [
        "MCP Fleak",
        "Fleak MCP client",
        "low-code AI",
        "serverless AI workflow"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "MCP for Developers — building your first server",
      "slug": "mcp-for-developers",
      "url": "https://learn.engineering.vips.edu/mcp/mcp-for-developers",
      "description": "A developer-focused introduction to building MCP servers: transports, primitives (tools/resources/prompts), the TypeScript and Python SDKs, and how Claude Desktop loads your server.",
      "category": "mcp",
      "categoryTitle": "Model Context Protocol",
      "pillar": "Creativity",
      "keywords": [
        "MCP tutorial",
        "MCP server",
        "Python SDK",
        "TypeScript SDK",
        "build MCP"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "MCP Framer Server",
      "slug": "mcp-framer-server",
      "url": "https://learn.engineering.vips.edu/mcp/mcp-framer-server",
      "description": "Community MCP server for Framer — reads and updates published sites and CMS entries via the Framer API so Claude can draft pages, update copy, and publish marketing changes.",
      "category": "mcp",
      "categoryTitle": "Model Context Protocol",
      "pillar": "Creativity",
      "keywords": [
        "MCP Framer",
        "Framer MCP server",
        "AI landing page",
        "Framer CMS"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "MCP Google Cloud Server",
      "slug": "mcp-gcp-server",
      "url": "https://learn.engineering.vips.edu/mcp/mcp-gcp-server",
      "description": "Google Cloud exposes MCP servers for Vertex AI, BigQuery, Cloud Run, and more. Together they let LLM clients inspect and operate GCP resources via Application Default Credentials.",
      "category": "mcp",
      "categoryTitle": "Model Context Protocol",
      "pillar": "Creativity",
      "keywords": [
        "MCP GCP",
        "Google Cloud MCP",
        "Vertex AI MCP",
        "BigQuery MCP"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "MCP Gitea Server",
      "slug": "mcp-gitea-server",
      "url": "https://learn.engineering.vips.edu/mcp/mcp-gitea-server",
      "description": "Community MCP server for Gitea and Forgejo — self-hosted Git forges. Exposes repos, issues, pull requests, and releases through the Gitea REST API to LLM clients.",
      "category": "mcp",
      "categoryTitle": "Model Context Protocol",
      "pillar": "Creativity",
      "keywords": [
        "MCP Gitea",
        "Forgejo MCP",
        "self-hosted git"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "MCP GitHub Actions Server",
      "slug": "mcp-github-actions-server",
      "url": "https://learn.engineering.vips.edu/mcp/mcp-github-actions-server",
      "description": "Community MCP server dedicated to GitHub Actions — exposes workflow runs, job logs, artifacts, and rerun controls so LLM clients can triage CI failures without leaving chat.",
      "category": "mcp",
      "categoryTitle": "Model Context Protocol",
      "pillar": "Creativity",
      "keywords": [
        "MCP GitHub Actions",
        "CI MCP"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "MCP GitHub Server",
      "slug": "mcp-github-server",
      "url": "https://learn.engineering.vips.edu/mcp/mcp-github-server",
      "description": "The official GitHub MCP server lets Claude and other MCP clients read repositories, manage issues, review pull requests, and trigger workflows through a Personal Access Token or GitHub App.",
      "category": "mcp",
      "categoryTitle": "Model Context Protocol",
      "pillar": "Creativity",
      "keywords": [
        "MCP GitHub",
        "GitHub MCP server",
        "Claude Desktop GitHub",
        "MCP pull request",
        "github PAT"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "MCP GitLab Server",
      "slug": "mcp-gitlab-server",
      "url": "https://learn.engineering.vips.edu/mcp/mcp-gitlab-server",
      "description": "The MCP GitLab server gives Claude and other MCP clients read and write access to GitLab projects — files, issues, merge requests, and CI pipelines — authenticated with a Personal Access Token.",
      "category": "mcp",
      "categoryTitle": "Model Context Protocol",
      "pillar": "Creativity",
      "keywords": [
        "MCP GitLab",
        "GitLab MCP",
        "self-hosted GitLab",
        "merge request agent",
        "GitLab PAT"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "MCP Google Calendar Server",
      "slug": "mcp-google-calendar-server",
      "url": "https://learn.engineering.vips.edu/mcp/mcp-google-calendar-server",
      "description": "MCP server that exposes Google Calendar operations — list events, create meetings, check free/busy — to LLM clients. Community-maintained, uses Google OAuth 2.0.",
      "category": "mcp",
      "categoryTitle": "Model Context Protocol",
      "pillar": "Creativity",
      "keywords": [
        "MCP Google Calendar",
        "calendar MCP",
        "scheduling MCP"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "MCP Google Drive Server",
      "slug": "mcp-google-drive-server",
      "url": "https://learn.engineering.vips.edu/mcp/mcp-google-drive-server",
      "description": "The Google Drive MCP server exposes Drive files, Docs, Sheets, and Slides as MCP resources so LLM clients can search, read, and summarise content directly from a user's Drive.",
      "category": "mcp",
      "categoryTitle": "Model Context Protocol",
      "pillar": "Creativity",
      "keywords": [
        "MCP Google Drive",
        "Google Drive MCP",
        "MCP OAuth",
        "Claude Drive integration",
        "Drive LLM search"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "MCP Google Maps Server",
      "slug": "mcp-google-maps-server",
      "url": "https://learn.engineering.vips.edu/mcp/mcp-google-maps-server",
      "description": "The Google Maps MCP server exposes Places, Geocoding, Directions, and Distance Matrix APIs as MCP tools — the fastest way to give an LLM location-aware planning skills.",
      "category": "mcp",
      "categoryTitle": "Model Context Protocol",
      "pillar": "Creativity",
      "keywords": [
        "MCP Google Maps",
        "Google Maps MCP",
        "geocoding LLM",
        "places API agent",
        "directions MCP"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "MCP Goose Client (Block)",
      "slug": "mcp-goose-client",
      "url": "https://learn.engineering.vips.edu/mcp/mcp-goose-client",
      "description": "Goose — Block's open-source on-machine AI agent — is a first-class MCP client that runs multiple MCP servers as 'extensions' for local code, web, and data tasks.",
      "category": "mcp",
      "categoryTitle": "Model Context Protocol",
      "pillar": "Creativity",
      "keywords": [
        "MCP Goose",
        "Block Goose MCP",
        "on-machine AI agent",
        "open-source agent"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "MCP Grafana Server",
      "slug": "mcp-grafana-server",
      "url": "https://learn.engineering.vips.edu/mcp/mcp-grafana-server",
      "description": "A community MCP server that exposes Grafana dashboards, alerts, and data-source queries to Claude Desktop — lets an LLM inspect panels, pull data, and triage alerts through the Grafana HTTP API.",
      "category": "mcp",
      "categoryTitle": "Model Context Protocol",
      "pillar": "Creativity",
      "keywords": [
        "MCP Grafana",
        "observability",
        "dashboards",
        "alert triage"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "MCP Honeycomb Server",
      "slug": "mcp-honeycomb-server",
      "url": "https://learn.engineering.vips.edu/mcp/mcp-honeycomb-server",
      "description": "Community MCP server for Honeycomb.io — exposes query execution, triggers, and SLO metadata so LLM clients can run ad-hoc BubbleUp-style investigations over wide, structured events.",
      "category": "mcp",
      "categoryTitle": "Model Context Protocol",
      "pillar": "Creativity",
      "keywords": [
        "MCP Honeycomb",
        "observability MCP",
        "BubbleUp"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "MCP HubSpot Server",
      "slug": "mcp-hubspot-server",
      "url": "https://learn.engineering.vips.edu/mcp/mcp-hubspot-server",
      "description": "MCP server for HubSpot — search contacts, read deals, create tickets, and update CRM properties from an LLM client. HubSpot has announced official MCP support alongside community packages.",
      "category": "mcp",
      "categoryTitle": "Model Context Protocol",
      "pillar": "Creativity",
      "keywords": [
        "MCP HubSpot",
        "HubSpot MCP",
        "CRM MCP",
        "Breeze AI MCP"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "MCP Hugging Face Hub Server",
      "slug": "mcp-huggingface-hub-server",
      "url": "https://learn.engineering.vips.edu/mcp/mcp-huggingface-hub-server",
      "description": "MCP server for the Hugging Face Hub — exposes model search, dataset browsing, Spaces, and inference endpoints so Claude can reason about and invoke open-source ML assets.",
      "category": "mcp",
      "categoryTitle": "Model Context Protocol",
      "pillar": "Creativity",
      "keywords": [
        "MCP Hugging Face",
        "Hugging Face Hub MCP",
        "open-source ML",
        "inference endpoint"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "MCP InfluxDB Server",
      "slug": "mcp-influxdb-server",
      "url": "https://learn.engineering.vips.edu/mcp/mcp-influxdb-server",
      "description": "Community MCP server for InfluxDB time-series database — exposes Flux / InfluxQL queries, bucket listing, and measurement schema so LLMs can investigate metrics and IoT telemetry.",
      "category": "mcp",
      "categoryTitle": "Model Context Protocol",
      "pillar": "Creativity",
      "keywords": [
        "MCP InfluxDB",
        "MCP server",
        "time series",
        "Flux query MCP"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "MCP Instagram Server",
      "slug": "mcp-instagram-server",
      "url": "https://learn.engineering.vips.edu/mcp/mcp-instagram-server",
      "description": "Community MCP server for Instagram Graph API — exposes business account media, insights, and comment management so LLM clients can help draft and analyze social posts.",
      "category": "mcp",
      "categoryTitle": "Model Context Protocol",
      "pillar": "Creativity",
      "keywords": [
        "MCP Instagram",
        "social media MCP"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "MCP Jenkins Server",
      "slug": "mcp-jenkins-server",
      "url": "https://learn.engineering.vips.edu/mcp/mcp-jenkins-server",
      "description": "Community MCP server for Jenkins — the open-source CI/CD workhorse. Exposes jobs, builds, logs, and triggers so LLM clients can inspect and control Jenkins pipelines.",
      "category": "mcp",
      "categoryTitle": "Model Context Protocol",
      "pillar": "Creativity",
      "keywords": [
        "MCP Jenkins",
        "CI MCP",
        "pipelines"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "MCP Client: JetBrains IDEs",
      "slug": "mcp-jetbrains-ides-client",
      "url": "https://learn.engineering.vips.edu/mcp/mcp-jetbrains-ides-client",
      "description": "Overview of MCP support in JetBrains IDEs (IntelliJ IDEA, PyCharm, WebStorm, GoLand, etc.) through the JetBrains AI Assistant plugin and the dedicated MCP Server for JetBrains.",
      "category": "mcp",
      "categoryTitle": "Model Context Protocol",
      "pillar": "Creativity",
      "keywords": [
        "MCP JetBrains",
        "IntelliJ MCP",
        "PyCharm MCP"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "MCP Jira Server",
      "slug": "mcp-jira-server",
      "url": "https://learn.engineering.vips.edu/mcp/mcp-jira-server",
      "description": "MCP server for Atlassian Jira — search issues, create tickets, transition workflows, and read sprint backlogs. Atlassian hosts an official remote MCP endpoint; community servers cover self-hosted Data Center.",
      "category": "mcp",
      "categoryTitle": "Model Context Protocol",
      "pillar": "Creativity",
      "keywords": [
        "MCP Jira",
        "Atlassian MCP",
        "Jira AI",
        "issue tracker MCP"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "MCP Jupyter Notebook Server",
      "slug": "mcp-jupyter-notebook-server",
      "url": "https://learn.engineering.vips.edu/mcp/mcp-jupyter-notebook-server",
      "description": "A community MCP server that exposes a running Jupyter kernel to Claude Desktop — execute cells, read outputs, manage notebooks — so an LLM client can drive a Python notebook end to end.",
      "category": "mcp",
      "categoryTitle": "Model Context Protocol",
      "pillar": "Creativity",
      "keywords": [
        "MCP Jupyter",
        "notebook",
        "data analysis",
        "Python kernel"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "MCP Kafka Server",
      "slug": "mcp-kafka-server",
      "url": "https://learn.engineering.vips.edu/mcp/mcp-kafka-server",
      "description": "Community MCP server for Apache Kafka — exposes topic listing, consumer group status, and message produce/consume as tools so LLMs can inspect streaming pipelines.",
      "category": "mcp",
      "categoryTitle": "Model Context Protocol",
      "pillar": "Creativity",
      "keywords": [
        "MCP Kafka",
        "Apache Kafka MCP",
        "streaming MCP"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "MCP Kestra Server",
      "slug": "mcp-kestra-server",
      "url": "https://learn.engineering.vips.edu/mcp/mcp-kestra-server",
      "description": "Community MCP server for Kestra — the open-source orchestrator — letting Claude list flows, trigger executions, and read task logs through Kestra's REST API.",
      "category": "mcp",
      "categoryTitle": "Model Context Protocol",
      "pillar": "Creativity",
      "keywords": [
        "MCP Kestra",
        "Kestra MCP server",
        "YAML orchestration",
        "event-driven pipelines"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "MCP Kubeflow Server",
      "slug": "mcp-kubeflow-server",
      "url": "https://learn.engineering.vips.edu/mcp/mcp-kubeflow-server",
      "description": "Community MCP server for Kubeflow Pipelines — lets Claude list pipelines, launch runs, and inspect Kubeflow training jobs on Kubernetes via the KFP API.",
      "category": "mcp",
      "categoryTitle": "Model Context Protocol",
      "pillar": "Creativity",
      "keywords": [
        "MCP Kubeflow",
        "Kubeflow Pipelines MCP",
        "MLOps MCP",
        "Kubernetes ML"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "MCP Kubernetes Server",
      "slug": "mcp-kubernetes-server",
      "url": "https://learn.engineering.vips.edu/mcp/mcp-kubernetes-server",
      "description": "Community-maintained MCP server that exposes kubectl-style operations — list pods, describe deployments, read logs, apply manifests — to any MCP client. Turns Claude into a read/write Kubernetes operator assistant.",
      "category": "mcp",
      "categoryTitle": "Model Context Protocol",
      "pillar": "Creativity",
      "keywords": [
        "MCP Kubernetes",
        "kubectl MCP",
        "Claude Kubernetes",
        "k8s MCP server"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Using MCP Servers in LangChain",
      "slug": "mcp-langchain-integration",
      "url": "https://learn.engineering.vips.edu/mcp/mcp-langchain-integration",
      "description": "How to consume Model Context Protocol servers as tools inside LangChain and LangGraph agents using the langchain-mcp-adapters package.",
      "category": "mcp",
      "categoryTitle": "Model Context Protocol",
      "pillar": "Creativity",
      "keywords": [
        "LangChain MCP",
        "LangGraph MCP",
        "langchain-mcp-adapters",
        "MCP tools"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "MCP Integration: LibreChat",
      "slug": "mcp-librechat-integration",
      "url": "https://learn.engineering.vips.edu/mcp/mcp-librechat-integration",
      "description": "LibreChat — the open-source multi-provider chat app — integrates MCP as a tool-provider layer, letting users chain MCP servers across OpenAI, Anthropic, Google, and local model back-ends.",
      "category": "mcp",
      "categoryTitle": "Model Context Protocol",
      "pillar": "Creativity",
      "keywords": [
        "LibreChat MCP",
        "multi-provider MCP"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "MCP Linear Server",
      "slug": "mcp-linear-server",
      "url": "https://learn.engineering.vips.edu/mcp/mcp-linear-server",
      "description": "The Linear MCP server exposes issues, projects, and cycles from a Linear workspace as MCP tools — letting Claude triage, create, and update tickets inline with engineering work.",
      "category": "mcp",
      "categoryTitle": "Model Context Protocol",
      "pillar": "Creativity",
      "keywords": [
        "MCP Linear",
        "Linear MCP server",
        "Linear API MCP",
        "agent issue tracker",
        "Claude Linear"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Using MCP Servers in LlamaIndex",
      "slug": "mcp-llamaindex-integration",
      "url": "https://learn.engineering.vips.edu/mcp/mcp-llamaindex-integration",
      "description": "How to plug MCP servers into LlamaIndex agents and workflows using LlamaIndex's MCP tool-spec integrations.",
      "category": "mcp",
      "categoryTitle": "Model Context Protocol",
      "pillar": "Creativity",
      "keywords": [
        "LlamaIndex MCP",
        "llama-index-tools-mcp",
        "LlamaIndex agents",
        "RAG MCP"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "MCP Mailchimp Server",
      "slug": "mcp-mailchimp-server",
      "url": "https://learn.engineering.vips.edu/mcp/mcp-mailchimp-server",
      "description": "Community MCP server for Mailchimp — exposes audiences, campaigns, automations, and reports so LLM clients can help draft, schedule, and analyze email marketing campaigns.",
      "category": "mcp",
      "categoryTitle": "Model Context Protocol",
      "pillar": "Creativity",
      "keywords": [
        "MCP Mailchimp",
        "email marketing MCP"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "MCP MariaDB Server",
      "slug": "mcp-mariadb-server",
      "url": "https://learn.engineering.vips.edu/mcp/mcp-mariadb-server",
      "description": "A community MCP server that exposes MariaDB — schema introspection, query execution, table and row inspection — to Claude Desktop over stdio transport. Mirrors the MySQL-family ergonomics.",
      "category": "mcp",
      "categoryTitle": "Model Context Protocol",
      "pillar": "Creativity",
      "keywords": [
        "MCP MariaDB",
        "MySQL",
        "database",
        "schema introspection"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "MCP Memory Server (Knowledge Graph)",
      "slug": "mcp-memory-server",
      "url": "https://learn.engineering.vips.edu/mcp/mcp-memory-server",
      "description": "The Memory MCP server gives Claude and other MCP clients a persistent, knowledge-graph-shaped memory — entities, relations, and observations that survive across conversations.",
      "category": "mcp",
      "categoryTitle": "Model Context Protocol",
      "pillar": "Creativity",
      "keywords": [
        "MCP memory server",
        "LLM knowledge graph",
        "Claude long-term memory",
        "MCP persistence",
        "knowledge graph MCP"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "MCP Metabase Server",
      "slug": "mcp-metabase-server",
      "url": "https://learn.engineering.vips.edu/mcp/mcp-metabase-server",
      "description": "Community MCP server for Metabase — the open-source BI tool. Lets LLM clients list dashboards, run questions, and fetch query results through Metabase's authenticated HTTP API.",
      "category": "mcp",
      "categoryTitle": "Model Context Protocol",
      "pillar": "Creativity",
      "keywords": [
        "MCP Metabase",
        "BI MCP",
        "dashboards as tools"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "MCP Microsoft Teams Server",
      "slug": "mcp-microsoft-teams-server",
      "url": "https://learn.engineering.vips.edu/mcp/mcp-microsoft-teams-server",
      "description": "Community MCP server for Microsoft Teams via the Microsoft Graph API — exposes channels, messages, meetings, and chat tools so LLMs can read conversations and post updates.",
      "category": "mcp",
      "categoryTitle": "Model Context Protocol",
      "pillar": "Creativity",
      "keywords": [
        "MCP Teams",
        "Microsoft Teams MCP",
        "Microsoft Graph"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "MCP Miro Server",
      "slug": "mcp-miro-server",
      "url": "https://learn.engineering.vips.edu/mcp/mcp-miro-server",
      "description": "MCP server for Miro — exposes boards, frames, sticky notes, and cards via the Miro REST API, letting Claude facilitate remote workshops and turn discussions into structured board content.",
      "category": "mcp",
      "categoryTitle": "Model Context Protocol",
      "pillar": "Creativity",
      "keywords": [
        "MCP Miro",
        "Miro MCP server",
        "AI whiteboard",
        "workshop facilitation"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "MCP MLflow Server",
      "slug": "mcp-mlflow-server",
      "url": "https://learn.engineering.vips.edu/mcp/mcp-mlflow-server",
      "description": "Community MCP server for MLflow — exposes the tracking, registry, and model serving APIs so Claude can compare experiments, register models, and manage model stage transitions.",
      "category": "mcp",
      "categoryTitle": "Model Context Protocol",
      "pillar": "Creativity",
      "keywords": [
        "MCP MLflow",
        "MLflow MCP server",
        "ML registry",
        "model promotion"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "MCP Modal Server",
      "slug": "mcp-modal-server",
      "url": "https://learn.engineering.vips.edu/mcp/mcp-modal-server",
      "description": "Community MCP server for Modal — exposes Modal apps, functions, and container images so Claude can launch serverless GPU jobs, inspect logs, and manage scheduled functions.",
      "category": "mcp",
      "categoryTitle": "Model Context Protocol",
      "pillar": "Creativity",
      "keywords": [
        "MCP Modal",
        "Modal MCP server",
        "serverless GPU",
        "AI infra MCP"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "MCP MongoDB Server",
      "slug": "mcp-mongodb-server",
      "url": "https://learn.engineering.vips.edu/mcp/mcp-mongodb-server",
      "description": "MCP server exposing MongoDB query, aggregation, and collection-management tools to LLM clients. MongoDB maintains an official server; community variants cover niche features.",
      "category": "mcp",
      "categoryTitle": "Model Context Protocol",
      "pillar": "Creativity",
      "keywords": [
        "MCP MongoDB",
        "Mongo MCP server",
        "Claude MongoDB",
        "Atlas MCP"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Pattern: Multi-Tenant MCP Server Deployment",
      "slug": "mcp-multi-tenant-server-deployment",
      "url": "https://learn.engineering.vips.edu/mcp/mcp-multi-tenant-server-deployment",
      "description": "Design pattern for running a single MCP server that serves multiple tenants safely — per-tenant credentials, scoped tool surface, audit trails, and rate-limiting across HTTP/SSE transports.",
      "category": "mcp",
      "categoryTitle": "Model Context Protocol",
      "pillar": "Creativity",
      "keywords": [
        "MCP multi-tenant",
        "MCP SaaS deployment"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "MCP Mural Server",
      "slug": "mcp-mural-server",
      "url": "https://learn.engineering.vips.edu/mcp/mcp-mural-server",
      "description": "Community MCP server for Mural — connects Claude to Mural workspaces, murals, and widgets via the Mural REST API for AI-assisted facilitation, synthesis, and reporting.",
      "category": "mcp",
      "categoryTitle": "Model Context Protocol",
      "pillar": "Creativity",
      "keywords": [
        "MCP Mural",
        "Mural MCP server",
        "AI facilitation",
        "design thinking"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "MCP n8n Server",
      "slug": "mcp-n8n-server",
      "url": "https://learn.engineering.vips.edu/mcp/mcp-n8n-server",
      "description": "A community MCP server that exposes n8n workflows and executions to Claude Desktop — list workflows, trigger runs, inspect results — from self-hosted or n8n Cloud instances.",
      "category": "mcp",
      "categoryTitle": "Model Context Protocol",
      "pillar": "Creativity",
      "keywords": [
        "MCP n8n",
        "workflow automation",
        "self-hosted",
        "iPaaS"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "MCP Neo4j Server",
      "slug": "mcp-neo4j-server",
      "url": "https://learn.engineering.vips.edu/mcp/mcp-neo4j-server",
      "description": "Official Neo4j MCP server — lets LLM clients query the Neo4j graph database using Cypher, inspect schema labels and relationships, and traverse knowledge graphs through tool calls.",
      "category": "mcp",
      "categoryTitle": "Model Context Protocol",
      "pillar": "Creativity",
      "keywords": [
        "MCP Neo4j",
        "MCP server",
        "Cypher",
        "graph database MCP"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "MCP Neovim Client",
      "slug": "mcp-neovim-client",
      "url": "https://learn.engineering.vips.edu/mcp/mcp-neovim-client",
      "description": "A Neovim plugin that speaks Model Context Protocol — turning Neovim into an MCP client that can use any MCP server (filesystem, GitHub, databases) alongside an LLM assistant.",
      "category": "mcp",
      "categoryTitle": "Model Context Protocol",
      "pillar": "Creativity",
      "keywords": [
        "MCP Neovim",
        "Neovim MCP client",
        "LLM in Neovim",
        "avante mcp"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "MCP NetSuite Server",
      "slug": "mcp-netsuite-server",
      "url": "https://learn.engineering.vips.edu/mcp/mcp-netsuite-server",
      "description": "Community MCP server for Oracle NetSuite — exposes SuiteQL queries, saved searches, and REST record endpoints so Claude can answer accounting and ERP questions with grounded data.",
      "category": "mcp",
      "categoryTitle": "Model Context Protocol",
      "pillar": "Creativity",
      "keywords": [
        "MCP NetSuite",
        "NetSuite MCP",
        "SuiteQL MCP",
        "ERP AI agent"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "MCP New Relic Server",
      "slug": "mcp-new-relic-server",
      "url": "https://learn.engineering.vips.edu/mcp/mcp-new-relic-server",
      "description": "Community MCP server for New Relic — exposes NRQL queries, dashboards, and entity lookups so LLMs can answer observability questions from APM, infrastructure, and logs data.",
      "category": "mcp",
      "categoryTitle": "Model Context Protocol",
      "pillar": "Creativity",
      "keywords": [
        "MCP New Relic",
        "NRQL MCP",
        "APM MCP"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "MCP Notion Calendar Server",
      "slug": "mcp-notion-calendar-server",
      "url": "https://learn.engineering.vips.edu/mcp/mcp-notion-calendar-server",
      "description": "Community MCP server for Notion Calendar (formerly Cron) — surfaces events, availability, and scheduling actions so Claude can help block time and respond to calendar invites.",
      "category": "mcp",
      "categoryTitle": "Model Context Protocol",
      "pillar": "Creativity",
      "keywords": [
        "MCP Notion Calendar",
        "Notion Calendar MCP",
        "AI scheduling",
        "calendar automation"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "MCP Notion Server",
      "slug": "mcp-notion-server",
      "url": "https://learn.engineering.vips.edu/mcp/mcp-notion-server",
      "description": "The Notion MCP server exposes pages, databases, and blocks from a Notion workspace as MCP tools — so Claude can search, read, and update Notion content through a scoped integration token.",
      "category": "mcp",
      "categoryTitle": "Model Context Protocol",
      "pillar": "Creativity",
      "keywords": [
        "MCP Notion",
        "Notion MCP server",
        "Notion integration token",
        "Claude Notion",
        "agent Notion"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "MCP Obsidian Server",
      "slug": "mcp-obsidian-server",
      "url": "https://learn.engineering.vips.edu/mcp/mcp-obsidian-server",
      "description": "The Obsidian MCP server exposes a local Obsidian vault — notes, tags, links, daily notes — as MCP tools so Claude can search and edit your personal knowledge graph.",
      "category": "mcp",
      "categoryTitle": "Model Context Protocol",
      "pillar": "Creativity",
      "keywords": [
        "MCP Obsidian",
        "Obsidian MCP server",
        "PKM LLM",
        "Claude Obsidian",
        "Local REST API"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "MCP OneDrive Server",
      "slug": "mcp-onedrive-server",
      "url": "https://learn.engineering.vips.edu/mcp/mcp-onedrive-server",
      "description": "MCP server that connects Claude to Microsoft OneDrive via the Microsoft Graph API — list drives, read and write files, search, and share links from inside an MCP client.",
      "category": "mcp",
      "categoryTitle": "Model Context Protocol",
      "pillar": "Creativity",
      "keywords": [
        "MCP OneDrive",
        "OneDrive MCP server",
        "Microsoft Graph MCP",
        "M365 AI"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Using MCP Servers via OpenAI Agents SDK",
      "slug": "mcp-openai-agents-sdk-integration",
      "url": "https://learn.engineering.vips.edu/mcp/mcp-openai-agents-sdk-integration",
      "description": "How to connect MCP servers to agents built with OpenAI's Agents SDK (Python and TypeScript) using the built-in MCPServerStdio and MCPServerSse classes.",
      "category": "mcp",
      "categoryTitle": "Model Context Protocol",
      "pillar": "Creativity",
      "keywords": [
        "OpenAI Agents SDK",
        "OpenAI MCP",
        "Responses API MCP",
        "agents mcp"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "MCP OpenTelemetry Server",
      "slug": "mcp-opentelemetry-server",
      "url": "https://learn.engineering.vips.edu/mcp/mcp-opentelemetry-server",
      "description": "Community MCP server that proxies OpenTelemetry trace and metric queries to an OTLP-compatible backend — letting LLM clients reason about distributed traces and span relationships.",
      "category": "mcp",
      "categoryTitle": "Model Context Protocol",
      "pillar": "Creativity",
      "keywords": [
        "MCP OpenTelemetry",
        "OTel MCP",
        "distributed tracing"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "MCP Client: Open WebUI",
      "slug": "mcp-openwebui-client",
      "url": "https://learn.engineering.vips.edu/mcp/mcp-openwebui-client",
      "description": "Open WebUI is a popular self-hosted UI for local and remote LLMs. Since 2025 it supports MCP servers as tool providers, letting self-hosters augment Ollama-backed models with MCP tools.",
      "category": "mcp",
      "categoryTitle": "Model Context Protocol",
      "pillar": "Creativity",
      "keywords": [
        "MCP Open WebUI",
        "self-hosted LLM MCP"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "MCP Oracle Server",
      "slug": "mcp-oracle-server",
      "url": "https://learn.engineering.vips.edu/mcp/mcp-oracle-server",
      "description": "Community MCP server for Oracle databases — turns SQL*Plus-style access to Oracle Database 19c/23ai into safe, parameterised tools usable by Claude and other MCP clients.",
      "category": "mcp",
      "categoryTitle": "Model Context Protocol",
      "pillar": "Creativity",
      "keywords": [
        "MCP Oracle",
        "Oracle Database MCP",
        "Oracle 23ai MCP",
        "SQL MCP server"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "MCP — An Overview for Students",
      "slug": "mcp-overview-for-students",
      "url": "https://learn.engineering.vips.edu/mcp/mcp-overview-for-students",
      "description": "A friendly, classroom-ready introduction to Model Context Protocol — why it exists, what a 'server' and 'client' actually are, and how engineering students can experiment with it on day one.",
      "category": "mcp",
      "categoryTitle": "Model Context Protocol",
      "pillar": "Creativity",
      "keywords": [
        "MCP for students",
        "Model Context Protocol intro",
        "student AI projects",
        "learn MCP"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "MCP PagerDuty Server",
      "slug": "mcp-pagerduty-server",
      "url": "https://learn.engineering.vips.edu/mcp/mcp-pagerduty-server",
      "description": "A community MCP server that exposes PagerDuty incidents, services, schedules, and on-call data to Claude Desktop over stdio — useful for incident triage and on-call lookups from chat.",
      "category": "mcp",
      "categoryTitle": "Model Context Protocol",
      "pillar": "Creativity",
      "keywords": [
        "MCP PagerDuty",
        "incident management",
        "on-call",
        "SRE"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "MCP PayPal Server",
      "slug": "mcp-paypal-server",
      "url": "https://learn.engineering.vips.edu/mcp/mcp-paypal-server",
      "description": "Community MCP server for PayPal — exposes payments, invoices, subscriptions, and transaction search so LLM clients can help reconcile, issue refunds, and answer customer billing questions.",
      "category": "mcp",
      "categoryTitle": "Model Context Protocol",
      "pillar": "Creativity",
      "keywords": [
        "MCP PayPal",
        "payments MCP"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "MCP Pinecone Server",
      "slug": "mcp-pinecone-server",
      "url": "https://learn.engineering.vips.edu/mcp/mcp-pinecone-server",
      "description": "The Pinecone MCP server exposes vector search against a managed Pinecone index — giving Claude and other MCP clients semantic recall over a document corpus without bespoke RAG code.",
      "category": "mcp",
      "categoryTitle": "Model Context Protocol",
      "pillar": "Creativity",
      "keywords": [
        "MCP Pinecone",
        "Pinecone MCP server",
        "vector search agent",
        "RAG MCP",
        "semantic search LLM"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "MCP Postgres Server",
      "slug": "mcp-postgres-server",
      "url": "https://learn.engineering.vips.edu/mcp/mcp-postgres-server",
      "description": "The Postgres MCP server exposes a read-only SQL tool plus schema resources so LLM clients can explore a Postgres database safely without write access or connection sprawl.",
      "category": "mcp",
      "categoryTitle": "Model Context Protocol",
      "pillar": "Creativity",
      "keywords": [
        "MCP Postgres",
        "Postgres MCP server",
        "LLM database access",
        "text-to-SQL",
        "read-only SQL agent"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "MCP Prefect Server",
      "slug": "mcp-prefect-server",
      "url": "https://learn.engineering.vips.edu/mcp/mcp-prefect-server",
      "description": "Community MCP server for Prefect 2/3 — exposes flows, deployments, and runs through the Prefect REST API so Claude can inspect workflow state and trigger reruns.",
      "category": "mcp",
      "categoryTitle": "Model Context Protocol",
      "pillar": "Creativity",
      "keywords": [
        "MCP Prefect",
        "Prefect MCP server",
        "Python orchestration",
        "workflow AI"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "MCP Prometheus Server",
      "slug": "mcp-prometheus-server",
      "url": "https://learn.engineering.vips.edu/mcp/mcp-prometheus-server",
      "description": "A community MCP server that exposes Prometheus — instant queries, range queries, series metadata, label values — to Claude Desktop so the model can explore metrics through PromQL.",
      "category": "mcp",
      "categoryTitle": "Model Context Protocol",
      "pillar": "Creativity",
      "keywords": [
        "MCP Prometheus",
        "PromQL",
        "observability",
        "metrics"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "MCP Prompts Capability: Deep Dive",
      "slug": "mcp-prompts-capability-deep-dive",
      "url": "https://learn.engineering.vips.edu/mcp/mcp-prompts-capability-deep-dive",
      "description": "A deep dive into MCP's 'prompts' capability — how servers advertise parameterized prompt templates, how clients render them as slash commands, and how arguments flow through.",
      "category": "mcp",
      "categoryTitle": "Model Context Protocol",
      "pillar": "Creativity",
      "keywords": [
        "MCP prompts",
        "slash commands",
        "prompt templates",
        "capability"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "MCP Puppeteer Server",
      "slug": "mcp-puppeteer-server",
      "url": "https://learn.engineering.vips.edu/mcp/mcp-puppeteer-server",
      "description": "The Puppeteer MCP server drives a headless Chrome browser from Claude and other MCP clients — navigate, click, fill, and screenshot any web page as an agent tool.",
      "category": "mcp",
      "categoryTitle": "Model Context Protocol",
      "pillar": "Creativity",
      "keywords": [
        "MCP Puppeteer",
        "Puppeteer MCP server",
        "headless Chrome agent",
        "browser automation MCP",
        "LLM scraping"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "MCP + Pydantic AI Integration",
      "slug": "mcp-pydantic-ai-integration",
      "url": "https://learn.engineering.vips.edu/mcp/mcp-pydantic-ai-integration",
      "description": "Pydantic AI — the type-safe Python agent framework — ships first-class support for MCP servers, letting developers bind typed tools from MCP into agents backed by Claude, GPT, or Gemini.",
      "category": "mcp",
      "categoryTitle": "Model Context Protocol",
      "pillar": "Creativity",
      "keywords": [
        "Pydantic AI MCP",
        "typed Python agents",
        "MCP Python",
        "Claude agent framework"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "MCP RabbitMQ Server",
      "slug": "mcp-rabbitmq-server",
      "url": "https://learn.engineering.vips.edu/mcp/mcp-rabbitmq-server",
      "description": "Community MCP server for RabbitMQ — exposes queues, exchanges, and the management API so LLMs can inspect broker health, publish test messages, and trace message flow in AMQP-based systems.",
      "category": "mcp",
      "categoryTitle": "Model Context Protocol",
      "pillar": "Creativity",
      "keywords": [
        "MCP RabbitMQ",
        "AMQP MCP",
        "message broker"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "MCP Raycast Server",
      "slug": "mcp-raycast-server",
      "url": "https://learn.engineering.vips.edu/mcp/mcp-raycast-server",
      "description": "A community MCP integration that exposes Raycast extensions and the Raycast AI surface to MCP clients — or conversely lets Raycast consume MCP servers as AI commands.",
      "category": "mcp",
      "categoryTitle": "Model Context Protocol",
      "pillar": "Creativity",
      "keywords": [
        "MCP Raycast",
        "Raycast extension",
        "macOS launcher",
        "AI command"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "MCP Razorpay Server",
      "slug": "mcp-razorpay-server",
      "url": "https://learn.engineering.vips.edu/mcp/mcp-razorpay-server",
      "description": "Community MCP server for Razorpay — India's leading payments platform. Exposes payments, orders, refunds, payouts, and subscriptions so LLM clients can help with billing and reconciliation.",
      "category": "mcp",
      "categoryTitle": "Model Context Protocol",
      "pillar": "Creativity",
      "keywords": [
        "MCP Razorpay",
        "India payments MCP",
        "UPI MCP"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "MCP Readwise Server",
      "slug": "mcp-readwise-server",
      "url": "https://learn.engineering.vips.edu/mcp/mcp-readwise-server",
      "description": "Community MCP server for Readwise and Readwise Reader — exposes highlights, articles, and daily review data so Claude can synthesize and link what a user has been reading.",
      "category": "mcp",
      "categoryTitle": "Model Context Protocol",
      "pillar": "Creativity",
      "keywords": [
        "MCP Readwise",
        "Readwise MCP server",
        "read-later AI",
        "highlights"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "MCP Reddit Server",
      "slug": "mcp-reddit-server",
      "url": "https://learn.engineering.vips.edu/mcp/mcp-reddit-server",
      "description": "A community MCP server that exposes Reddit's API — subreddit listings, search, comment trees, submission posting — to Claude Desktop and other MCP clients using OAuth app credentials.",
      "category": "mcp",
      "categoryTitle": "Model Context Protocol",
      "pillar": "Creativity",
      "keywords": [
        "MCP Reddit",
        "Reddit API",
        "Claude Desktop",
        "community moderation"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "MCP Redis Server",
      "slug": "mcp-redis-server",
      "url": "https://learn.engineering.vips.edu/mcp/mcp-redis-server",
      "description": "MCP server that exposes Redis commands — GET, SET, SCAN, pub/sub — as tools for LLM clients. Useful for cache inspection, troubleshooting, and vector search on Redis Stack.",
      "category": "mcp",
      "categoryTitle": "Model Context Protocol",
      "pillar": "Creativity",
      "keywords": [
        "MCP Redis",
        "Redis MCP",
        "Claude Redis",
        "Redis vector MCP"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "MCP Registry and Discovery",
      "slug": "mcp-registry-and-discovery",
      "url": "https://learn.engineering.vips.edu/mcp/mcp-registry-and-discovery",
      "description": "How users and agents find Model Context Protocol servers — from the official MCP Registry and the modelcontextprotocol/servers repo to per-client marketplaces in Cursor, Claude Desktop, and Cline.",
      "category": "mcp",
      "categoryTitle": "Model Context Protocol",
      "pillar": "Creativity",
      "keywords": [
        "MCP registry",
        "MCP discovery",
        "MCP marketplace",
        "MCP servers list",
        "install MCP server"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Deploying Remote MCP Servers (HTTP/SSE)",
      "slug": "mcp-remote-mcp-server-deployment",
      "url": "https://learn.engineering.vips.edu/mcp/mcp-remote-mcp-server-deployment",
      "description": "Guide to hosting an MCP server as a remote HTTP/SSE endpoint — covering transport choice, auth (OAuth 2.1), deployment targets, and Claude Desktop connector setup.",
      "category": "mcp",
      "categoryTitle": "Model Context Protocol",
      "pillar": "Creativity",
      "keywords": [
        "remote MCP server",
        "MCP HTTP transport",
        "MCP SSE",
        "MCP OAuth"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "MCP Roam Research Server",
      "slug": "mcp-roam-research-server",
      "url": "https://learn.engineering.vips.edu/mcp/mcp-roam-research-server",
      "description": "Community MCP server for Roam Research — bridges Claude with Roam's graph database over the backend API so the LLM can read and write blocks in a user's graph.",
      "category": "mcp",
      "categoryTitle": "Model Context Protocol",
      "pillar": "Creativity",
      "keywords": [
        "MCP Roam",
        "Roam Research MCP",
        "Datalog MCP",
        "networked thought"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "MCP Salesforce Server",
      "slug": "mcp-salesforce-server",
      "url": "https://learn.engineering.vips.edu/mcp/mcp-salesforce-server",
      "description": "MCP server for Salesforce — query accounts, update opportunities, run SOQL, and execute Apex actions. Salesforce's Agentforce platform ships MCP integration; community packages cover smaller use cases.",
      "category": "mcp",
      "categoryTitle": "Model Context Protocol",
      "pillar": "Creativity",
      "keywords": [
        "MCP Salesforce",
        "Agentforce MCP",
        "SOQL MCP",
        "CRM MCP"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "The MCP Sampling Pattern",
      "slug": "mcp-sampling-pattern",
      "url": "https://learn.engineering.vips.edu/mcp/mcp-sampling-pattern",
      "description": "Sampling is the MCP capability where a server asks its client to run an LLM completion on its behalf — powerful for tools that need to reason over their own data without bundling a model.",
      "category": "mcp",
      "categoryTitle": "Model Context Protocol",
      "pillar": "Creativity",
      "keywords": [
        "MCP sampling",
        "server-initiated completion",
        "agentic pattern",
        "cost control"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Pattern: MCP Sampling — Servers Requesting Completions",
      "slug": "mcp-sampling-server-requests-completions",
      "url": "https://learn.engineering.vips.edu/mcp/mcp-sampling-server-requests-completions",
      "description": "MCP's sampling primitive lets a server ask its client to run an LLM completion on its behalf — enabling agent-in-agent workflows where tools delegate reasoning back to the client's model.",
      "category": "mcp",
      "categoryTitle": "Model Context Protocol",
      "pillar": "Creativity",
      "keywords": [
        "MCP sampling",
        "agent-in-agent MCP"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "MCP SAP Server",
      "slug": "mcp-sap-server",
      "url": "https://learn.engineering.vips.edu/mcp/mcp-sap-server",
      "description": "Community MCP server for SAP S/4HANA and SAP ERP — exposes OData services, BAPI calls, and CDS views as tools so Claude can reason over finance, supply-chain, and HR data.",
      "category": "mcp",
      "categoryTitle": "Model Context Protocol",
      "pillar": "Creativity",
      "keywords": [
        "MCP SAP",
        "SAP S/4HANA MCP",
        "SAP OData MCP",
        "SAP Joule"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "MCP Security Best Practices",
      "slug": "mcp-security-best-practices",
      "url": "https://learn.engineering.vips.edu/mcp/mcp-security-best-practices",
      "description": "Practical security checklist for building and deploying MCP servers and clients — prompt-injection defenses, auth hygiene, tool scoping, and audit logging.",
      "category": "mcp",
      "categoryTitle": "Model Context Protocol",
      "pillar": "Creativity",
      "keywords": [
        "MCP security",
        "MCP prompt injection",
        "MCP OAuth",
        "MCP least privilege"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "MCP SendGrid Server",
      "slug": "mcp-sendgrid-server",
      "url": "https://learn.engineering.vips.edu/mcp/mcp-sendgrid-server",
      "description": "Community MCP server for Twilio SendGrid — exposes email sending, template management, and analytics APIs so LLM clients can compose transactional emails and investigate deliverability.",
      "category": "mcp",
      "categoryTitle": "Model Context Protocol",
      "pillar": "Creativity",
      "keywords": [
        "MCP SendGrid",
        "transactional email MCP"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "MCP Sentry Server",
      "slug": "mcp-sentry-server",
      "url": "https://learn.engineering.vips.edu/mcp/mcp-sentry-server",
      "description": "The Sentry MCP server exposes issues, events, releases, and projects from Sentry as MCP tools — so Claude can triage production errors and draft fixes in one loop.",
      "category": "mcp",
      "categoryTitle": "Model Context Protocol",
      "pillar": "Creativity",
      "keywords": [
        "MCP Sentry",
        "Sentry MCP server",
        "error triage agent",
        "Claude Sentry",
        "production debugging"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "MCP Server Authentication Patterns",
      "slug": "mcp-server-authentication",
      "url": "https://learn.engineering.vips.edu/mcp/mcp-server-authentication",
      "description": "How Model Context Protocol servers authenticate — from plain env-var API keys on stdio to full OAuth 2.1 on remote Streamable HTTP endpoints, with scope and audit guidance.",
      "category": "mcp",
      "categoryTitle": "Model Context Protocol",
      "pillar": "Creativity",
      "keywords": [
        "MCP authentication",
        "MCP OAuth",
        "MCP API key",
        "OAuth 2.1 MCP",
        "MCP scopes"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "MCP ServiceNow Server",
      "slug": "mcp-servicenow-server",
      "url": "https://learn.engineering.vips.edu/mcp/mcp-servicenow-server",
      "description": "Community MCP server that exposes ServiceNow incidents, change requests, CMDB records, and the Now Platform Table API as tools — enabling Claude to triage tickets, update CIs, and orchestrate ITSM workflows.",
      "category": "mcp",
      "categoryTitle": "Model Context Protocol",
      "pillar": "Creativity",
      "keywords": [
        "MCP ServiceNow",
        "ServiceNow MCP server",
        "ITSM automation",
        "Now Platform LLM"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "MCP SharePoint Server",
      "slug": "mcp-sharepoint-server",
      "url": "https://learn.engineering.vips.edu/mcp/mcp-sharepoint-server",
      "description": "MCP server for Microsoft SharePoint — expose site contents, document libraries, lists, and search via Microsoft Graph so Claude can answer grounded questions across an organization's intranet.",
      "category": "mcp",
      "categoryTitle": "Model Context Protocol",
      "pillar": "Creativity",
      "keywords": [
        "MCP SharePoint",
        "SharePoint MCP server",
        "Microsoft Graph MCP",
        "intranet RAG"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "MCP Shopify Server",
      "slug": "mcp-shopify-server",
      "url": "https://learn.engineering.vips.edu/mcp/mcp-shopify-server",
      "description": "A community MCP server that exposes the Shopify Admin API — products, orders, customers, fulfillment — to Claude Desktop and other MCP clients over stdio transport.",
      "category": "mcp",
      "categoryTitle": "Model Context Protocol",
      "pillar": "Creativity",
      "keywords": [
        "MCP Shopify",
        "ecommerce",
        "Admin API",
        "products"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "MCP Sketch Server",
      "slug": "mcp-sketch-server",
      "url": "https://learn.engineering.vips.edu/mcp/mcp-sketch-server",
      "description": "Community MCP server for Sketch — parses .sketch files and Sketch Cloud libraries to expose symbols, layers, and design tokens to Claude for automated documentation and code handoff.",
      "category": "mcp",
      "categoryTitle": "Model Context Protocol",
      "pillar": "Creativity",
      "keywords": [
        "MCP Sketch",
        "Sketch MCP server",
        "Sketch Cloud MCP",
        "design system"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "MCP Slack Server",
      "slug": "mcp-slack-server",
      "url": "https://learn.engineering.vips.edu/mcp/mcp-slack-server",
      "description": "The Slack MCP server lets Claude and other MCP clients post messages, read channels, and search history in a Slack workspace — authenticated with a Slack bot token.",
      "category": "mcp",
      "categoryTitle": "Model Context Protocol",
      "pillar": "Creativity",
      "keywords": [
        "MCP Slack",
        "Slack MCP server",
        "Slack bot token",
        "agent Slack integration",
        "Claude Slack"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "MCP Snowflake Server",
      "slug": "mcp-snowflake-server",
      "url": "https://learn.engineering.vips.edu/mcp/mcp-snowflake-server",
      "description": "MCP server exposing Snowflake SQL execution, schema browsing, and warehouse metadata to LLM clients. Snowflake ships an official implementation; community variants add Cortex and Snowpark bindings.",
      "category": "mcp",
      "categoryTitle": "Model Context Protocol",
      "pillar": "Creativity",
      "keywords": [
        "MCP Snowflake",
        "Snowflake MCP",
        "Cortex MCP",
        "data warehouse MCP"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "MCP Splunk Server",
      "slug": "mcp-splunk-server",
      "url": "https://learn.engineering.vips.edu/mcp/mcp-splunk-server",
      "description": "Community MCP server for Splunk Enterprise and Splunk Cloud — lets LLM clients run SPL searches, list saved searches, and fetch results from Splunk for incident triage and log exploration.",
      "category": "mcp",
      "categoryTitle": "Model Context Protocol",
      "pillar": "Creativity",
      "keywords": [
        "MCP Splunk",
        "SIEM MCP",
        "SPL"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "MCP Spotify Server",
      "slug": "mcp-spotify-server",
      "url": "https://learn.engineering.vips.edu/mcp/mcp-spotify-server",
      "description": "A community MCP server that exposes Spotify Web API endpoints — search tracks, control playback, manage playlists — to Claude Desktop and other MCP clients.",
      "category": "mcp",
      "categoryTitle": "Model Context Protocol",
      "pillar": "Creativity",
      "keywords": [
        "MCP Spotify",
        "Spotify API",
        "Claude Desktop",
        "playback control"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "MCP SQLite Server",
      "slug": "mcp-sqlite-server",
      "url": "https://learn.engineering.vips.edu/mcp/mcp-sqlite-server",
      "description": "The SQLite MCP server lets Claude and other MCP clients query a local SQLite database file — ideal for notebooks, analytics prototypes, and local-first apps that want an LLM data assistant.",
      "category": "mcp",
      "categoryTitle": "Model Context Protocol",
      "pillar": "Creativity",
      "keywords": [
        "MCP SQLite",
        "SQLite MCP server",
        "local text-to-SQL",
        "Claude Desktop SQLite",
        "LLM analytics"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "MCP Square Server",
      "slug": "mcp-square-server",
      "url": "https://learn.engineering.vips.edu/mcp/mcp-square-server",
      "description": "Community MCP server for Block's Square — exposes payments, orders, catalog, and customer APIs so LLM clients can help retail and restaurant merchants manage commerce from chat.",
      "category": "mcp",
      "categoryTitle": "Model Context Protocol",
      "pillar": "Creativity",
      "keywords": [
        "MCP Square",
        "Block Square",
        "POS MCP"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Pattern: MCP Streaming and Progress Notifications",
      "slug": "mcp-streaming-and-progress-notifications",
      "url": "https://learn.engineering.vips.edu/mcp/mcp-streaming-and-progress-notifications",
      "description": "How to emit progress and partial results from long-running MCP tool calls using Streamable HTTP, SSE transports, and the progress notification primitives defined in the MCP spec.",
      "category": "mcp",
      "categoryTitle": "Model Context Protocol",
      "pillar": "Creativity",
      "keywords": [
        "MCP streaming",
        "progress notifications",
        "SSE"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "MCP Stripe Server",
      "slug": "mcp-stripe-server",
      "url": "https://learn.engineering.vips.edu/mcp/mcp-stripe-server",
      "description": "The Stripe MCP server gives Claude and other MCP clients scoped access to Stripe customers, payments, invoices, and subscriptions via a restricted API key.",
      "category": "mcp",
      "categoryTitle": "Model Context Protocol",
      "pillar": "Creativity",
      "keywords": [
        "MCP Stripe",
        "Stripe MCP server",
        "Stripe agent",
        "restricted API key",
        "billing LLM"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "MCP Supabase Server",
      "slug": "mcp-supabase-server",
      "url": "https://learn.engineering.vips.edu/mcp/mcp-supabase-server",
      "description": "An MCP server for Supabase — available in both community and official implementations — that lets Claude Desktop and other MCP clients query Postgres, inspect schemas, manage Auth users, and read Storage buckets in a Supabase project.",
      "category": "mcp",
      "categoryTitle": "Model Context Protocol",
      "pillar": "Creativity",
      "keywords": [
        "MCP Supabase",
        "Postgres",
        "Auth",
        "Storage"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "MCP Telegram Server",
      "slug": "mcp-telegram-server",
      "url": "https://learn.engineering.vips.edu/mcp/mcp-telegram-server",
      "description": "A community MCP server that exposes Telegram Bot API operations — send messages, read chats, forward updates — to Claude Desktop and other MCP clients over stdio.",
      "category": "mcp",
      "categoryTitle": "Model Context Protocol",
      "pillar": "Creativity",
      "keywords": [
        "MCP Telegram",
        "Telegram bot",
        "Claude Desktop",
        "notification"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Testing MCP Servers with the Inspector Tool",
      "slug": "mcp-testing-and-inspector-tool",
      "url": "https://learn.engineering.vips.edu/mcp/mcp-testing-and-inspector-tool",
      "description": "How to use @modelcontextprotocol/inspector — the official browser-based testing UI — to exercise tools, resources, and prompts of any MCP server during development.",
      "category": "mcp",
      "categoryTitle": "Model Context Protocol",
      "pillar": "Creativity",
      "keywords": [
        "MCP inspector",
        "MCP testing",
        "modelcontextprotocol inspector",
        "MCP debug"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "MCP TikTok Server",
      "slug": "mcp-tiktok-server",
      "url": "https://learn.engineering.vips.edu/mcp/mcp-tiktok-server",
      "description": "Community MCP server for the TikTok for Business and Content Posting APIs — exposes video uploads, insights, and creator account tools so LLM clients can help manage TikTok presence.",
      "category": "mcp",
      "categoryTitle": "Model Context Protocol",
      "pillar": "Creativity",
      "keywords": [
        "MCP TikTok",
        "short video MCP",
        "social MCP"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "MCP TimescaleDB Server",
      "slug": "mcp-timescale-server",
      "url": "https://learn.engineering.vips.edu/mcp/mcp-timescale-server",
      "description": "Community MCP server for TimescaleDB — the PostgreSQL extension for time-series. Exposes hypertable schema, continuous aggregates, and SQL execution so LLMs can explore high-volume time-ordered data.",
      "category": "mcp",
      "categoryTitle": "Model Context Protocol",
      "pillar": "Creativity",
      "keywords": [
        "MCP TimescaleDB",
        "MCP server",
        "time series Postgres"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "MCP Todoist Server",
      "slug": "mcp-todoist-server",
      "url": "https://learn.engineering.vips.edu/mcp/mcp-todoist-server",
      "description": "Community MCP server for Todoist — exposes tasks, projects, filters, and labels via the Todoist REST API so Claude can triage, schedule, and complete personal work.",
      "category": "mcp",
      "categoryTitle": "Model Context Protocol",
      "pillar": "Creativity",
      "keywords": [
        "MCP Todoist",
        "Todoist MCP server",
        "AI task manager",
        "productivity MCP"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "MCP Transports — stdio vs SSE vs Streamable HTTP",
      "slug": "mcp-transports-stdio-vs-sse",
      "url": "https://learn.engineering.vips.edu/mcp/mcp-transports-stdio-vs-sse",
      "description": "Compare the three official Model Context Protocol transports — stdio, Server-Sent Events, and the newer Streamable HTTP — and learn when to pick each for local tools vs remote multi-tenant servers.",
      "category": "mcp",
      "categoryTitle": "Model Context Protocol",
      "pillar": "Creativity",
      "keywords": [
        "MCP transport",
        "stdio MCP",
        "SSE MCP",
        "Streamable HTTP MCP",
        "MCP remote server"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "MCP Trino Server",
      "slug": "mcp-trino-server",
      "url": "https://learn.engineering.vips.edu/mcp/mcp-trino-server",
      "description": "Community MCP server for Trino (formerly PrestoSQL) — federated SQL engine. Exposes catalog and schema introspection plus query execution across heterogeneous data sources like Hive, Iceberg, and Postgres.",
      "category": "mcp",
      "categoryTitle": "Model Context Protocol",
      "pillar": "Creativity",
      "keywords": [
        "MCP Trino",
        "MCP server",
        "federated SQL",
        "lakehouse MCP"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "MCP Twilio Server",
      "slug": "mcp-twilio-server",
      "url": "https://learn.engineering.vips.edu/mcp/mcp-twilio-server",
      "description": "A community MCP server that exposes Twilio — send SMS and WhatsApp messages, make calls, look up numbers, read logs — to Claude Desktop over stdio for communication workflows.",
      "category": "mcp",
      "categoryTitle": "Model Context Protocol",
      "pillar": "Creativity",
      "keywords": [
        "MCP Twilio",
        "SMS",
        "WhatsApp",
        "communication API"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "MCP Twitter / X Server",
      "slug": "mcp-twitter-x-server",
      "url": "https://learn.engineering.vips.edu/mcp/mcp-twitter-x-server",
      "description": "A community MCP server that exposes the Twitter / X API — tweet search, post tweets, read user timelines — to Claude Desktop and other MCP clients over stdio transport.",
      "category": "mcp",
      "categoryTitle": "Model Context Protocol",
      "pillar": "Creativity",
      "keywords": [
        "MCP Twitter",
        "MCP X",
        "X API",
        "social media MCP"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "MCP HashiCorp Vault Server",
      "slug": "mcp-vault-server",
      "url": "https://learn.engineering.vips.edu/mcp/mcp-vault-server",
      "description": "Community MCP server for HashiCorp Vault — lets LLM clients list secret engines, read non-sensitive metadata, and perform audited lookups with scoped policies. Designed for read-only diagnostics.",
      "category": "mcp",
      "categoryTitle": "Model Context Protocol",
      "pillar": "Creativity",
      "keywords": [
        "MCP Vault",
        "HashiCorp Vault",
        "secret management MCP"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "MCP Vercel Server",
      "slug": "mcp-vercel-server",
      "url": "https://learn.engineering.vips.edu/mcp/mcp-vercel-server",
      "description": "Vercel's official MCP server exposes project, deployment, and log operations to LLM clients. Pairs naturally with v0 and the Vercel AI SDK for agentic deploy workflows.",
      "category": "mcp",
      "categoryTitle": "Model Context Protocol",
      "pillar": "Creativity",
      "keywords": [
        "MCP Vercel",
        "Vercel MCP",
        "v0 MCP",
        "deploy MCP"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "MCP vs OpenAPI Tool Calling",
      "slug": "mcp-vs-openapi-tools",
      "url": "https://learn.engineering.vips.edu/mcp/mcp-vs-openapi-tools",
      "description": "How Model Context Protocol differs from OpenAPI-powered tool calling — discovery, transport, stateful sessions, prompts and resources — and when to pick each approach.",
      "category": "mcp",
      "categoryTitle": "Model Context Protocol",
      "pillar": "Creativity",
      "keywords": [
        "MCP vs OpenAPI",
        "tool calling",
        "function calling",
        "LLM integration"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "MCP Client: Visual Studio Code",
      "slug": "mcp-vscode-client",
      "url": "https://learn.engineering.vips.edu/mcp/mcp-vscode-client",
      "description": "Overview of MCP support in Visual Studio Code — both through GitHub Copilot Chat's Agent Mode and community extensions. VS Code can consume MCP servers and expose editor context.",
      "category": "mcp",
      "categoryTitle": "Model Context Protocol",
      "pillar": "Creativity",
      "keywords": [
        "MCP VS Code",
        "Copilot Agent Mode"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "MCP Weights & Biases Server",
      "slug": "mcp-wandb-server",
      "url": "https://learn.engineering.vips.edu/mcp/mcp-wandb-server",
      "description": "Community MCP server for Weights & Biases — exposes runs, sweeps, artifacts, and reports via the W&B SDK so Claude can summarize experiments and compare training jobs.",
      "category": "mcp",
      "categoryTitle": "Model Context Protocol",
      "pillar": "Creativity",
      "keywords": [
        "MCP W&B",
        "Weights & Biases MCP",
        "experiment tracking",
        "ML ops MCP"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "MCP Webflow Server",
      "slug": "mcp-webflow-server",
      "url": "https://learn.engineering.vips.edu/mcp/mcp-webflow-server",
      "description": "A community MCP server that exposes the Webflow Data API — CMS collections, items, sites, publishing — to Claude Desktop over stdio transport for headless-content workflows.",
      "category": "mcp",
      "categoryTitle": "Model Context Protocol",
      "pillar": "Creativity",
      "keywords": [
        "MCP Webflow",
        "CMS",
        "headless content",
        "site publishing"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Windsurf as an MCP Client",
      "slug": "mcp-windsurf-client",
      "url": "https://learn.engineering.vips.edu/mcp/mcp-windsurf-client",
      "description": "Codeium's Windsurf editor is an MCP-compatible client — it can launch MCP servers from its config and expose their tools inside Cascade, the agentic coding interface.",
      "category": "mcp",
      "categoryTitle": "Model Context Protocol",
      "pillar": "Creativity",
      "keywords": [
        "Windsurf",
        "Codeium",
        "MCP client",
        "Cascade"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "MCP WooCommerce Server",
      "slug": "mcp-woocommerce-server",
      "url": "https://learn.engineering.vips.edu/mcp/mcp-woocommerce-server",
      "description": "Community MCP server for WooCommerce — the WordPress e-commerce plugin. Exposes products, orders, customers, and coupons via the REST API so LLM clients can help store owners.",
      "category": "mcp",
      "categoryTitle": "Model Context Protocol",
      "pillar": "Creativity",
      "keywords": [
        "MCP WooCommerce",
        "WordPress e-commerce MCP"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "MCP WordPress Server",
      "slug": "mcp-wordpress-server",
      "url": "https://learn.engineering.vips.edu/mcp/mcp-wordpress-server",
      "description": "A community MCP server that exposes WordPress's REST API — posts, pages, media, categories, users — to Claude Desktop over stdio for AI-assisted content editing on self-hosted or WordPress.com sites.",
      "category": "mcp",
      "categoryTitle": "Model Context Protocol",
      "pillar": "Creativity",
      "keywords": [
        "MCP WordPress",
        "CMS",
        "REST API",
        "content editor"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "MCP Workato Server",
      "slug": "mcp-workato-server",
      "url": "https://learn.engineering.vips.edu/mcp/mcp-workato-server",
      "description": "Workato-published MCP server that lets Claude invoke Workato recipes and on-prem connectors — turning a Workato account into a library of pre-built, governed enterprise automations.",
      "category": "mcp",
      "categoryTitle": "Model Context Protocol",
      "pillar": "Creativity",
      "keywords": [
        "MCP Workato",
        "Workato MCP",
        "iPaaS MCP",
        "enterprise automation AI"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "MCP Workday Server",
      "slug": "mcp-workday-server",
      "url": "https://learn.engineering.vips.edu/mcp/mcp-workday-server",
      "description": "Community MCP server that bridges Claude and other MCP clients with Workday HCM and Financials — surfacing employee records, time-off, expense, and reporting APIs as safe, auditable tools.",
      "category": "mcp",
      "categoryTitle": "Model Context Protocol",
      "pillar": "Creativity",
      "keywords": [
        "MCP Workday",
        "Workday HCM MCP",
        "Workday automation",
        "HR AI agent"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "MCP YouTube Server",
      "slug": "mcp-youtube-server",
      "url": "https://learn.engineering.vips.edu/mcp/mcp-youtube-server",
      "description": "A community MCP server that exposes YouTube Data API operations — search videos, fetch transcripts, read channel metadata — to Claude Desktop and other MCP clients over stdio.",
      "category": "mcp",
      "categoryTitle": "Model Context Protocol",
      "pillar": "Creativity",
      "keywords": [
        "MCP YouTube",
        "YouTube transcript",
        "Claude Desktop",
        "video summarization"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "MCP Zapier Server",
      "slug": "mcp-zapier-server",
      "url": "https://learn.engineering.vips.edu/mcp/mcp-zapier-server",
      "description": "An integration server that exposes Zapier's catalog of 6,000+ app actions to MCP clients — lets Claude Desktop invoke any Zapier-supported service through a unified MCP tool interface.",
      "category": "mcp",
      "categoryTitle": "Model Context Protocol",
      "pillar": "Creativity",
      "keywords": [
        "MCP Zapier",
        "AI Actions",
        "workflow automation",
        "SaaS integration"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Zed Editor as an MCP Client",
      "slug": "mcp-zed-editor-client",
      "url": "https://learn.engineering.vips.edu/mcp/mcp-zed-editor-client",
      "description": "Zed, the high-performance Rust-based code editor, ships first-class MCP client support — configure servers via settings.json and Zed's Assistant Agent can call them in its tool loop.",
      "category": "mcp",
      "categoryTitle": "Model Context Protocol",
      "pillar": "Creativity",
      "keywords": [
        "Zed MCP",
        "Zed editor client",
        "context servers",
        "Zed Assistant",
        "Rust code editor MCP"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "MCP Zoom Server",
      "slug": "mcp-zoom-server",
      "url": "https://learn.engineering.vips.edu/mcp/mcp-zoom-server",
      "description": "Community MCP server for Zoom — exposes meeting scheduling, recording listing, transcript fetching, and participant data so LLMs can summarize meetings and manage calendars.",
      "category": "mcp",
      "categoryTitle": "Model Context Protocol",
      "pillar": "Creativity",
      "keywords": [
        "MCP Zoom",
        "meeting transcripts MCP"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "What is the Model Context Protocol (MCP)?",
      "slug": "what-is-model-context-protocol",
      "url": "https://learn.engineering.vips.edu/mcp/what-is-model-context-protocol",
      "description": "Model Context Protocol (MCP) is an open standard from Anthropic that lets LLM clients connect to tools, resources, and prompts through a uniform server interface. Think 'USB-C for AI apps'.",
      "category": "mcp",
      "categoryTitle": "Model Context Protocol",
      "pillar": "Creativity",
      "keywords": [
        "MCP",
        "Model Context Protocol",
        "Anthropic",
        "tool use",
        "agents",
        "standard"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "A2A Agent Card — Capability Manifest Spec",
      "slug": "a2a-agent-card-spec",
      "url": "https://learn.engineering.vips.edu/agent-protocols/a2a-agent-card-spec",
      "description": "The Agent Card is A2A's capability manifest — a JSON document an agent publishes describing its name, skills, endpoints, auth requirements, and supported transports.",
      "category": "a2a",
      "categoryTitle": "Agent-to-Agent Protocols",
      "pillar": "Creativity",
      "keywords": [
        "Agent Card",
        "A2A",
        "capability manifest",
        "agent discovery",
        "well-known URL"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "A2A Authentication — OAuth & Beyond",
      "slug": "a2a-authentication-oauth",
      "url": "https://learn.engineering.vips.edu/agent-protocols/a2a-authentication-oauth",
      "description": "A2A leans on standard web auth — primarily OAuth 2.0 bearer tokens — so agents authenticate to one another the same way services do, with API keys and mTLS as alternatives.",
      "category": "a2a",
      "categoryTitle": "Agent-to-Agent Protocols",
      "pillar": "Creativity",
      "keywords": [
        "A2A",
        "authentication",
        "OAuth",
        "mTLS",
        "bearer token",
        "agent identity"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "A2A Task Handoff — Semantics & Lifecycle",
      "slug": "a2a-task-handoff",
      "url": "https://learn.engineering.vips.edu/agent-protocols/a2a-task-handoff",
      "description": "Task handoff in A2A describes the full lifecycle of delegating work to another agent: create, assign, run, stream updates, return result — with support for long-running and multi-turn tasks.",
      "category": "a2a",
      "categoryTitle": "Agent-to-Agent Protocols",
      "pillar": "Creativity",
      "keywords": [
        "A2A",
        "task handoff",
        "task lifecycle",
        "long-running tasks",
        "delegation"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Adept ACT-1 — Action Transformer",
      "slug": "adept-act-1-agent",
      "url": "https://learn.engineering.vips.edu/agent-protocols/adept-act-1-agent",
      "description": "Adept's ACT-1 was a 2022 action-transformer model that pioneered browser-controlling foundation models — a major intellectual precursor to modern computer-use agents.",
      "category": "a2a",
      "categoryTitle": "Agent-to-Agent Protocols",
      "pillar": "Creativity",
      "keywords": [
        "Adept",
        "ACT-1",
        "action transformer",
        "computer use",
        "browser agent"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "AG-UI — Agent-User Interaction Protocol",
      "slug": "ag-ui-protocol",
      "url": "https://learn.engineering.vips.edu/agent-protocols/ag-ui-protocol",
      "description": "AG-UI is an open event-based protocol for how a running agent streams its thoughts, tool calls, and partial outputs to a user-facing UI — the agent-to-UI counterpart of A2A.",
      "category": "a2a",
      "categoryTitle": "Agent-to-Agent Protocols",
      "pillar": "Creativity",
      "keywords": [
        "AG-UI",
        "agent UI",
        "streaming",
        "events",
        "agent-user"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Blackboard Pattern for Multi-Agent Systems",
      "slug": "agent-blackboard-pattern",
      "url": "https://learn.engineering.vips.edu/agent-protocols/agent-blackboard-pattern",
      "description": "The blackboard pattern uses a shared workspace where agents read and write partial results — a classical AI architecture now finding new life in LLM-agent systems.",
      "category": "a2a",
      "categoryTitle": "Agent-to-Agent Protocols",
      "pillar": "Creativity",
      "keywords": [
        "blackboard",
        "shared memory",
        "Hearsay-II",
        "multi-agent",
        "classical AI"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Agent Cache-and-Memoize Pattern",
      "slug": "agent-cache-and-memoize-pattern",
      "url": "https://learn.engineering.vips.edu/agent-protocols/agent-cache-and-memoize-pattern",
      "description": "Caching tool-call results and memoizing identical LLM prompts is how production agents cut cost and latency by 50–90% — turning repeated external calls into instant local lookups.",
      "category": "a2a",
      "categoryTitle": "Agent-to-Agent Protocols",
      "pillar": "Creativity",
      "keywords": [
        "caching",
        "memoization",
        "prompt caching",
        "agent pattern",
        "cost optimization"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Agent Cost and Token Budget Patterns",
      "slug": "agent-cost-and-token-budgets",
      "url": "https://learn.engineering.vips.edu/agent-protocols/agent-cost-and-token-budgets",
      "description": "Agents can burn thousands of dollars in a single run if left unchecked — explicit token and cost budgets, per-step guards, context pruning, and cheaper-model routing are the patterns production teams use to keep spend sane.",
      "category": "a2a",
      "categoryTitle": "Agent-to-Agent Protocols",
      "pillar": "Creativity",
      "keywords": [
        "token budget",
        "cost control",
        "prompt caching",
        "model routing",
        "FinOps"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Agent Credential Vault Pattern",
      "slug": "agent-credential-vault-pattern",
      "url": "https://learn.engineering.vips.edu/agent-protocols/agent-credential-vault-pattern",
      "description": "The credential-vault pattern stores secrets — API keys, OAuth tokens, passwords — outside the agent's memory and injects them only into specific tool calls, limiting blast radius if the agent is compromised.",
      "category": "a2a",
      "categoryTitle": "Agent-to-Agent Protocols",
      "pillar": "Creativity",
      "keywords": [
        "credential vault",
        "secrets",
        "agent security",
        "HashiCorp Vault"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Agent Episodic Memory Pattern",
      "slug": "agent-episodic-memory-pattern",
      "url": "https://learn.engineering.vips.edu/agent-protocols/agent-episodic-memory-pattern",
      "description": "Episodic memory stores specific past events — 'on March 3, user asked X, agent did Y' — letting an agent recall concrete past interactions rather than only general facts.",
      "category": "a2a",
      "categoryTitle": "Agent-to-Agent Protocols",
      "pillar": "Creativity",
      "keywords": [
        "episodic memory",
        "agent memory",
        "temporal memory",
        "long-term memory"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "HaluEval — Hallucination Evaluation Benchmark",
      "slug": "agent-factuality-benchmark-halueval",
      "url": "https://learn.engineering.vips.edu/agent-protocols/agent-factuality-benchmark-halueval",
      "description": "HaluEval is a large-scale benchmark of hallucination examples across QA, dialogue, and summarization used to measure how often LLM agents invent facts versus ground them in retrieved sources.",
      "category": "a2a",
      "categoryTitle": "Agent-to-Agent Protocols",
      "pillar": "Creativity",
      "keywords": [
        "HaluEval",
        "hallucination",
        "factuality",
        "RAG",
        "benchmark"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Agent Human-in-the-Loop (HITL) Pattern",
      "slug": "agent-human-in-the-loop-pattern",
      "url": "https://learn.engineering.vips.edu/agent-protocols/agent-human-in-the-loop-pattern",
      "description": "Human-in-the-loop is the design pattern where agents pause for human approval, correction, or input at specific checkpoints — trading some autonomy for safety, accuracy, and regulatory fit in high-stakes workflows.",
      "category": "a2a",
      "categoryTitle": "Agent-to-Agent Protocols",
      "pillar": "Creativity",
      "keywords": [
        "human-in-the-loop",
        "HITL",
        "agent pattern",
        "approval",
        "interrupt"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Agent Identity — OIDC and OAuth 2.1",
      "slug": "agent-identity-oidc-oauth-2-1",
      "url": "https://learn.engineering.vips.edu/agent-protocols/agent-identity-oidc-oauth-2-1",
      "description": "Agent identity uses OIDC and OAuth 2.1 to give AI agents their own cryptographically verifiable identities — separate from user identities — with scoped permissions and full audit trails.",
      "category": "a2a",
      "categoryTitle": "Agent-to-Agent Protocols",
      "pillar": "Creativity",
      "keywords": [
        "agent identity",
        "OIDC",
        "OAuth",
        "delegation",
        "agent passport"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Agent Map-Reduce Pattern",
      "slug": "agent-map-reduce-pattern",
      "url": "https://learn.engineering.vips.edu/agent-protocols/agent-map-reduce-pattern",
      "description": "Map-reduce for agents: split a large input into chunks, process each in parallel with a 'map' agent, then combine results with a 'reduce' agent — the classic recipe for long-document work.",
      "category": "a2a",
      "categoryTitle": "Agent-to-Agent Protocols",
      "pillar": "Creativity",
      "keywords": [
        "map-reduce",
        "parallel",
        "long-document",
        "agent pattern"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Agent Mesh Networking Pattern",
      "slug": "agent-mesh-networking-pattern",
      "url": "https://learn.engineering.vips.edu/agent-protocols/agent-mesh-networking-pattern",
      "description": "Agent mesh networking is an architecture where specialized agents discover each other via a registry, call each other directly over a standard protocol (A2A, MCP), and compose dynamically without central orchestration.",
      "category": "a2a",
      "categoryTitle": "Agent-to-Agent Protocols",
      "pillar": "Creativity",
      "keywords": [
        "agent mesh",
        "microservices",
        "A2A",
        "composition",
        "registry"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Agent Network Protocol (ANP)",
      "slug": "agent-network-protocol-anp",
      "url": "https://learn.engineering.vips.edu/agent-protocols/agent-network-protocol-anp",
      "description": "ANP is an open agent-to-agent protocol that treats agents as first-class peers on a decentralized network — using DIDs for identity and JSON-LD for capability discovery.",
      "category": "a2a",
      "categoryTitle": "Agent-to-Agent Protocols",
      "pillar": "Creativity",
      "keywords": [
        "ANP",
        "Agent Network Protocol",
        "DID",
        "decentralized agents",
        "A2A",
        "JSON-LD"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Agent PII Redaction Layer",
      "slug": "agent-pii-redaction-layer",
      "url": "https://learn.engineering.vips.edu/agent-protocols/agent-pii-redaction-layer",
      "description": "A PII redaction layer sits between an agent and its inputs/outputs, scrubbing personally identifiable information — names, SSNs, card numbers — before it reaches the LLM or leaves the system.",
      "category": "a2a",
      "categoryTitle": "Agent-to-Agent Protocols",
      "pillar": "Creativity",
      "keywords": [
        "PII redaction",
        "data loss prevention",
        "privacy",
        "GDPR",
        "HIPAA"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Agent Pipeline Pattern",
      "slug": "agent-pipeline-pattern",
      "url": "https://learn.engineering.vips.edu/agent-protocols/agent-pipeline-pattern",
      "description": "The pipeline pattern chains agents in a fixed sequence, each transforming the previous agent's output — a Unix-pipe style composition that favors determinism over autonomy.",
      "category": "a2a",
      "categoryTitle": "Agent-to-Agent Protocols",
      "pillar": "Creativity",
      "keywords": [
        "pipeline",
        "agent pattern",
        "composition",
        "workflow"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Agent Procedural Memory Pattern",
      "slug": "agent-procedural-memory-pattern",
      "url": "https://learn.engineering.vips.edu/agent-protocols/agent-procedural-memory-pattern",
      "description": "Procedural memory stores learned how-to knowledge — reusable skill snippets, successful tool-call sequences, corrected mistakes — that the agent can retrieve and apply to future similar tasks.",
      "category": "a2a",
      "categoryTitle": "Agent-to-Agent Protocols",
      "pillar": "Creativity",
      "keywords": [
        "procedural memory",
        "skill library",
        "agent learning",
        "Voyager"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Agent Prompt-Injection Defense",
      "slug": "agent-prompt-injection-defense",
      "url": "https://learn.engineering.vips.edu/agent-protocols/agent-prompt-injection-defense",
      "description": "Prompt-injection defense is a layered set of techniques — input sanitization, instruction hierarchies, capability scoping, output firewalls — used to prevent attackers from hijacking an agent via untrusted text.",
      "category": "a2a",
      "categoryTitle": "Agent-to-Agent Protocols",
      "pillar": "Creativity",
      "keywords": [
        "prompt injection",
        "LLM security",
        "OWASP",
        "agent security"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Agent Rate Limiting and Quotas",
      "slug": "agent-rate-limiting-and-quotas",
      "url": "https://learn.engineering.vips.edu/agent-protocols/agent-rate-limiting-and-quotas",
      "description": "Rate limiting and quotas bound an agent's cost, blast radius, and abuse potential by capping tool calls, token spend, and external API use per user, session, or time window.",
      "category": "a2a",
      "categoryTitle": "Agent-to-Agent Protocols",
      "pillar": "Creativity",
      "keywords": [
        "rate limiting",
        "quotas",
        "cost control",
        "circuit breaker",
        "agent ops"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Agent Retry-with-Backoff Pattern",
      "slug": "agent-retry-with-backoff-pattern",
      "url": "https://learn.engineering.vips.edu/agent-protocols/agent-retry-with-backoff-pattern",
      "description": "Retry-with-backoff is the core resilience pattern for agent tool calls: on transient failure, wait an exponentially growing interval before retrying, with jitter to avoid thundering-herd retries.",
      "category": "a2a",
      "categoryTitle": "Agent-to-Agent Protocols",
      "pillar": "Creativity",
      "keywords": [
        "retry",
        "backoff",
        "resilience",
        "agent pattern",
        "error handling"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Agent Router / Classifier Pattern",
      "slug": "agent-router-classifier-pattern",
      "url": "https://learn.engineering.vips.edu/agent-protocols/agent-router-classifier-pattern",
      "description": "The router pattern puts a lightweight classifier at the front door of an agent system, dispatching each request to the cheapest model or most specialized sub-agent that can handle it.",
      "category": "a2a",
      "categoryTitle": "Agent-to-Agent Protocols",
      "pillar": "Creativity",
      "keywords": [
        "router",
        "classifier",
        "model routing",
        "agent pattern",
        "cost optimization"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "SafeBench — Agent Safety Benchmark",
      "slug": "agent-safety-benchmark-safebench",
      "url": "https://learn.engineering.vips.edu/agent-protocols/agent-safety-benchmark-safebench",
      "description": "SafeBench is a benchmark suite that stress-tests autonomous agents on harmful-instruction compliance, indirect prompt injection, unsafe tool use, and jailbreak robustness across standardized scenarios.",
      "category": "a2a",
      "categoryTitle": "Agent-to-Agent Protocols",
      "pillar": "Creativity",
      "keywords": [
        "SafeBench",
        "agent safety",
        "prompt injection",
        "benchmark",
        "AI safety"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Agent Sandboxing and Safety Patterns",
      "slug": "agent-sandboxing-and-safety",
      "url": "https://learn.engineering.vips.edu/agent-protocols/agent-sandboxing-and-safety",
      "description": "Sandboxing is the foundational safety pattern for agents that run code or browse the web — isolating the agent's execution environment so compromised or hallucinating runs cannot damage host systems or exfiltrate data.",
      "category": "a2a",
      "categoryTitle": "Agent-to-Agent Protocols",
      "pillar": "Creativity",
      "keywords": [
        "sandboxing",
        "agent safety",
        "microVM",
        "isolation",
        "security"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Agent Self-Critique Pattern",
      "slug": "agent-self-critique-pattern",
      "url": "https://learn.engineering.vips.edu/agent-protocols/agent-self-critique-pattern",
      "description": "Self-critique is an agent design pattern where the agent reviews and scores its own draft output against a rubric or checklist before returning it, catching errors that slipped past the initial generation.",
      "category": "a2a",
      "categoryTitle": "Agent-to-Agent Protocols",
      "pillar": "Creativity",
      "keywords": [
        "self-critique",
        "reflexion",
        "self-refine",
        "agent pattern"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Agent Semantic Memory Pattern",
      "slug": "agent-semantic-memory-pattern",
      "url": "https://learn.engineering.vips.edu/agent-protocols/agent-semantic-memory-pattern",
      "description": "Semantic memory stores generalized facts — 'the user prefers Python', 'our prod DB is Postgres' — as structured knowledge the agent can retrieve and use in future interactions.",
      "category": "a2a",
      "categoryTitle": "Agent-to-Agent Protocols",
      "pillar": "Creativity",
      "keywords": [
        "semantic memory",
        "knowledge graph",
        "agent memory",
        "vector store"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Agent State and Checkpointing",
      "slug": "agent-state-and-checkpointing",
      "url": "https://learn.engineering.vips.edu/agent-protocols/agent-state-and-checkpointing",
      "description": "Production agents need durable state and checkpoints — snapshots of memory, tool outputs, and plan steps — so long-running tasks survive crashes, timeouts, and human interruptions without starting over.",
      "category": "a2a",
      "categoryTitle": "Agent-to-Agent Protocols",
      "pillar": "Creativity",
      "keywords": [
        "checkpointing",
        "agent state",
        "durable execution",
        "LangGraph",
        "Temporal"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Agent Streaming / Partial Results Pattern",
      "slug": "agent-streaming-partial-results-pattern",
      "url": "https://learn.engineering.vips.edu/agent-protocols/agent-streaming-partial-results-pattern",
      "description": "The streaming pattern surfaces partial agent output — token-by-token text, interim tool results, status events — to the user as it happens, making multi-second agent tasks feel responsive.",
      "category": "a2a",
      "categoryTitle": "Agent-to-Agent Protocols",
      "pillar": "Creativity",
      "keywords": [
        "streaming",
        "SSE",
        "WebSocket",
        "agent UX",
        "partial results"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Agent Tool Permissioning Patterns",
      "slug": "agent-tool-permissioning",
      "url": "https://learn.engineering.vips.edu/agent-protocols/agent-tool-permissioning",
      "description": "Tool permissioning is the discipline of granting agents the narrowest possible capability set — per-tool allow-lists, confirmation prompts for destructive operations, scoped OAuth, and user-in-the-loop approvals.",
      "category": "a2a",
      "categoryTitle": "Agent-to-Agent Protocols",
      "pillar": "Creativity",
      "keywords": [
        "permissioning",
        "tool access",
        "least privilege",
        "agent safety"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Agent Voting / Ensemble Pattern",
      "slug": "agent-voting-ensemble-pattern",
      "url": "https://learn.engineering.vips.edu/agent-protocols/agent-voting-ensemble-pattern",
      "description": "The voting-ensemble pattern runs N agents in parallel on the same task and aggregates their answers by majority vote or a judge model, trading cost for robustness on high-stakes decisions.",
      "category": "a2a",
      "categoryTitle": "Agent-to-Agent Protocols",
      "pillar": "Creativity",
      "keywords": [
        "ensemble",
        "voting",
        "self-consistency",
        "agent pattern"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "AgentBench: Multi-Environment LLM Agent Benchmark",
      "slug": "agentbench-benchmark",
      "url": "https://learn.engineering.vips.edu/agent-protocols/agentbench-benchmark",
      "description": "AgentBench from Tsinghua evaluates LLMs as agents across eight distinct environments — OS, database, web shopping, games, and more — producing a single comparable score for agentic capability.",
      "category": "a2a",
      "categoryTitle": "Agent-to-Agent Protocols",
      "pillar": "Creativity",
      "keywords": [
        "AgentBench",
        "benchmark",
        "LLM agents",
        "Tsinghua",
        "evaluation"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "AgentOps: Observability Platform for AI Agents",
      "slug": "agentops-observability-platform",
      "url": "https://learn.engineering.vips.edu/agent-protocols/agentops-observability-platform",
      "description": "AgentOps is an open-source observability platform for LLM agents that captures every tool call, token, cost, and latency span — giving production teams tracing, session replay, and evals.",
      "category": "a2a",
      "categoryTitle": "Agent-to-Agent Protocols",
      "pillar": "Creativity",
      "keywords": [
        "AgentOps",
        "observability",
        "LLM ops",
        "tracing",
        "evals"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Agents ↔ MCP Interoperability",
      "slug": "agents-mcp-interop",
      "url": "https://learn.engineering.vips.edu/agent-protocols/agents-mcp-interop",
      "description": "MCP (Model Context Protocol) has become the de-facto standard for exposing tools and data to agents — this entry covers how agent frameworks interoperate with MCP servers in practice.",
      "category": "a2a",
      "categoryTitle": "Agent-to-Agent Protocols",
      "pillar": "Creativity",
      "keywords": [
        "MCP",
        "Model Context Protocol",
        "interoperability",
        "tool use"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Anchor Browser: Hosted Browser Infrastructure for Agents",
      "slug": "anchor-browser-agent",
      "url": "https://learn.engineering.vips.edu/agent-protocols/anchor-browser-agent",
      "description": "Anchor Browser provides hosted, persistent, and programmatically controllable browsers for AI agents — with built-in auth, CAPTCHA handling, session recording, and a standard CDP API.",
      "category": "a2a",
      "categoryTitle": "Agent-to-Agent Protocols",
      "pillar": "Creativity",
      "keywords": [
        "Anchor",
        "browser infrastructure",
        "agents",
        "headless browser"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Claude Subagents in Production",
      "slug": "anthropic-claude-subagents-in-production",
      "url": "https://learn.engineering.vips.edu/agent-protocols/anthropic-claude-subagents-in-production",
      "description": "Claude subagents have moved from coding-CLI curiosity to production pattern — powering Anthropic's own research agent and an increasing share of real-world agent deployments.",
      "category": "a2a",
      "categoryTitle": "Agent-to-Agent Protocols",
      "pillar": "Creativity",
      "keywords": [
        "Claude",
        "Anthropic",
        "subagents",
        "research agent",
        "production",
        "orchestrator"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Anthropic Computer Use Agent",
      "slug": "anthropic-computer-use-agent",
      "url": "https://learn.engineering.vips.edu/agent-protocols/anthropic-computer-use-agent",
      "description": "Computer Use is Anthropic's API capability that lets Claude see the screen, move the mouse, and type — enabling the model to operate general-purpose software GUIs like a human user.",
      "category": "a2a",
      "categoryTitle": "Agent-to-Agent Protocols",
      "pillar": "Creativity",
      "keywords": [
        "Computer Use",
        "Anthropic",
        "Claude",
        "computer-using agent",
        "GUI automation"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "AI Engineer Foundation Agent Protocol (aka Arcadia)",
      "slug": "arcadia-agent-protocol",
      "url": "https://learn.engineering.vips.edu/agent-protocols/arcadia-agent-protocol",
      "description": "The AI Engineer Foundation Agent Protocol is an open, vendor-neutral REST specification for running and controlling an agent — start a task, stream steps, list artifacts — backed by an open-source reference server.",
      "category": "a2a",
      "categoryTitle": "Agent-to-Agent Protocols",
      "pillar": "Creativity",
      "keywords": [
        "Agent Protocol",
        "AI Engineer Foundation",
        "Arcadia",
        "REST",
        "runtime"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Arize Phoenix for Agent Tracing and Evals",
      "slug": "arize-phoenix-agent-tracing",
      "url": "https://learn.engineering.vips.edu/agent-protocols/arize-phoenix-agent-tracing",
      "description": "Arize Phoenix is an open-source LLM observability tool that traces agent runs via OpenTelemetry, clusters failures by embedding, and runs LLM-as-judge evals — all locally or self-hosted.",
      "category": "a2a",
      "categoryTitle": "Agent-to-Agent Protocols",
      "pillar": "Creativity",
      "keywords": [
        "Phoenix",
        "Arize",
        "observability",
        "OpenInference",
        "agent tracing"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "AutoGen GroupChat",
      "slug": "autogen-groupchat",
      "url": "https://learn.engineering.vips.edu/agent-protocols/autogen-groupchat",
      "description": "AutoGen's GroupChat puts several specialist agents around a virtual table with a manager that picks the next speaker — a flexible many-agent conversation primitive from Microsoft Research.",
      "category": "a2a",
      "categoryTitle": "Agent-to-Agent Protocols",
      "pillar": "Creativity",
      "keywords": [
        "AutoGen",
        "GroupChat",
        "Microsoft Research",
        "multi-agent",
        "speaker selection"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "AutoGPT (Original 2023)",
      "slug": "autogpt-original-2023",
      "url": "https://learn.engineering.vips.edu/agent-protocols/autogpt-original-2023",
      "description": "AutoGPT, released March 2023, was the first viral autonomous agent framework — a Python script that chained GPT-4 calls with tools to pursue goals without human steps, sparking the agent-framework era.",
      "category": "a2a",
      "categoryTitle": "Agent-to-Agent Protocols",
      "pillar": "Creativity",
      "keywords": [
        "AutoGPT",
        "autonomous agent",
        "history",
        "GPT-4"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "BabyAGI (Original 2023)",
      "slug": "babyagi-original-2023",
      "url": "https://learn.engineering.vips.edu/agent-protocols/babyagi-original-2023",
      "description": "BabyAGI, released April 2023 by Yohei Nakajima, was a ~140-line Python script demonstrating task decomposition + prioritization + execution with GPT-4 — one of the first autonomous agent patterns shared widely.",
      "category": "a2a",
      "categoryTitle": "Agent-to-Agent Protocols",
      "pillar": "Creativity",
      "keywords": [
        "BabyAGI",
        "autonomous agent",
        "history",
        "Yohei Nakajima"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Bolt.new: In-Browser Full-Stack Coding Agent",
      "slug": "bolt-new-agent",
      "url": "https://learn.engineering.vips.edu/agent-protocols/bolt-new-agent",
      "description": "Bolt.new by StackBlitz is a browser-based full-stack coding agent built on WebContainers — it runs Node.js, installs packages, edits files, and previews apps entirely in the browser, then deploys to Netlify.",
      "category": "a2a",
      "categoryTitle": "Agent-to-Agent Protocols",
      "pillar": "Creativity",
      "keywords": [
        "Bolt.new",
        "StackBlitz",
        "WebContainers",
        "coding agent",
        "full-stack"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Browser Use: Open-Source LLM Browser Agent Framework",
      "slug": "browser-use-framework",
      "url": "https://learn.engineering.vips.edu/agent-protocols/browser-use-framework",
      "description": "Browser Use is an open-source Python library that gives LLM agents structured access to a real Playwright browser — they see the DOM, screenshots, and interactive elements, and act via a typed action space.",
      "category": "a2a",
      "categoryTitle": "Agent-to-Agent Protocols",
      "pillar": "Creativity",
      "keywords": [
        "Browser Use",
        "Playwright",
        "browser agent",
        "web automation",
        "open source"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Browserbase — Cloud Browser Infrastructure for Agents",
      "slug": "browserbase-cloud-browser-infrastructure",
      "url": "https://learn.engineering.vips.edu/agent-protocols/browserbase-cloud-browser-infrastructure",
      "description": "Browserbase provides headless Chrome browsers in the cloud purpose-built for AI agents, with session recording, stealth mode, file handling, and per-request isolation.",
      "category": "a2a",
      "categoryTitle": "Agent-to-Agent Protocols",
      "pillar": "Creativity",
      "keywords": [
        "Browserbase",
        "cloud browser",
        "agent infrastructure",
        "web agents"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Claude Code Subagents",
      "slug": "claude-code-subagents",
      "url": "https://learn.engineering.vips.edu/agent-protocols/claude-code-subagents",
      "description": "Claude Code's subagent pattern lets the main Claude agent spawn specialized sub-Claudes with their own prompts, tool allowlists, and contexts — a first-class multi-agent workflow in a coding CLI.",
      "category": "a2a",
      "categoryTitle": "Agent-to-Agent Protocols",
      "pillar": "Creativity",
      "keywords": [
        "Claude Code",
        "subagents",
        "Anthropic",
        "orchestrator-worker",
        "coding agent"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Cognigy — Enterprise Conversational Agent Platform",
      "slug": "cognigy-enterprise-agent-platform",
      "url": "https://learn.engineering.vips.edu/agent-protocols/cognigy-enterprise-agent-platform",
      "description": "Cognigy is an enterprise conversational AI platform for contact centers that builds voice and chat agents with low-code flows, LLM grounding, and deep telephony and CCaaS integration.",
      "category": "a2a",
      "categoryTitle": "Agent-to-Agent Protocols",
      "pillar": "Creativity",
      "keywords": [
        "Cognigy",
        "conversational AI",
        "contact center",
        "CCaaS"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Cognition Devin: Autonomous Software Engineer Agent",
      "slug": "cognition-devin-deep-dive",
      "url": "https://learn.engineering.vips.edu/agent-protocols/cognition-devin-deep-dive",
      "description": "Devin is Cognition's autonomous software-engineer agent that plans long-horizon coding tasks, browses documentation, executes shell commands, and ships pull requests — the prototype of the fully-autonomous SWE agent category.",
      "category": "a2a",
      "categoryTitle": "Agent-to-Agent Protocols",
      "pillar": "Creativity",
      "keywords": [
        "Devin",
        "Cognition",
        "coding agent",
        "autonomous agent",
        "software engineer"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "CrewAI Hierarchical Process",
      "slug": "crewai-hierarchical-process",
      "url": "https://learn.engineering.vips.edu/agent-protocols/crewai-hierarchical-process",
      "description": "CrewAI's hierarchical process puts a manager agent in charge of a crew — assigning tasks, reviewing outputs, and iterating — contrasting with its simpler sequential process.",
      "category": "a2a",
      "categoryTitle": "Agent-to-Agent Protocols",
      "pillar": "Creativity",
      "keywords": [
        "CrewAI",
        "hierarchical process",
        "manager agent",
        "multi-agent",
        "crew"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Cursor Composer: Multi-File Agentic Editor",
      "slug": "cursor-composer-agent",
      "url": "https://learn.engineering.vips.edu/agent-protocols/cursor-composer-agent",
      "description": "Cursor Composer (Agent mode) is the multi-file, multi-step coding agent inside the Cursor IDE — it plans edits across files, runs shell commands, and iterates on tests without leaving the editor.",
      "category": "a2a",
      "categoryTitle": "Agent-to-Agent Protocols",
      "pillar": "Creativity",
      "keywords": [
        "Cursor",
        "Composer",
        "coding agent",
        "IDE agent",
        "pair programming"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Multi-Agent Debate Pattern",
      "slug": "debate-agent-pattern",
      "url": "https://learn.engineering.vips.edu/agent-protocols/debate-agent-pattern",
      "description": "In the debate pattern, two or more agents argue different positions on a problem before a judge agent adjudicates — a technique shown to improve reasoning accuracy on hard problems.",
      "category": "a2a",
      "categoryTitle": "Agent-to-Agent Protocols",
      "pillar": "Creativity",
      "keywords": [
        "debate",
        "multi-agent",
        "reasoning",
        "adversarial",
        "judge agent"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Deep Research Agent Pattern",
      "slug": "deep-research-agent-pattern",
      "url": "https://learn.engineering.vips.edu/agent-protocols/deep-research-agent-pattern",
      "description": "Deep research is a now-standard agent pattern — a lead agent plans a research question, dispatches parallel sub-agents to explore, synthesizes findings, and cites sources.",
      "category": "a2a",
      "categoryTitle": "Agent-to-Agent Protocols",
      "pillar": "Creativity",
      "keywords": [
        "deep research",
        "research agent",
        "Perplexity",
        "Gemini",
        "ChatGPT",
        "Claude"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Devin — Cognition's Autonomous Coding Agent",
      "slug": "devin-autonomous-coding-agent",
      "url": "https://learn.engineering.vips.edu/agent-protocols/devin-autonomous-coding-agent",
      "description": "Devin, from Cognition AI, was the first widely-publicised autonomous coding agent — a long-running agent that plans, edits, tests, and ships code with human review at gates.",
      "category": "a2a",
      "categoryTitle": "Agent-to-Agent Protocols",
      "pillar": "Creativity",
      "keywords": [
        "Devin",
        "Cognition AI",
        "autonomous agent",
        "coding agent",
        "async teammate"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Enterprise DevOps / SRE Agent",
      "slug": "enterprise-devops-sre-agent",
      "url": "https://learn.engineering.vips.edu/agent-protocols/enterprise-devops-sre-agent",
      "description": "A DevOps/SRE agent triages alerts, investigates incidents, proposes (or executes) fixes, and writes postmortems — augmenting on-call engineers with always-on log/metric correlation.",
      "category": "a2a",
      "categoryTitle": "Agent-to-Agent Protocols",
      "pillar": "Creativity",
      "keywords": [
        "DevOps agent",
        "SRE agent",
        "incident response",
        "AIOps"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Enterprise Finance Analyst Agent",
      "slug": "enterprise-finance-analyst-agent",
      "url": "https://learn.engineering.vips.edu/agent-protocols/enterprise-finance-analyst-agent",
      "description": "A finance analyst agent pulls data from ERP, data warehouses, and market sources, builds models, and drafts variance and scenario analyses — augmenting FP&A and investment teams.",
      "category": "a2a",
      "categoryTitle": "Agent-to-Agent Protocols",
      "pillar": "Creativity",
      "keywords": [
        "finance agent",
        "FP&A",
        "analyst agent",
        "Hebbia",
        "Rogo"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Enterprise HR / Recruiting Agent",
      "slug": "enterprise-hr-recruiting-agent",
      "url": "https://learn.engineering.vips.edu/agent-protocols/enterprise-hr-recruiting-agent",
      "description": "A recruiting agent sources candidates, screens resumes, drafts outreach, schedules interviews, and summarizes feedback — managing the top of the hiring funnel with bias auditing built in.",
      "category": "a2a",
      "categoryTitle": "Agent-to-Agent Protocols",
      "pillar": "Creativity",
      "keywords": [
        "HR agent",
        "recruiting agent",
        "ATS",
        "bias audit"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Enterprise Legal Research Agent",
      "slug": "enterprise-legal-research-agent",
      "url": "https://learn.engineering.vips.edu/agent-protocols/enterprise-legal-research-agent",
      "description": "A legal research agent searches case law, statutes, and firm documents, drafts memoranda with citations, and flags relevant precedents — augmenting associates on research-heavy workflows.",
      "category": "a2a",
      "categoryTitle": "Agent-to-Agent Protocols",
      "pillar": "Creativity",
      "keywords": [
        "legal agent",
        "legal research",
        "Harvey",
        "CoCounsel"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Enterprise Marketing Campaign Agent",
      "slug": "enterprise-marketing-campaign-agent",
      "url": "https://learn.engineering.vips.edu/agent-protocols/enterprise-marketing-campaign-agent",
      "description": "A marketing campaign agent plans campaigns, drafts creative across channels, segments audiences, launches in ad platforms, and reports on performance — closing the loop on optimization.",
      "category": "a2a",
      "categoryTitle": "Agent-to-Agent Protocols",
      "pillar": "Creativity",
      "keywords": [
        "marketing agent",
        "campaign agent",
        "Jasper",
        "Writer"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Enterprise Sales Agent (SDR)",
      "slug": "enterprise-sales-agent-sdr",
      "url": "https://learn.engineering.vips.edu/agent-protocols/enterprise-sales-agent-sdr",
      "description": "An enterprise SDR agent autonomously researches accounts, drafts personalized outreach, books meetings, and updates the CRM — replacing or augmenting the first-line sales-development role.",
      "category": "a2a",
      "categoryTitle": "Agent-to-Agent Protocols",
      "pillar": "Creativity",
      "keywords": [
        "SDR agent",
        "sales agent",
        "enterprise agent",
        "outreach"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Enterprise Support Agent (Tier 1)",
      "slug": "enterprise-support-agent-tier-1",
      "url": "https://learn.engineering.vips.edu/agent-protocols/enterprise-support-agent-tier-1",
      "description": "A Tier-1 support agent autonomously resolves the bulk of inbound customer issues — password resets, billing questions, order status, how-to queries — and cleanly escalates the rest to humans.",
      "category": "a2a",
      "categoryTitle": "Agent-to-Agent Protocols",
      "pillar": "Creativity",
      "keywords": [
        "support agent",
        "customer service AI",
        "Tier 1",
        "deflection"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "FIPA ACL — Agent Communication Language (historical)",
      "slug": "fipa-acl",
      "url": "https://learn.engineering.vips.edu/agent-protocols/fipa-acl",
      "description": "FIPA ACL is the late-1990s IEEE/FIPA standard agent communication language — the intellectual ancestor of modern A2A protocols, built on speech-act theory and KQML.",
      "category": "a2a",
      "categoryTitle": "Agent-to-Agent Protocols",
      "pillar": "Creativity",
      "keywords": [
        "FIPA",
        "ACL",
        "agent communication language",
        "KQML",
        "speech acts",
        "history"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "GAIA Benchmark for General AI Assistants",
      "slug": "gaia-benchmark",
      "url": "https://learn.engineering.vips.edu/agent-protocols/gaia-benchmark",
      "description": "GAIA is a benchmark from Hugging Face and Meta that tests general AI assistants on real-world, multi-step questions requiring reasoning, tool use, and web browsing — designed to be easy for humans and hard for current agents.",
      "category": "a2a",
      "categoryTitle": "Agent-to-Agent Protocols",
      "pillar": "Creativity",
      "keywords": [
        "GAIA",
        "benchmark",
        "agents",
        "Hugging Face",
        "Meta",
        "evaluation"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Glean — Enterprise Work Agent",
      "slug": "glean-enterprise-agent",
      "url": "https://learn.engineering.vips.edu/agent-protocols/glean-enterprise-agent",
      "description": "Glean is a work-assistant platform that indexes a company's SaaS stack — Google Drive, Slack, Jira, Notion, Salesforce — and provides search and agents grounded in that internal knowledge.",
      "category": "a2a",
      "categoryTitle": "Agent-to-Agent Protocols",
      "pillar": "Creativity",
      "keywords": [
        "Glean",
        "enterprise search",
        "work assistant",
        "agent"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Google A2A (Agent-to-Agent) Protocol",
      "slug": "google-a2a-protocol",
      "url": "https://learn.engineering.vips.edu/agent-protocols/google-a2a-protocol",
      "description": "Google's A2A is an open protocol for agent interoperability — how independently-built agents discover each other, describe their capabilities, and exchange task state.",
      "category": "a2a",
      "categoryTitle": "Agent-to-Agent Protocols",
      "pillar": "Creativity",
      "keywords": [
        "A2A",
        "Agent-to-Agent",
        "Google",
        "agent protocol",
        "interoperability"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Jules — Google's Asynchronous Coding Agent",
      "slug": "google-jules-coding-agent",
      "url": "https://learn.engineering.vips.edu/agent-protocols/google-jules-coding-agent",
      "description": "Jules is Google's asynchronous coding agent, built on Gemini — it clones your repo, plans changes, runs in a cloud VM, and opens a pull request with tests and diffs for review.",
      "category": "a2a",
      "categoryTitle": "Agent-to-Agent Protocols",
      "pillar": "Creativity",
      "keywords": [
        "Jules",
        "Google",
        "Gemini",
        "async coding agent",
        "Google Labs"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "GPQA for Agents — Graduate-Level Reasoning Benchmark",
      "slug": "gpqa-for-agents",
      "url": "https://learn.engineering.vips.edu/agent-protocols/gpqa-for-agents",
      "description": "GPQA is a 448-question expert-authored benchmark of graduate-level biology, chemistry, and physics problems used to measure whether agents can reason through genuinely hard, Google-proof scientific questions.",
      "category": "a2a",
      "categoryTitle": "Agent-to-Agent Protocols",
      "pillar": "Creativity",
      "keywords": [
        "GPQA",
        "scientific reasoning",
        "agent benchmark",
        "PhD-level"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "GPT Researcher: Autonomous Research Agent",
      "slug": "gpt-researcher",
      "url": "https://learn.engineering.vips.edu/agent-protocols/gpt-researcher",
      "description": "GPT Researcher is an open-source autonomous research agent that drafts a plan, issues web queries across many sources, deduplicates, and writes a cited research report — all without a human in the loop.",
      "category": "a2a",
      "categoryTitle": "Agent-to-Agent Protocols",
      "pillar": "Creativity",
      "keywords": [
        "GPT Researcher",
        "research agent",
        "autonomous agent",
        "open source"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Handoff vs Delegation — A2A Semantic Distinction",
      "slug": "handoff-vs-delegation",
      "url": "https://learn.engineering.vips.edu/agent-protocols/handoff-vs-delegation",
      "description": "Handoff and delegation look similar but differ: in a handoff, control transfers to another agent; in delegation, the original agent waits for a result and keeps control.",
      "category": "a2a",
      "categoryTitle": "Agent-to-Agent Protocols",
      "pillar": "Creativity",
      "keywords": [
        "handoff",
        "delegation",
        "control flow",
        "multi-agent",
        "A2A semantics"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Hierarchical Agent Pattern",
      "slug": "hierarchical-agent-pattern",
      "url": "https://learn.engineering.vips.edu/agent-protocols/hierarchical-agent-pattern",
      "description": "The hierarchical pattern stacks orchestrator-worker vertically: a top-level planner delegates to mid-level coordinators, who in turn delegate to leaf worker agents — structured delegation for complex tasks.",
      "category": "a2a",
      "categoryTitle": "Agent-to-Agent Protocols",
      "pillar": "Creativity",
      "keywords": [
        "hierarchical",
        "multi-agent",
        "delegation",
        "tree of agents",
        "coordinator"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "IBM Agent Communication Protocol (ACP)",
      "slug": "ibm-acp-protocol",
      "url": "https://learn.engineering.vips.edu/agent-protocols/ibm-acp-protocol",
      "description": "IBM's ACP is an open protocol for agent-to-agent messaging, discovery, and orchestration — developed under the BeeAI project and designed for enterprise-grade multi-agent systems.",
      "category": "a2a",
      "categoryTitle": "Agent-to-Agent Protocols",
      "pillar": "Creativity",
      "keywords": [
        "ACP",
        "IBM",
        "BeeAI",
        "Agent Communication Protocol",
        "A2A",
        "Linux Foundation"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "LangGraph Supervisor Pattern",
      "slug": "langgraph-supervisor-pattern",
      "url": "https://learn.engineering.vips.edu/agent-protocols/langgraph-supervisor-pattern",
      "description": "LangGraph's supervisor pattern uses a top-level supervisor agent that routes messages to specialized worker agents in a graph — the idiomatic LangGraph way to build multi-agent systems.",
      "category": "a2a",
      "categoryTitle": "Agent-to-Agent Protocols",
      "pillar": "Creativity",
      "keywords": [
        "LangGraph",
        "supervisor",
        "multi-agent",
        "state graph",
        "LangChain"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "LaVague: Large Action Model Web Agent Framework",
      "slug": "lavague-web-agent",
      "url": "https://learn.engineering.vips.edu/agent-protocols/lavague-web-agent",
      "description": "LaVague is an open-source web agent framework built around a Large Action Model — a model fine-tuned to translate natural-language web instructions into Selenium/Playwright actions.",
      "category": "a2a",
      "categoryTitle": "Agent-to-Agent Protocols",
      "pillar": "Creativity",
      "keywords": [
        "LaVague",
        "LAM",
        "Large Action Model",
        "web agent",
        "Selenium"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "LongBench — Long-Horizon Agent Benchmark",
      "slug": "long-horizon-benchmark-longbench",
      "url": "https://learn.engineering.vips.edu/agent-protocols/long-horizon-benchmark-longbench",
      "description": "LongBench evaluates agents on tasks that span many steps, long documents, and extended time horizons — where short-horizon benchmarks fail to capture the real difficulty of agent work.",
      "category": "a2a",
      "categoryTitle": "Agent-to-Agent Protocols",
      "pillar": "Creativity",
      "keywords": [
        "LongBench",
        "long-horizon",
        "long-context",
        "agent benchmark"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Lovable: Chat-to-App Full-Stack Agent",
      "slug": "lovable-agent",
      "url": "https://learn.engineering.vips.edu/agent-protocols/lovable-agent",
      "description": "Lovable is a chat-driven full-stack app builder that generates React + Tailwind frontends wired to Supabase backends — turning a natural-language brief into a working, deployable SaaS product.",
      "category": "a2a",
      "categoryTitle": "Agent-to-Agent Protocols",
      "pillar": "Creativity",
      "keywords": [
        "Lovable",
        "app builder",
        "Supabase",
        "React",
        "low-code agent"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Manus Agent Platform",
      "slug": "manus-agent-platform",
      "url": "https://learn.engineering.vips.edu/agent-protocols/manus-agent-platform",
      "description": "Manus is a general-purpose agent platform from Monica that gained attention in early 2025 for running long, autonomous browser + compute workflows on behalf of users.",
      "category": "a2a",
      "categoryTitle": "Agent-to-Agent Protocols",
      "pillar": "Creativity",
      "keywords": [
        "Manus",
        "Monica",
        "autonomous agent",
        "agent platform",
        "browser agent"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "mem0 — Agent Memory Layer",
      "slug": "mem0-memory-layer",
      "url": "https://learn.engineering.vips.edu/agent-protocols/mem0-memory-layer",
      "description": "mem0 is an open-source memory layer for AI agents that extracts, deduplicates, and retrieves user- and session-scoped facts across multi-turn conversations with a simple SDK.",
      "category": "a2a",
      "categoryTitle": "Agent-to-Agent Protocols",
      "pillar": "Creativity",
      "keywords": [
        "mem0",
        "memory",
        "agent memory",
        "persistent memory"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "MLE-Bench — Machine Learning Engineering Benchmark",
      "slug": "mle-bench-machine-learning-engineering",
      "url": "https://learn.engineering.vips.edu/agent-protocols/mle-bench-machine-learning-engineering",
      "description": "MLE-Bench is OpenAI's benchmark of 75 real Kaggle competitions used to measure whether agents can perform end-to-end ML engineering: data exploration, feature engineering, model training, and submission.",
      "category": "a2a",
      "categoryTitle": "Agent-to-Agent Protocols",
      "pillar": "Creativity",
      "keywords": [
        "MLE-Bench",
        "Kaggle",
        "ML engineering",
        "agent benchmark",
        "OpenAI"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "MAgent / MAgentBench — Multi-Agent Benchmark",
      "slug": "multi-agent-benchmark-magent",
      "url": "https://learn.engineering.vips.edu/agent-protocols/multi-agent-benchmark-magent",
      "description": "MAgent and its successors benchmark multi-agent systems on cooperative and competitive tasks — negotiation, resource allocation, team coding — where the failure mode is coordination, not individual agent skill.",
      "category": "a2a",
      "categoryTitle": "Agent-to-Agent Protocols",
      "pillar": "Creativity",
      "keywords": [
        "MAgent",
        "multi-agent",
        "benchmark",
        "coordination",
        "negotiation"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Multi-Agent Interoperability — an overview",
      "slug": "multi-agent-interoperability",
      "url": "https://learn.engineering.vips.edu/agent-protocols/multi-agent-interoperability",
      "description": "A working map of the 2026 agent-interoperability landscape: A2A, ANP (Agent Network Protocol), NLWeb, and how MCP fits as the tool-access layer underneath.",
      "category": "a2a",
      "categoryTitle": "Agent-to-Agent Protocols",
      "pillar": "Creativity",
      "keywords": [
        "multi-agent",
        "interoperability",
        "A2A",
        "ANP",
        "NLWeb",
        "agent protocols"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "MultiOn: Consumer Web-Action Agent",
      "slug": "multi-on-browser-agent",
      "url": "https://learn.engineering.vips.edu/agent-protocols/multi-on-browser-agent",
      "description": "MultiOn is a consumer-facing web-action agent that turns natural-language goals into real browser actions — booking tables, filling forms, placing orders — across any public site.",
      "category": "a2a",
      "categoryTitle": "Agent-to-Agent Protocols",
      "pillar": "Creativity",
      "keywords": [
        "MultiOn",
        "web agent",
        "action agent",
        "browser automation"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "NLWeb — Microsoft's Natural-Language Web Protocol",
      "slug": "nlweb-protocol",
      "url": "https://learn.engineering.vips.edu/agent-protocols/nlweb-protocol",
      "description": "NLWeb is Microsoft's open protocol for turning websites into agent-accessible endpoints by exposing schema.org-backed content as natural-language APIs queryable by any agent.",
      "category": "a2a",
      "categoryTitle": "Agent-to-Agent Protocols",
      "pillar": "Creativity",
      "keywords": [
        "NLWeb",
        "Microsoft",
        "agent web",
        "schema.org",
        "MCP",
        "natural language API"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "OpenAI Agents Protocol and Agents SDK",
      "slug": "openai-agents-protocol",
      "url": "https://learn.engineering.vips.edu/agent-protocols/openai-agents-protocol",
      "description": "OpenAI's Agents SDK and the underlying Responses API form an emerging de-facto agents protocol — typed tool calls, handoffs, tracing, and guardrails with portable concepts across providers.",
      "category": "a2a",
      "categoryTitle": "Agent-to-Agent Protocols",
      "pillar": "Creativity",
      "keywords": [
        "OpenAI Agents",
        "Responses API",
        "agents SDK",
        "handoffs",
        "protocol"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "OpenAI Evals for Agent Workflows",
      "slug": "openai-evals-for-agents",
      "url": "https://learn.engineering.vips.edu/agent-protocols/openai-evals-for-agents",
      "description": "OpenAI's Evals framework and hosted Evals API let teams define graders, run LLM-as-judge and programmatic evaluations, and track agent quality across prompt, model, and tool changes.",
      "category": "a2a",
      "categoryTitle": "Agent-to-Agent Protocols",
      "pillar": "Creativity",
      "keywords": [
        "OpenAI Evals",
        "evaluation",
        "agents",
        "LLM-as-judge",
        "trajectory"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "OpenAI Swarm Framework",
      "slug": "openai-swarm-framework",
      "url": "https://learn.engineering.vips.edu/agent-protocols/openai-swarm-framework",
      "description": "OpenAI Swarm was an educational multi-agent framework focused on lightweight, stateless, peer-to-peer handoffs — the conceptual precursor to the production OpenAI Agents SDK.",
      "category": "a2a",
      "categoryTitle": "Agent-to-Agent Protocols",
      "pillar": "Creativity",
      "keywords": [
        "OpenAI",
        "Swarm",
        "multi-agent framework",
        "handoff",
        "Agents SDK precursor"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Orchestrator-Worker Pattern",
      "slug": "orchestrator-worker-pattern",
      "url": "https://learn.engineering.vips.edu/agent-protocols/orchestrator-worker-pattern",
      "description": "The orchestrator-worker pattern assigns a lead agent to plan and route work, while specialised worker agents execute individual steps — the workhorse pattern for most production agent systems.",
      "category": "a2a",
      "categoryTitle": "Agent-to-Agent Protocols",
      "pillar": "Creativity",
      "keywords": [
        "orchestrator-worker",
        "multi-agent pattern",
        "planner",
        "router",
        "sub-agent"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "OSWorld: Real Operating System Agent Benchmark",
      "slug": "osworld-benchmark",
      "url": "https://learn.engineering.vips.edu/agent-protocols/osworld-benchmark",
      "description": "OSWorld is a scalable benchmark that evaluates multi-modal agents on real computer tasks across Ubuntu, Windows, and macOS environments — clicking, typing, and navigating GUIs like a human user.",
      "category": "a2a",
      "categoryTitle": "Agent-to-Agent Protocols",
      "pillar": "Creativity",
      "keywords": [
        "OSWorld",
        "benchmark",
        "computer use",
        "desktop agents",
        "GUI automation"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Perplexity Deep Research",
      "slug": "perplexity-deep-research-agent",
      "url": "https://learn.engineering.vips.edu/agent-protocols/perplexity-deep-research-agent",
      "description": "Perplexity Deep Research is an autonomous multi-step research agent that browses the web for several minutes, synthesizes dozens of sources, and writes a cited long-form report from a single prompt.",
      "category": "a2a",
      "categoryTitle": "Agent-to-Agent Protocols",
      "pillar": "Creativity",
      "keywords": [
        "Perplexity",
        "Deep Research",
        "research agent",
        "long-form"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Playwright for AI Agents",
      "slug": "playwright-for-ai-agents",
      "url": "https://learn.engineering.vips.edu/agent-protocols/playwright-for-ai-agents",
      "description": "Playwright is Microsoft's cross-browser automation library — Chromium, Firefox, WebKit — widely used as the deterministic foundation underneath AI-powered browser agents like Stagehand and browser-use.",
      "category": "a2a",
      "categoryTitle": "Agent-to-Agent Protocols",
      "pillar": "Creativity",
      "keywords": [
        "Playwright",
        "browser automation",
        "agent",
        "Chromium"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Reflection Agent Pattern",
      "slug": "reflection-agent-pattern",
      "url": "https://learn.engineering.vips.edu/agent-protocols/reflection-agent-pattern",
      "description": "Reflection is the pattern where an agent critiques its own output — or has a reviewer agent critique it — before finalising, catching errors that a single forward pass would otherwise emit.",
      "category": "a2a",
      "categoryTitle": "Agent-to-Agent Protocols",
      "pillar": "Creativity",
      "keywords": [
        "reflection",
        "self-critique",
        "Reflexion",
        "Self-Refine",
        "reviewer agent"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Rod — Go Browser Automation for Agents",
      "slug": "rod-browser-automation",
      "url": "https://learn.engineering.vips.edu/agent-protocols/rod-browser-automation",
      "description": "Rod is a Go-native Chrome DevTools Protocol library that provides high-performance browser automation without Node dependencies — popular for Go agent backends driving browsers at scale.",
      "category": "a2a",
      "categoryTitle": "Agent-to-Agent Protocols",
      "pillar": "Creativity",
      "keywords": [
        "Rod",
        "Go",
        "browser automation",
        "CDP",
        "agent"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Sakana AI Scientist: Fully Automated Research Pipeline",
      "slug": "sakana-ai-scientist-pipeline",
      "url": "https://learn.engineering.vips.edu/agent-protocols/sakana-ai-scientist-pipeline",
      "description": "AI Scientist by Sakana AI is an end-to-end agent pipeline that proposes ML research ideas, writes experiment code, runs experiments, analyzes results, and drafts a LaTeX paper — the first demonstration of fully autonomous ML research.",
      "category": "a2a",
      "categoryTitle": "Agent-to-Agent Protocols",
      "pillar": "Creativity",
      "keywords": [
        "AI Scientist",
        "Sakana",
        "automated research",
        "ML research agent"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Selenium for AI Agents",
      "slug": "selenium-for-ai-agents",
      "url": "https://learn.engineering.vips.edu/agent-protocols/selenium-for-ai-agents",
      "description": "Selenium is the veteran cross-browser automation framework — WebDriver-based, language-agnostic — still used by AI agents operating in enterprise or legacy environments where Playwright isn't an option.",
      "category": "a2a",
      "categoryTitle": "Agent-to-Agent Protocols",
      "pillar": "Creativity",
      "keywords": [
        "Selenium",
        "WebDriver",
        "browser automation",
        "agent"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Skyvern: LLM-Driven Browser RPA Agent",
      "slug": "skyvern-rpa-agent",
      "url": "https://learn.engineering.vips.edu/agent-protocols/skyvern-rpa-agent",
      "description": "Skyvern is an open-source RPA platform that uses LLMs and vision models to automate browser workflows — form fills, portal logins, document uploads — without writing brittle XPath selectors.",
      "category": "a2a",
      "categoryTitle": "Agent-to-Agent Protocols",
      "pillar": "Creativity",
      "keywords": [
        "Skyvern",
        "RPA",
        "browser agent",
        "automation",
        "open source"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Stagehand — AI Browser Agent Framework",
      "slug": "stagehand-browser-agent",
      "url": "https://learn.engineering.vips.edu/agent-protocols/stagehand-browser-agent",
      "description": "Stagehand is an open-source browser automation framework from Browserbase that combines deterministic Playwright code with AI-powered steps like act(), extract(), and observe() for resilient web agents.",
      "category": "a2a",
      "categoryTitle": "Agent-to-Agent Protocols",
      "pillar": "Creativity",
      "keywords": [
        "Stagehand",
        "browser agent",
        "Playwright",
        "web automation"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Stanford STORM: Research Agent for Long-Form Articles",
      "slug": "stanford-storm-research-agent",
      "url": "https://learn.engineering.vips.edu/agent-protocols/stanford-storm-research-agent",
      "description": "STORM is Stanford's open-source research agent that simulates multi-perspective expert interviews to generate Wikipedia-quality long-form articles with citations.",
      "category": "a2a",
      "categoryTitle": "Agent-to-Agent Protocols",
      "pillar": "Creativity",
      "keywords": [
        "STORM",
        "Stanford",
        "research agent",
        "Wikipedia",
        "long-form"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Swarm Pattern — Peer Handoffs Between Agents",
      "slug": "swarm-agent-pattern",
      "url": "https://learn.engineering.vips.edu/agent-protocols/swarm-agent-pattern",
      "description": "The swarm pattern, popularised by OpenAI, models multi-agent systems as a flat set of peer agents that hand off to one another via tool calls — no top-down orchestrator required.",
      "category": "a2a",
      "categoryTitle": "Agent-to-Agent Protocols",
      "pillar": "Creativity",
      "keywords": [
        "swarm",
        "OpenAI",
        "handoff",
        "peer agents",
        "multi-agent pattern"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "SWE-bench for Agents: Evaluating Coding Agents",
      "slug": "swe-bench-for-agents",
      "url": "https://learn.engineering.vips.edu/agent-protocols/swe-bench-for-agents",
      "description": "SWE-bench evaluates autonomous coding agents on real GitHub issues from popular Python projects — the agent must produce a patch that resolves the issue and passes the project's own tests.",
      "category": "a2a",
      "categoryTitle": "Agent-to-Agent Protocols",
      "pillar": "Creativity",
      "keywords": [
        "SWE-bench",
        "coding agents",
        "benchmark",
        "Princeton",
        "software engineering"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "tau-bench — Tool-Augmented Agent Benchmark",
      "slug": "tau-bench-tool-augmented",
      "url": "https://learn.engineering.vips.edu/agent-protocols/tau-bench-tool-augmented",
      "description": "tau-bench is Sierra's benchmark for conversational agents that must use tools to complete real customer-support tasks like airline rebooking and retail returns, scored on policy compliance and task completion.",
      "category": "a2a",
      "categoryTitle": "Agent-to-Agent Protocols",
      "pillar": "Creativity",
      "keywords": [
        "tau-bench",
        "Sierra",
        "conversational agents",
        "tool use",
        "customer support"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "v0 by Vercel: UI-First Generative Agent",
      "slug": "v0-dev-agent",
      "url": "https://learn.engineering.vips.edu/agent-protocols/v0-dev-agent",
      "description": "v0 by Vercel is a generative UI agent specialized in React, Next.js, Tailwind, and shadcn/ui — turning natural-language prompts and screenshots into production-ready components and deployable apps.",
      "category": "a2a",
      "categoryTitle": "Agent-to-Agent Protocols",
      "pillar": "Creativity",
      "keywords": [
        "v0",
        "Vercel",
        "generative UI",
        "Next.js",
        "shadcn"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "WebArena: Realistic Web-Agent Benchmark",
      "slug": "webarena-benchmark",
      "url": "https://learn.engineering.vips.edu/agent-protocols/webarena-benchmark",
      "description": "WebArena is a reproducible, self-hosted benchmark from Carnegie Mellon featuring four fully-functional websites — e-commerce, forums, Gitea, content management — where agents must complete natural-language tasks end-to-end.",
      "category": "a2a",
      "categoryTitle": "Agent-to-Agent Protocols",
      "pillar": "Creativity",
      "keywords": [
        "WebArena",
        "benchmark",
        "web agents",
        "CMU",
        "browser automation"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Writer — Enterprise Agent Platform",
      "slug": "writer-enterprise-agent-platform",
      "url": "https://learn.engineering.vips.edu/agent-protocols/writer-enterprise-agent-platform",
      "description": "Writer is a full-stack generative AI platform for enterprises, combining its own Palmyra LLM family with an agent builder, knowledge graph, and strict brand-voice and compliance controls.",
      "category": "a2a",
      "categoryTitle": "Agent-to-Agent Protocols",
      "pillar": "Creativity",
      "keywords": [
        "Writer",
        "Palmyra",
        "enterprise LLM",
        "agent platform"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Zep — Agent Memory Platform",
      "slug": "zep-agent-memory",
      "url": "https://learn.engineering.vips.edu/agent-protocols/zep-agent-memory",
      "description": "Zep is a memory platform for AI agents that combines a temporal knowledge graph (Graphiti) with vector search to give agents persistent, queryable memory with fact-level provenance.",
      "category": "a2a",
      "categoryTitle": "Agent-to-Agent Protocols",
      "pillar": "Creativity",
      "keywords": [
        "Zep",
        "Graphiti",
        "knowledge graph",
        "agent memory",
        "temporal memory"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Accelerate (Hugging Face)",
      "slug": "accelerate-huggingface",
      "url": "https://learn.engineering.vips.edu/frameworks/accelerate-huggingface",
      "description": "Accelerate is Hugging Face's lightweight wrapper around PyTorch that makes the same training script run on CPU, single GPU, multi-GPU, TPU, DeepSpeed, and FSDP with minimal config changes.",
      "category": "frameworks",
      "categoryTitle": "AI Frameworks & Tooling",
      "pillar": "Capability",
      "keywords": [
        "Accelerate",
        "Hugging Face",
        "PyTorch",
        "FSDP",
        "DeepSpeed"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Agno",
      "slug": "agno",
      "url": "https://learn.engineering.vips.edu/frameworks/agno",
      "description": "Agno (formerly phidata) is a high-performance Python framework for building multi-agent systems with memory, knowledge, tools, and reasoning — model-agnostic and optimised for low-latency instantiation.",
      "category": "frameworks",
      "categoryTitle": "AI Frameworks & Tooling",
      "pillar": "Capability",
      "keywords": [
        "Agno",
        "phidata",
        "Python agents",
        "multi-agent",
        "performance"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Aider",
      "slug": "aider",
      "url": "https://learn.engineering.vips.edu/frameworks/aider",
      "description": "Aider is a terminal-first AI pair-programmer that edits your git repo — it reads selected files, generates diffs from your natural-language requests, and commits the changes, all from the CLI.",
      "category": "frameworks",
      "categoryTitle": "AI Frameworks & Tooling",
      "pillar": "Capability",
      "keywords": [
        "Aider",
        "AI pair programming",
        "CLI",
        "diff edits",
        "coding agent"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Anthropic SDK (Python)",
      "slug": "anthropic-sdk-python",
      "url": "https://learn.engineering.vips.edu/frameworks/anthropic-sdk-python",
      "description": "The official Python SDK for Anthropic's Claude API, providing typed clients for messages, tool use, streaming, batch, files, prompt caching, and computer use.",
      "category": "frameworks",
      "categoryTitle": "AI Frameworks & Tooling",
      "pillar": "Capability",
      "keywords": [
        "Anthropic SDK",
        "Claude",
        "Python",
        "Messages API",
        "tool use",
        "prompt caching"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Argilla",
      "slug": "argilla-data-annotation",
      "url": "https://learn.engineering.vips.edu/frameworks/argilla-data-annotation",
      "description": "Argilla is Hugging Face's open-source data-annotation and feedback platform for LLMs — SFT, DPO, RLHF datasets, eval datasets, and continuous human review all in a single UI.",
      "category": "frameworks",
      "categoryTitle": "AI Frameworks & Tooling",
      "pillar": "Capability",
      "keywords": [
        "Argilla",
        "data annotation",
        "DPO",
        "RLHF",
        "Hugging Face"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Arize Phoenix",
      "slug": "arize-phoenix",
      "url": "https://learn.engineering.vips.edu/frameworks/arize-phoenix",
      "description": "Arize Phoenix is an open-source LLM observability and evaluation platform offering OpenTelemetry-compatible tracing, datasets, and experiments for AI applications.",
      "category": "frameworks",
      "categoryTitle": "AI Frameworks & Tooling",
      "pillar": "Capability",
      "keywords": [
        "Arize",
        "Phoenix",
        "LLM observability",
        "OpenTelemetry",
        "OpenInference",
        "tracing"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "AutoGen",
      "slug": "autogen",
      "url": "https://learn.engineering.vips.edu/frameworks/autogen",
      "description": "AutoGen is Microsoft Research's open-source framework for building multi-agent conversational AI — asynchronous message-passing, layered APIs, and a visual AutoGen Studio for no-code agent design.",
      "category": "frameworks",
      "categoryTitle": "AI Frameworks & Tooling",
      "pillar": "Capability",
      "keywords": [
        "AutoGen",
        "Microsoft",
        "multi-agent",
        "conversational agents",
        "Magentic-One"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "AutoGPT",
      "slug": "autogpt",
      "url": "https://learn.engineering.vips.edu/frameworks/autogpt",
      "description": "AutoGPT is Significant Gravitas's autonomous agent platform that chains LLM reasoning with tools, memory, and file I/O to accomplish open-ended goals.",
      "category": "frameworks",
      "categoryTitle": "AI Frameworks & Tooling",
      "pillar": "Capability",
      "keywords": [
        "AutoGPT",
        "autonomous agent",
        "Significant Gravitas",
        "agent platform"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Axolotl",
      "slug": "axolotl",
      "url": "https://learn.engineering.vips.edu/frameworks/axolotl",
      "description": "Axolotl is a config-driven fine-tuning framework for open-weight LLMs — write one YAML file describing dataset, model, and training hyperparameters, and Axolotl handles SFT, DPO, ORPO, LoRA, and full-parameter runs.",
      "category": "frameworks",
      "categoryTitle": "AI Frameworks & Tooling",
      "pillar": "Capability",
      "keywords": [
        "Axolotl",
        "fine-tuning",
        "YAML config",
        "LoRA",
        "DeepSpeed",
        "open source"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "BabyAGI",
      "slug": "babyagi",
      "url": "https://learn.engineering.vips.edu/frameworks/babyagi",
      "description": "BabyAGI is Yohei Nakajima's influential task-driven autonomous agent — a minimal Python loop that creates, prioritises, and executes tasks toward an objective.",
      "category": "frameworks",
      "categoryTitle": "AI Frameworks & Tooling",
      "pillar": "Capability",
      "keywords": [
        "BabyAGI",
        "Yohei Nakajima",
        "autonomous agent",
        "task loop",
        "AGI"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "BAML",
      "slug": "baml",
      "url": "https://learn.engineering.vips.edu/frameworks/baml",
      "description": "BAML (Boundary's AI Modeling Language) is a schema-first DSL for defining typed LLM functions. You write function signatures in .baml files and BAML generates Python/TypeScript/Ruby clients with strict types, retries, and provider portability.",
      "category": "frameworks",
      "categoryTitle": "AI Frameworks & Tooling",
      "pillar": "Capability",
      "keywords": [
        "BAML",
        "structured output",
        "typed LLM",
        "schema-first",
        "Boundary"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "BentoML",
      "slug": "bentoml",
      "url": "https://learn.engineering.vips.edu/frameworks/bentoml",
      "description": "BentoML is an open-source framework for packaging, serving, and deploying AI models — from classic ML to LLMs — with BentoCloud providing managed hosting and autoscaling on AWS/GCP.",
      "category": "frameworks",
      "categoryTitle": "AI Frameworks & Tooling",
      "pillar": "Capability",
      "keywords": [
        "BentoML",
        "model serving",
        "OpenLLM",
        "BentoCloud",
        "AI deployment"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "BIG-Bench Hard (BBH)",
      "slug": "big-bench-hard",
      "url": "https://learn.engineering.vips.edu/frameworks/big-bench-hard",
      "description": "BIG-Bench Hard is a curated 23-task subset of BIG-Bench where humans beat prior language models, widely used to measure chain-of-thought gains on LLMs.",
      "category": "frameworks",
      "categoryTitle": "AI Frameworks & Tooling",
      "pillar": "Capability",
      "keywords": [
        "BBH",
        "BIG-Bench Hard",
        "reasoning benchmark",
        "chain-of-thought",
        "evals"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "BISHENG",
      "slug": "bisheng",
      "url": "https://learn.engineering.vips.edu/frameworks/bisheng",
      "description": "BISHENG is an open-source LLM application platform from DataElem focused on enterprise document processing, workflows, and agents, popular in the Chinese market.",
      "category": "frameworks",
      "categoryTitle": "AI Frameworks & Tooling",
      "pillar": "Capability",
      "keywords": [
        "BISHENG",
        "DataElem",
        "LLM platform",
        "enterprise",
        "OCR",
        "document AI"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Braintrust",
      "slug": "braintrust",
      "url": "https://learn.engineering.vips.edu/frameworks/braintrust",
      "description": "Braintrust is a commercial LLM evaluation platform that combines datasets, prompt playgrounds, automated scoring, and production observability — used by many US AI labs and startups to run systematic evals.",
      "category": "frameworks",
      "categoryTitle": "AI Frameworks & Tooling",
      "pillar": "Capability",
      "keywords": [
        "Braintrust",
        "LLM evaluation",
        "prompt playground",
        "observability",
        "datasets"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "browser-use",
      "slug": "browser-use-python",
      "url": "https://learn.engineering.vips.edu/frameworks/browser-use-python",
      "description": "browser-use is the most popular open-source Python library for giving LLM agents control of a real Chromium browser — DOM-aware clicks, typing, and screenshots driven by models from OpenAI, Anthropic, or Google Gemini.",
      "category": "frameworks",
      "categoryTitle": "AI Frameworks & Tooling",
      "pillar": "Capability",
      "keywords": [
        "browser-use",
        "browser agent",
        "web automation",
        "Playwright",
        "LLM"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Burr",
      "slug": "burr-state-machines",
      "url": "https://learn.engineering.vips.edu/frameworks/burr-state-machines",
      "description": "Burr is DAGWorks' open-source Python framework for building LLM applications as state machines, with built-in tracing, persistence, and a web UI for debugging.",
      "category": "frameworks",
      "categoryTitle": "AI Frameworks & Tooling",
      "pillar": "Capability",
      "keywords": [
        "Burr",
        "DAGWorks",
        "state machines",
        "LLM orchestration",
        "Python"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "CAMEL-AI",
      "slug": "camel-ai",
      "url": "https://learn.engineering.vips.edu/frameworks/camel-ai",
      "description": "CAMEL is a pioneering open-source framework for multi-agent role-playing research, supporting a scalable society of agents for data generation and task solving.",
      "category": "frameworks",
      "categoryTitle": "AI Frameworks & Tooling",
      "pillar": "Capability",
      "keywords": [
        "CAMEL-AI",
        "role-playing agents",
        "multi-agent",
        "synthetic data",
        "research"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Chonkie",
      "slug": "chonkie",
      "url": "https://learn.engineering.vips.edu/frameworks/chonkie",
      "description": "Chonkie is a fast, lightweight Python chunking library for RAG, offering token, sentence, semantic, and late chunking strategies with a small dependency footprint.",
      "category": "frameworks",
      "categoryTitle": "AI Frameworks & Tooling",
      "pillar": "Capability",
      "keywords": [
        "Chonkie",
        "chunking",
        "RAG",
        "text splitter",
        "semantic chunking"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Chroma",
      "slug": "chroma",
      "url": "https://learn.engineering.vips.edu/frameworks/chroma",
      "description": "Chroma is the most popular embedded open-source vector database — pip-install, run in-process, and scale up to a self-hosted or managed Chroma Cloud deployment when needed.",
      "category": "frameworks",
      "categoryTitle": "AI Frameworks & Tooling",
      "pillar": "Capability",
      "keywords": [
        "Chroma",
        "ChromaDB",
        "vector database",
        "embedded",
        "open source"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Codeium",
      "slug": "codeium",
      "url": "https://learn.engineering.vips.edu/frameworks/codeium",
      "description": "Codeium is the AI coding assistant and parent brand of the Windsurf IDE, offering autocomplete, chat, and agentic coding across 70+ IDEs — free for individuals, with enterprise self-hosted options.",
      "category": "frameworks",
      "categoryTitle": "AI Frameworks & Tooling",
      "pillar": "Capability",
      "keywords": [
        "Codeium",
        "Windsurf",
        "AI coding",
        "autocomplete",
        "self-hosted"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Cody (Sourcegraph)",
      "slug": "cody-sourcegraph",
      "url": "https://learn.engineering.vips.edu/frameworks/cody-sourcegraph",
      "description": "Cody is Sourcegraph's AI coding assistant with deep code-graph context, agentic editing, autocomplete, and repo-wide chat — available as a VS Code / JetBrains plugin and a CLI.",
      "category": "frameworks",
      "categoryTitle": "AI Frameworks & Tooling",
      "pillar": "Capability",
      "keywords": [
        "Cody",
        "Sourcegraph",
        "AI coding",
        "code graph",
        "IDE assistant"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Colossal-AI",
      "slug": "colossalai",
      "url": "https://learn.engineering.vips.edu/frameworks/colossalai",
      "description": "Colossal-AI is HPC-AI Tech's open-source distributed-training library for large models — heterogeneous memory management, tensor/pipeline parallelism, and an RLHF stack called Colossal-Chat.",
      "category": "frameworks",
      "categoryTitle": "AI Frameworks & Tooling",
      "pillar": "Capability",
      "keywords": [
        "Colossal-AI",
        "HPC-AI Tech",
        "distributed training",
        "tensor parallelism",
        "Colossal-Chat"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "ColPali",
      "slug": "colpali-retrieval",
      "url": "https://learn.engineering.vips.edu/frameworks/colpali-retrieval",
      "description": "ColPali is a visual document retrieval model that indexes PDF pages as images using a vision-language model, eliminating traditional OCR-and-chunk pipelines.",
      "category": "frameworks",
      "categoryTitle": "AI Frameworks & Tooling",
      "pillar": "Capability",
      "keywords": [
        "ColPali",
        "visual RAG",
        "ColBERT",
        "PaliGemma",
        "document retrieval",
        "ViDoRe"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Comet LLM / Opik-Comet",
      "slug": "comet-llm",
      "url": "https://learn.engineering.vips.edu/frameworks/comet-llm",
      "description": "Comet's LLM offering (CometLLM and Opik) is an ML experiment tracking platform extended for LLM observability — prompt logging, evals, traces, and dashboards inside an existing Comet workspace.",
      "category": "frameworks",
      "categoryTitle": "AI Frameworks & Tooling",
      "pillar": "Capability",
      "keywords": [
        "Comet",
        "Comet LLM",
        "Opik",
        "LLM observability",
        "experiment tracking"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Confident AI",
      "slug": "confident-ai",
      "url": "https://learn.engineering.vips.edu/frameworks/confident-ai",
      "description": "Confident AI is the commercial cloud platform behind DeepEval — LLM evaluation, A/B testing, red-teaming, and continuous monitoring dashboards layered on top of the open-source DeepEval library.",
      "category": "frameworks",
      "categoryTitle": "AI Frameworks & Tooling",
      "pillar": "Capability",
      "keywords": [
        "Confident AI",
        "DeepEval",
        "LLM evaluation",
        "red teaming",
        "prompt management"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Continue.dev",
      "slug": "continue-dev",
      "url": "https://learn.engineering.vips.edu/frameworks/continue-dev",
      "description": "Continue is an open-source AI coding assistant for VS Code and JetBrains — chat, autocomplete, and agent modes that work with any model (Claude, GPT, local via Ollama) and a config-first approach to customisation.",
      "category": "frameworks",
      "categoryTitle": "AI Frameworks & Tooling",
      "pillar": "Capability",
      "keywords": [
        "Continue.dev",
        "VS Code",
        "AI coding",
        "autocomplete",
        "MCP",
        "open source"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Crawl4AI",
      "slug": "crawl4ai",
      "url": "https://learn.engineering.vips.edu/frameworks/crawl4ai",
      "description": "Crawl4AI is an open-source async Python crawler built specifically for LLM pipelines — it ships JS rendering via Playwright, chunking, extraction strategies, and outputs Markdown or structured JSON.",
      "category": "frameworks",
      "categoryTitle": "AI Frameworks & Tooling",
      "pillar": "Capability",
      "keywords": [
        "Crawl4AI",
        "web crawler",
        "RAG",
        "Playwright",
        "Markdown"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "CrewAI",
      "slug": "crewai",
      "url": "https://learn.engineering.vips.edu/frameworks/crewai",
      "description": "CrewAI is a role-based multi-agent framework for Python where you define agents, tasks, and crews that collaborate to accomplish goals — focused on simplicity and opinionated orchestration.",
      "category": "frameworks",
      "categoryTitle": "AI Frameworks & Tooling",
      "pillar": "Capability",
      "keywords": [
        "CrewAI",
        "multi-agent",
        "role-based agents",
        "agent crew",
        "Python agents"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "ctransformers",
      "slug": "ctransformers",
      "url": "https://learn.engineering.vips.edu/frameworks/ctransformers",
      "description": "ctransformers is a Python binding for GGML-based transformer models (Llama, GPT-2, Falcon, MPT) with a scikit-learn-style API and a LangChain integration — an older alternative to llama-cpp-python.",
      "category": "frameworks",
      "categoryTitle": "AI Frameworks & Tooling",
      "pillar": "Capability",
      "keywords": [
        "ctransformers",
        "GGML",
        "local LLM",
        "Python",
        "CPU inference"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Datadog LLM Observability",
      "slug": "datadog-llm-observability",
      "url": "https://learn.engineering.vips.edu/frameworks/datadog-llm-observability",
      "description": "Datadog LLM Observability is a managed product that correlates LLM traces, prompts, and evaluations with your existing infrastructure and APM monitoring.",
      "category": "frameworks",
      "categoryTitle": "AI Frameworks & Tooling",
      "pillar": "Capability",
      "keywords": [
        "Datadog",
        "LLM observability",
        "APM",
        "tracing",
        "ddtrace"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "DeepEval",
      "slug": "deepeval",
      "url": "https://learn.engineering.vips.edu/frameworks/deepeval",
      "description": "DeepEval is an open-source Python framework for evaluating LLM applications — 40+ metrics (G-Eval, faithfulness, hallucination, toxicity, RAG-specific), pytest integration, and red-teaming for safety.",
      "category": "frameworks",
      "categoryTitle": "AI Frameworks & Tooling",
      "pillar": "Capability",
      "keywords": [
        "DeepEval",
        "LLM evaluation",
        "red teaming",
        "pytest",
        "RAG metrics"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "DeepSpeed",
      "slug": "deepspeed",
      "url": "https://learn.engineering.vips.edu/frameworks/deepspeed",
      "description": "DeepSpeed is Microsoft Research's deep-learning optimisation library — ZeRO memory sharding, pipeline parallelism, mixed precision, and inference kernels that make training and serving trillion-parameter models tractable.",
      "category": "frameworks",
      "categoryTitle": "AI Frameworks & Tooling",
      "pillar": "Capability",
      "keywords": [
        "DeepSpeed",
        "ZeRO",
        "Microsoft",
        "distributed training",
        "LLM training"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Dify",
      "slug": "dify",
      "url": "https://learn.engineering.vips.edu/frameworks/dify",
      "description": "Dify is an open-source LLM application platform combining visual workflow building, RAG, agent tools, and backend hosting into a single BaaS-style product.",
      "category": "frameworks",
      "categoryTitle": "AI Frameworks & Tooling",
      "pillar": "Capability",
      "keywords": [
        "Dify",
        "LLMOps",
        "open-source",
        "workflow",
        "RAG",
        "chatbot platform"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Distilabel",
      "slug": "distilabel",
      "url": "https://learn.engineering.vips.edu/frameworks/distilabel",
      "description": "Distilabel is Argilla's open-source framework for generating and labelling synthetic data for LLM training — DAG-based pipelines, distillation, UltraFeedback, self-instruct, and DPO pair generation.",
      "category": "frameworks",
      "categoryTitle": "AI Frameworks & Tooling",
      "pillar": "Capability",
      "keywords": [
        "Distilabel",
        "Argilla",
        "synthetic data",
        "DPO",
        "distillation"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Docling",
      "slug": "docling",
      "url": "https://learn.engineering.vips.edu/frameworks/docling",
      "description": "Docling is IBM Research's open-source document parser that converts PDFs, DOCX, HTML, and images into clean Markdown or JSON for LLM and RAG pipelines.",
      "category": "frameworks",
      "categoryTitle": "AI Frameworks & Tooling",
      "pillar": "Capability",
      "keywords": [
        "Docling",
        "IBM",
        "document AI",
        "PDF parsing",
        "RAG",
        "Markdown"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "DSPy",
      "slug": "dspy",
      "url": "https://learn.engineering.vips.edu/frameworks/dspy",
      "description": "DSPy is Stanford's framework for programming — not prompting — LLMs. You declare modules and signatures in Python, and DSPy optimises the prompts and few-shot examples against your metric.",
      "category": "frameworks",
      "categoryTitle": "AI Frameworks & Tooling",
      "pillar": "Capability",
      "keywords": [
        "DSPy",
        "Stanford",
        "prompt optimisation",
        "declarative LLM",
        "MIPROv2",
        "programmatic prompting"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "DVC for LLM Pipelines",
      "slug": "dvc-llm-pipelines",
      "url": "https://learn.engineering.vips.edu/frameworks/dvc-llm-pipelines",
      "description": "DVC (Data Version Control) is Iterative's Git-based tool for versioning datasets, models, and LLM pipelines — reproducible experiments, lineage, and remote storage for fine-tuning and evals.",
      "category": "frameworks",
      "categoryTitle": "AI Frameworks & Tooling",
      "pillar": "Capability",
      "keywords": [
        "DVC",
        "data version control",
        "LLM pipelines",
        "reproducibility",
        "Iterative"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "EleutherAI lm-evaluation-harness",
      "slug": "eleuther-lm-eval-harness",
      "url": "https://learn.engineering.vips.edu/frameworks/eleuther-lm-eval-harness",
      "description": "lm-evaluation-harness is EleutherAI's de-facto standard framework for evaluating language models across 200+ benchmarks (MMLU, GSM8K, HellaSwag, ARC, TruthfulQA) with reproducible configs.",
      "category": "frameworks",
      "categoryTitle": "AI Frameworks & Tooling",
      "pillar": "Capability",
      "keywords": [
        "lm-evaluation-harness",
        "EleutherAI",
        "LLM benchmarks",
        "MMLU",
        "GSM8K"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "ell",
      "slug": "ell",
      "url": "https://learn.engineering.vips.edu/frameworks/ell",
      "description": "ell is a lightweight Python library that treats prompts as versioned pure functions — decorator-based prompt definitions, auto-versioning, and a local studio for inspecting every invocation as a first-class artefact.",
      "category": "frameworks",
      "categoryTitle": "AI Frameworks & Tooling",
      "pillar": "Capability",
      "keywords": [
        "ell",
        "prompt engineering",
        "ell.so",
        "LMP",
        "versioning"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Firecrawl",
      "slug": "firecrawl",
      "url": "https://learn.engineering.vips.edu/frameworks/firecrawl",
      "description": "Firecrawl is an open-source and hosted service that crawls websites and returns clean Markdown or structured JSON — purpose-built for feeding LLM pipelines with renderable, up-to-date web content.",
      "category": "frameworks",
      "categoryTitle": "AI Frameworks & Tooling",
      "pillar": "Capability",
      "keywords": [
        "Firecrawl",
        "web crawler",
        "RAG ingestion",
        "Markdown",
        "scraping"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Fireworks AI SDK",
      "slug": "fireworks-ai-sdk",
      "url": "https://learn.engineering.vips.edu/frameworks/fireworks-ai-sdk",
      "description": "Fireworks AI is a fast hosted inference service for open-source models, with an OpenAI-compatible SDK, LoRA hot-swapping, and custom fine-tuning — optimised for latency and cost.",
      "category": "frameworks",
      "categoryTitle": "AI Frameworks & Tooling",
      "pillar": "Capability",
      "keywords": [
        "Fireworks AI",
        "open model inference",
        "LoRA",
        "FireAttention"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Flowise",
      "slug": "flowise",
      "url": "https://learn.engineering.vips.edu/frameworks/flowise",
      "description": "Flowise is an open-source drag-and-drop UI for building LangChain-based LLM flows, chatbots, and agents — deployable as a hosted API with a visual canvas.",
      "category": "frameworks",
      "categoryTitle": "AI Frameworks & Tooling",
      "pillar": "Capability",
      "keywords": [
        "Flowise",
        "no-code LLM",
        "drag and drop",
        "LangChain UI",
        "chatbot builder"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Galileo",
      "slug": "galileo",
      "url": "https://learn.engineering.vips.edu/frameworks/galileo",
      "description": "Galileo is an enterprise GenAI observability and evaluation platform — LLM-as-judge metrics, guardrail policies, and production-grade drift detection aimed at regulated industries shipping real-money AI.",
      "category": "frameworks",
      "categoryTitle": "AI Frameworks & Tooling",
      "pillar": "Capability",
      "keywords": [
        "Galileo",
        "GenAI observability",
        "hallucination detection",
        "guardrails",
        "enterprise"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Genkit",
      "slug": "genkit",
      "url": "https://learn.engineering.vips.edu/frameworks/genkit",
      "description": "Genkit is Google's open-source framework for building production GenAI apps, with SDKs in JavaScript/TypeScript, Go, and Python, tightly integrated with Firebase and Vertex AI.",
      "category": "frameworks",
      "categoryTitle": "AI Frameworks & Tooling",
      "pillar": "Capability",
      "keywords": [
        "Genkit",
        "Google",
        "Firebase",
        "Vertex AI",
        "Gemini",
        "GenAI framework"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "GitHub Copilot CLI",
      "slug": "gh-copilot",
      "url": "https://learn.engineering.vips.edu/frameworks/gh-copilot",
      "description": "GitHub Copilot CLI is a terminal-native AI assistant that explains, suggests, and runs shell commands with confirmation — part of the wider GitHub Copilot product line.",
      "category": "frameworks",
      "categoryTitle": "AI Frameworks & Tooling",
      "pillar": "Capability",
      "keywords": [
        "GitHub Copilot",
        "Copilot CLI",
        "terminal AI",
        "gh cli"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Giskard",
      "slug": "giskard",
      "url": "https://learn.engineering.vips.edu/frameworks/giskard",
      "description": "Giskard is an open-source testing framework for ML and LLM applications — detects biases, hallucinations, injection vulnerabilities, and data drift with an automated scan that generates test suites and CI checks.",
      "category": "frameworks",
      "categoryTitle": "AI Frameworks & Tooling",
      "pillar": "Capability",
      "keywords": [
        "Giskard",
        "LLM testing",
        "red teaming",
        "bias detection",
        "ML evaluation"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "GPT-Engineer",
      "slug": "gpt-engineer",
      "url": "https://learn.engineering.vips.edu/frameworks/gpt-engineer",
      "description": "GPT-Engineer is an open-source CLI agent by Anton Osika that generates and iteratively improves entire codebases from a natural-language prompt.",
      "category": "frameworks",
      "categoryTitle": "AI Frameworks & Tooling",
      "pillar": "Capability",
      "keywords": [
        "GPT-Engineer",
        "autonomous coding",
        "code generation",
        "Anton Osika"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "gptme",
      "slug": "gptme",
      "url": "https://learn.engineering.vips.edu/frameworks/gptme",
      "description": "gptme is a terminal-based personal AI assistant that can execute shell commands, edit files, run Python, and browse the web — a minimal, local-first alternative to Aider and Open Interpreter with broad LLM support.",
      "category": "frameworks",
      "categoryTitle": "AI Frameworks & Tooling",
      "pillar": "Capability",
      "keywords": [
        "gptme",
        "terminal agent",
        "coding agent",
        "local AI",
        "Ollama"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Griptape",
      "slug": "griptape",
      "url": "https://learn.engineering.vips.edu/frameworks/griptape",
      "description": "Griptape is a Python framework for building AI agents and pipelines with a Structures API (Agents, Pipelines, Workflows), first-class RAG, and an opinionated off-prompt data approach that keeps sensitive data out of LLM context.",
      "category": "frameworks",
      "categoryTitle": "AI Frameworks & Tooling",
      "pillar": "Capability",
      "keywords": [
        "Griptape",
        "AI agents",
        "off-prompt",
        "Python agents"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Guidance",
      "slug": "guidance",
      "url": "https://learn.engineering.vips.edu/frameworks/guidance",
      "description": "Guidance is Microsoft's structured generation library for controlling LLM output with interleaved prompts, constraints, and regex/CFG guidance — originally designed to work with local models where token-level logit access is possible.",
      "category": "frameworks",
      "categoryTitle": "AI Frameworks & Tooling",
      "pillar": "Capability",
      "keywords": [
        "Guidance",
        "Microsoft",
        "structured generation",
        "constrained decoding",
        "prompt programming"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Haystack Agents",
      "slug": "haystack-agents",
      "url": "https://learn.engineering.vips.edu/frameworks/haystack-agents",
      "description": "Haystack Agents is deepset's agentic module inside Haystack 2.x — tool-using LLM agents that plug into Haystack's pipeline graph for RAG, search, and production enterprise workflows.",
      "category": "frameworks",
      "categoryTitle": "AI Frameworks & Tooling",
      "pillar": "Capability",
      "keywords": [
        "Haystack",
        "deepset",
        "Haystack Agents",
        "RAG agent",
        "tool calling"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Haystack",
      "slug": "haystack",
      "url": "https://learn.engineering.vips.edu/frameworks/haystack",
      "description": "Haystack is deepset's open-source Python framework for building production LLM applications — composable pipelines for RAG, agents, and document processing with strong typing and evaluation.",
      "category": "frameworks",
      "categoryTitle": "AI Frameworks & Tooling",
      "pillar": "Capability",
      "keywords": [
        "Haystack",
        "deepset",
        "RAG",
        "pipelines",
        "document processing"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Helicone",
      "slug": "helicone",
      "url": "https://learn.engineering.vips.edu/frameworks/helicone",
      "description": "Helicone is an open-source LLM observability platform and gateway that captures every request, logs prompts and responses, computes costs, and surfaces performance issues — deployable as SaaS or self-hosted.",
      "category": "frameworks",
      "categoryTitle": "AI Frameworks & Tooling",
      "pillar": "Capability",
      "keywords": [
        "Helicone",
        "LLM observability",
        "tracing",
        "prompt logging",
        "AI gateway"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Stanford HELM",
      "slug": "helm-stanford",
      "url": "https://learn.engineering.vips.edu/frameworks/helm-stanford",
      "description": "HELM (Holistic Evaluation of Language Models) is Stanford CRFM's reproducible benchmark suite covering accuracy, calibration, robustness, bias, toxicity, and efficiency.",
      "category": "frameworks",
      "categoryTitle": "AI Frameworks & Tooling",
      "pillar": "Capability",
      "keywords": [
        "HELM",
        "Stanford",
        "CRFM",
        "LLM benchmark",
        "holistic evaluation",
        "bias"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Hugging Face Inference Endpoints",
      "slug": "huggingface-inference-endpoints",
      "url": "https://learn.engineering.vips.edu/frameworks/huggingface-inference-endpoints",
      "description": "Hugging Face Inference Endpoints is a managed service that deploys any Hub model as a secure, autoscaled HTTPS endpoint on AWS, Azure, or GCP — with TGI for LLMs and Inference Toolkit for the long tail.",
      "category": "frameworks",
      "categoryTitle": "AI Frameworks & Tooling",
      "pillar": "Capability",
      "keywords": [
        "Hugging Face",
        "Inference Endpoints",
        "TGI",
        "model hosting",
        "AWS"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Text Generation Inference (TGI)",
      "slug": "huggingface-tgi",
      "url": "https://learn.engineering.vips.edu/frameworks/huggingface-tgi",
      "description": "Text Generation Inference is Hugging Face's high-performance inference server for serving open-source LLMs with continuous batching, tensor parallelism, and quantisation.",
      "category": "frameworks",
      "categoryTitle": "AI Frameworks & Tooling",
      "pillar": "Capability",
      "keywords": [
        "TGI",
        "Hugging Face",
        "LLM serving",
        "inference",
        "continuous batching"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Humanloop",
      "slug": "humanloop",
      "url": "https://learn.engineering.vips.edu/frameworks/humanloop",
      "description": "Humanloop is a hosted LLM engineering platform offering prompt management, evaluations, datasets, and observability for production AI applications.",
      "category": "frameworks",
      "categoryTitle": "AI Frameworks & Tooling",
      "pillar": "Capability",
      "keywords": [
        "Humanloop",
        "prompt management",
        "LLM observability",
        "evals",
        "LLMOps"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Inspect AI",
      "slug": "inspect-ai",
      "url": "https://learn.engineering.vips.edu/frameworks/inspect-ai",
      "description": "Inspect AI is the UK AI Safety Institute's open-source evaluation framework, designed for large-scale AI safety and capability benchmarks — dataset-driven, with scorers, solvers, and tool-use evals.",
      "category": "frameworks",
      "categoryTitle": "AI Frameworks & Tooling",
      "pillar": "Capability",
      "keywords": [
        "Inspect AI",
        "UK AISI",
        "AI safety",
        "LLM evaluation",
        "benchmarks"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Instructor",
      "slug": "instructor",
      "url": "https://learn.engineering.vips.edu/frameworks/instructor",
      "description": "Instructor is the most popular Python library for getting structured, validated outputs from LLMs — patches OpenAI-compatible clients to return Pydantic models directly, with retries and partial streaming.",
      "category": "frameworks",
      "categoryTitle": "AI Frameworks & Tooling",
      "pillar": "Capability",
      "keywords": [
        "Instructor",
        "structured output",
        "Pydantic",
        "LLM validation",
        "function calling"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Jan",
      "slug": "jan-ai",
      "url": "https://learn.engineering.vips.edu/frameworks/jan-ai",
      "description": "Jan is an open-source ChatGPT-alternative desktop app that runs local LLMs offline on Windows, macOS, and Linux, with an OpenAI-compatible API server, model hub, and extensions.",
      "category": "frameworks",
      "categoryTitle": "AI Frameworks & Tooling",
      "pillar": "Capability",
      "keywords": [
        "Jan",
        "local LLM",
        "desktop AI",
        "Cortex",
        "offline AI"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Jina Reader",
      "slug": "jina-reader",
      "url": "https://learn.engineering.vips.edu/frameworks/jina-reader",
      "description": "Jina Reader is a free public API that converts any URL into clean LLM-ready Markdown — just prepend r.jina.ai — plus a self-host option for private data and higher rate limits.",
      "category": "frameworks",
      "categoryTitle": "AI Frameworks & Tooling",
      "pillar": "Capability",
      "keywords": [
        "Jina Reader",
        "URL to markdown",
        "RAG",
        "web reader",
        "Jina AI"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "KServe",
      "slug": "kserve",
      "url": "https://learn.engineering.vips.edu/frameworks/kserve",
      "description": "KServe is a Kubernetes-native model serving platform — originally KFServing — that provides standard CRDs for deploying ML and LLM models with autoscaling, canary rollouts, and GPU support.",
      "category": "frameworks",
      "categoryTitle": "AI Frameworks & Tooling",
      "pillar": "Capability",
      "keywords": [
        "KServe",
        "KFServing",
        "Kubernetes",
        "model serving",
        "CNCF"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Laminar",
      "slug": "laminar",
      "url": "https://learn.engineering.vips.edu/frameworks/laminar",
      "description": "Laminar is an open-source LLM observability, evals, and prompt-management platform written in Rust, with a self-hostable stack (Postgres, Clickhouse) and a cloud option.",
      "category": "frameworks",
      "categoryTitle": "AI Frameworks & Tooling",
      "pillar": "Capability",
      "keywords": [
        "Laminar",
        "lmnr.ai",
        "LLM observability",
        "online evals",
        "OSS"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "LanceDB",
      "slug": "lancedb",
      "url": "https://learn.engineering.vips.edu/frameworks/lancedb",
      "description": "LanceDB is an embedded serverless vector database built on the Lance columnar format — zero-server, S3-native, and optimised for multimodal AI workloads with Rust, Python, and TypeScript SDKs.",
      "category": "frameworks",
      "categoryTitle": "AI Frameworks & Tooling",
      "pillar": "Capability",
      "keywords": [
        "LanceDB",
        "Lance",
        "embedded vector DB",
        "serverless",
        "RAG"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "LangChain Hub",
      "slug": "langchain-hub",
      "url": "https://learn.engineering.vips.edu/frameworks/langchain-hub",
      "description": "LangChain Hub is a shared registry for prompts, runnables, and reference agents, letting teams version and pull reusable LangChain artefacts via the LangSmith UI.",
      "category": "frameworks",
      "categoryTitle": "AI Frameworks & Tooling",
      "pillar": "Capability",
      "keywords": [
        "LangChain Hub",
        "LangSmith",
        "prompt registry",
        "prompt versioning"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "LangChain",
      "slug": "langchain",
      "url": "https://learn.engineering.vips.edu/frameworks/langchain",
      "description": "LangChain is the dominant Python/TypeScript framework for building LLM applications — chains, agents, tool use, memory, and observability via LangSmith and deployment via LangGraph.",
      "category": "frameworks",
      "categoryTitle": "AI Frameworks & Tooling",
      "pillar": "Capability",
      "keywords": [
        "LangChain",
        "LangGraph",
        "LangSmith",
        "LLM framework",
        "agents",
        "Python"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Langflow",
      "slug": "langflow",
      "url": "https://learn.engineering.vips.edu/frameworks/langflow",
      "description": "Langflow is an open-source Python-based visual IDE for designing LLM workflows, RAG pipelines, and agents, built on top of LangChain and now maintained by DataStax.",
      "category": "frameworks",
      "categoryTitle": "AI Frameworks & Tooling",
      "pillar": "Capability",
      "keywords": [
        "Langflow",
        "LangChain UI",
        "visual builder",
        "Python",
        "DataStax"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Langfuse",
      "slug": "langfuse",
      "url": "https://learn.engineering.vips.edu/frameworks/langfuse",
      "description": "Langfuse is the leading open-source observability, tracing, prompt management, and evaluation platform for LLM apps — self-hostable, OTel-compatible, and framework-agnostic.",
      "category": "frameworks",
      "categoryTitle": "AI Frameworks & Tooling",
      "pillar": "Capability",
      "keywords": [
        "Langfuse",
        "LLM observability",
        "open source",
        "tracing",
        "prompt management"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "LangGraph",
      "slug": "langgraph",
      "url": "https://learn.engineering.vips.edu/frameworks/langgraph",
      "description": "LangGraph is LangChain's stateful agent framework — a low-level library for building controllable, long-running LLM agents as graphs with checkpoints, human-in-the-loop, and durable execution.",
      "category": "frameworks",
      "categoryTitle": "AI Frameworks & Tooling",
      "pillar": "Capability",
      "keywords": [
        "LangGraph",
        "LangChain",
        "stateful agents",
        "agent orchestration",
        "checkpoints",
        "human-in-the-loop"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "LangSmith",
      "slug": "langsmith",
      "url": "https://learn.engineering.vips.edu/frameworks/langsmith",
      "description": "LangSmith is LangChain's commercial observability, evaluation, and prompt-management platform for LLM apps — traces, datasets, online/offline evals, and prompt versioning in one tool.",
      "category": "frameworks",
      "categoryTitle": "AI Frameworks & Tooling",
      "pillar": "Capability",
      "keywords": [
        "LangSmith",
        "LLM observability",
        "LangChain",
        "evals",
        "prompt management"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Langtrace",
      "slug": "langtrace",
      "url": "https://learn.engineering.vips.edu/frameworks/langtrace",
      "description": "Langtrace is an open-source OpenTelemetry-native observability platform for LLM apps with SDKs for Python and TypeScript, plus a self-hostable UI and cloud option from Scale3.",
      "category": "frameworks",
      "categoryTitle": "AI Frameworks & Tooling",
      "pillar": "Capability",
      "keywords": [
        "Langtrace",
        "LLM observability",
        "OpenTelemetry",
        "Scale3",
        "OSS"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "LaVague",
      "slug": "lavague",
      "url": "https://learn.engineering.vips.edu/frameworks/lavague",
      "description": "LaVague is an open-source large-action-model framework that turns natural-language instructions into Selenium/Playwright code — combining a world model, an action engine, and retrieval over DOM snippets.",
      "category": "frameworks",
      "categoryTitle": "AI Frameworks & Tooling",
      "pillar": "Capability",
      "keywords": [
        "LaVague",
        "large action model",
        "web agent",
        "Selenium",
        "browser"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Letta",
      "slug": "letta",
      "url": "https://learn.engineering.vips.edu/frameworks/letta",
      "description": "Letta (formerly MemGPT) is an open-source framework and server for building stateful agents with long-term memory, self-editing context, and persistent state — based on Berkeley's MemGPT research.",
      "category": "frameworks",
      "categoryTitle": "AI Frameworks & Tooling",
      "pillar": "Capability",
      "keywords": [
        "Letta",
        "MemGPT",
        "stateful agents",
        "long-term memory",
        "agent memory",
        "Berkeley"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Liger Kernel",
      "slug": "liger-kernel",
      "url": "https://learn.engineering.vips.edu/frameworks/liger-kernel",
      "description": "Liger Kernel is LinkedIn's open-source collection of fused Triton kernels for LLM training — RMSNorm, RoPE, SwiGLU, CrossEntropy fused with speedups of 20-30% and 50%+ memory savings.",
      "category": "frameworks",
      "categoryTitle": "AI Frameworks & Tooling",
      "pillar": "Capability",
      "keywords": [
        "Liger Kernel",
        "LinkedIn",
        "Triton",
        "fine-tuning",
        "fused kernels"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Lilypad (Mirascope Labs)",
      "slug": "lilypad-exponent-evals",
      "url": "https://learn.engineering.vips.edu/frameworks/lilypad-exponent-evals",
      "description": "Lilypad is an open-source prompt-engineering and LLM observability toolkit from Mirascope Labs, offering versioned prompt experiments, traces, and evals.",
      "category": "frameworks",
      "categoryTitle": "AI Frameworks & Tooling",
      "pillar": "Capability",
      "keywords": [
        "Lilypad",
        "Mirascope",
        "prompt observability",
        "prompt versioning",
        "LLM evals"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "LiteLLM",
      "slug": "litellm",
      "url": "https://learn.engineering.vips.edu/frameworks/litellm",
      "description": "LiteLLM is an open-source Python SDK and proxy that normalises 100+ LLM providers (OpenAI, Anthropic, Azure, Bedrock, Vertex, Ollama) behind a single OpenAI-compatible API with cost tracking, fallbacks, and retries.",
      "category": "frameworks",
      "categoryTitle": "AI Frameworks & Tooling",
      "pillar": "Capability",
      "keywords": [
        "LiteLLM",
        "LLM proxy",
        "AI gateway",
        "provider abstraction",
        "OpenAI compatible",
        "fallback"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "LitGPT",
      "slug": "litgpt",
      "url": "https://learn.engineering.vips.edu/frameworks/litgpt",
      "description": "LitGPT is Lightning AI's hackable implementation of 20+ LLM architectures — pretraining, fine-tuning, LoRA, QLoRA, and serving, all in readable PyTorch without wrappers on top of wrappers.",
      "category": "frameworks",
      "categoryTitle": "AI Frameworks & Tooling",
      "pillar": "Capability",
      "keywords": [
        "LitGPT",
        "Lightning AI",
        "fine-tuning",
        "LoRA",
        "PyTorch"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "llama-cpp-python",
      "slug": "llama-cpp-python",
      "url": "https://learn.engineering.vips.edu/frameworks/llama-cpp-python",
      "description": "llama-cpp-python is the official Python binding for llama.cpp, exposing local GGUF inference with an OpenAI-compatible server, LangChain integration, and CPU/GPU acceleration.",
      "category": "frameworks",
      "categoryTitle": "AI Frameworks & Tooling",
      "pillar": "Capability",
      "keywords": [
        "llama-cpp-python",
        "llama.cpp",
        "GGUF",
        "local LLM",
        "Python"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "llama.cpp",
      "slug": "llama-cpp",
      "url": "https://learn.engineering.vips.edu/frameworks/llama-cpp",
      "description": "llama.cpp is a C/C++ inference engine for LLMs that runs Llama, Mistral, Qwen, Gemma, Phi and hundreds of other open-weight models on laptops, servers, and edge devices — no Python or CUDA required.",
      "category": "frameworks",
      "categoryTitle": "AI Frameworks & Tooling",
      "pillar": "Capability",
      "keywords": [
        "llama.cpp",
        "GGUF",
        "local LLM",
        "quantization",
        "CPU inference"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "LlamaParse",
      "slug": "llama-parse",
      "url": "https://learn.engineering.vips.edu/frameworks/llama-parse",
      "description": "LlamaParse is LlamaIndex's hosted document parser specialised for LLM ingestion — turning complex PDFs, slides, and tables into clean, structured Markdown.",
      "category": "frameworks",
      "categoryTitle": "AI Frameworks & Tooling",
      "pillar": "Capability",
      "keywords": [
        "LlamaParse",
        "LlamaIndex",
        "PDF parsing",
        "document AI",
        "RAG ingestion"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Llama Stack",
      "slug": "llama-stack",
      "url": "https://learn.engineering.vips.edu/frameworks/llama-stack",
      "description": "Llama Stack is Meta's standardised API surface for building LLM apps — inference, safety, memory, agents, and evals behind one vendor-agnostic spec with Python/Node SDKs.",
      "category": "frameworks",
      "categoryTitle": "AI Frameworks & Tooling",
      "pillar": "Capability",
      "keywords": [
        "Llama Stack",
        "Meta",
        "LLM API",
        "standard",
        "agents",
        "RAG"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "llamafile",
      "slug": "llamafile",
      "url": "https://learn.engineering.vips.edu/frameworks/llamafile",
      "description": "llamafile is Mozilla's project that packages an LLM, its weights, and llama.cpp into a single executable that runs on Linux, macOS, Windows, and BSD with no install — a fully portable local model.",
      "category": "frameworks",
      "categoryTitle": "AI Frameworks & Tooling",
      "pillar": "Capability",
      "keywords": [
        "llamafile",
        "Mozilla",
        "local LLM",
        "llama.cpp",
        "portable"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "LlamaIndex",
      "slug": "llamaindex",
      "url": "https://learn.engineering.vips.edu/frameworks/llamaindex",
      "description": "LlamaIndex is the Python/TypeScript framework for building RAG and retrieval pipelines over your data — 160+ loaders, query engines, and a commercial Llama Cloud for hosted ingestion.",
      "category": "frameworks",
      "categoryTitle": "AI Frameworks & Tooling",
      "pillar": "Capability",
      "keywords": [
        "LlamaIndex",
        "RAG",
        "retrieval",
        "query engine",
        "Python",
        "LlamaParse"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "LLM Guard",
      "slug": "llm-guard",
      "url": "https://learn.engineering.vips.edu/frameworks/llm-guard",
      "description": "LLM Guard is a security-focused open-source toolkit from Protect AI — input and output scanners for prompt injection, PII, toxicity, bias, and secret leakage that drop in front of any LLM API.",
      "category": "frameworks",
      "categoryTitle": "AI Frameworks & Tooling",
      "pillar": "Capability",
      "keywords": [
        "LLM Guard",
        "Protect AI",
        "prompt injection",
        "PII",
        "LLM security"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "LM Studio",
      "slug": "lm-studio",
      "url": "https://learn.engineering.vips.edu/frameworks/lm-studio",
      "description": "LM Studio is a polished desktop app for discovering, downloading, and running local LLMs on Windows, macOS, and Linux, with an OpenAI-compatible local server and a headless CLI for production.",
      "category": "frameworks",
      "categoryTitle": "AI Frameworks & Tooling",
      "pillar": "Capability",
      "keywords": [
        "LM Studio",
        "local LLM",
        "GGUF",
        "MLX",
        "desktop AI"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Log10",
      "slug": "log10-ai",
      "url": "https://learn.engineering.vips.edu/frameworks/log10-ai",
      "description": "Log10 is an LLM observability and evaluation platform with automated log feedback, self-hosted deployment options, and debugging tools for production agents.",
      "category": "frameworks",
      "categoryTitle": "AI Frameworks & Tooling",
      "pillar": "Capability",
      "keywords": [
        "Log10",
        "LLM observability",
        "AutoFeedback",
        "prompt debugging"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "LoRAX",
      "slug": "lorax",
      "url": "https://learn.engineering.vips.edu/frameworks/lorax",
      "description": "LoRAX is Predibase's open-source LLM server specialised in hot-swapping hundreds of LoRA adapters on a single base model for low-cost multi-tenant inference.",
      "category": "frameworks",
      "categoryTitle": "AI Frameworks & Tooling",
      "pillar": "Capability",
      "keywords": [
        "LoRAX",
        "LoRA serving",
        "Predibase",
        "multi-adapter",
        "inference server"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Marker",
      "slug": "marker-pdf",
      "url": "https://learn.engineering.vips.edu/frameworks/marker-pdf",
      "description": "Marker is an open-source PDF-to-Markdown converter from Datalab that preserves layout, tables, code, and equations — widely used as the first stage of RAG pipelines and document ingestion.",
      "category": "frameworks",
      "categoryTitle": "AI Frameworks & Tooling",
      "pillar": "Capability",
      "keywords": [
        "Marker",
        "Datalab",
        "PDF to Markdown",
        "RAG ingestion",
        "document parsing"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Marvin",
      "slug": "marvin",
      "url": "https://learn.engineering.vips.edu/frameworks/marvin",
      "description": "Marvin is a lightweight Python library from Prefect for building AI features using type hints — classify, extract, transform, or generate with decorators over Pydantic models and native function signatures.",
      "category": "frameworks",
      "categoryTitle": "AI Frameworks & Tooling",
      "pillar": "Capability",
      "keywords": [
        "Marvin",
        "Prefect",
        "structured output",
        "AI functions",
        "Python typed AI"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Mastra",
      "slug": "mastra",
      "url": "https://learn.engineering.vips.edu/frameworks/mastra",
      "description": "Mastra is a TypeScript-first agent framework from the Gatsby founders — agents, workflows, RAG, memory, evals, and observability, designed to run on Node and edge runtimes.",
      "category": "frameworks",
      "categoryTitle": "AI Frameworks & Tooling",
      "pillar": "Capability",
      "keywords": [
        "Mastra",
        "TypeScript agents",
        "Node.js",
        "Vercel AI SDK",
        "agent framework"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Megatron-LM",
      "slug": "megatron-lm",
      "url": "https://learn.engineering.vips.edu/frameworks/megatron-lm",
      "description": "Megatron-LM is NVIDIA's research framework for training very large transformer models — pioneered tensor and pipeline parallelism and provides the reference kernels used across the industry.",
      "category": "frameworks",
      "categoryTitle": "AI Frameworks & Tooling",
      "pillar": "Capability",
      "keywords": [
        "Megatron-LM",
        "NVIDIA",
        "tensor parallelism",
        "large model training",
        "Megatron-Core"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Meilisearch",
      "slug": "meilisearch",
      "url": "https://learn.engineering.vips.edu/frameworks/meilisearch",
      "description": "Meilisearch is an open-source, developer-friendly search engine written in Rust with instant typo-tolerant BM25 search, hybrid vector+keyword retrieval, and a simple REST API — a common RAG companion to LLM stacks.",
      "category": "frameworks",
      "categoryTitle": "AI Frameworks & Tooling",
      "pillar": "Capability",
      "keywords": [
        "Meilisearch",
        "search engine",
        "hybrid search",
        "typo-tolerant",
        "RAG"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "MetaGPT",
      "slug": "metagpt",
      "url": "https://learn.engineering.vips.edu/frameworks/metagpt",
      "description": "MetaGPT is a multi-agent framework that assigns software-engineering roles (PM, architect, engineer, QA) to specialised LLM agents to collaboratively build projects.",
      "category": "frameworks",
      "categoryTitle": "AI Frameworks & Tooling",
      "pillar": "Capability",
      "keywords": [
        "MetaGPT",
        "multi-agent",
        "software company",
        "AGI",
        "SOP"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Milvus",
      "slug": "milvus",
      "url": "https://learn.engineering.vips.edu/frameworks/milvus",
      "description": "Milvus is a graduated CNCF open-source vector database engineered for billion-scale similarity search — distributed architecture, GPU indexing, hybrid dense+sparse retrieval, and a mature managed offering via Zilliz Cloud.",
      "category": "frameworks",
      "categoryTitle": "AI Frameworks & Tooling",
      "pillar": "Capability",
      "keywords": [
        "Milvus",
        "vector database",
        "similarity search",
        "Zilliz",
        "HNSW"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Mirascope",
      "slug": "mirascope",
      "url": "https://learn.engineering.vips.edu/frameworks/mirascope",
      "description": "Mirascope is a developer-friendly Python toolkit for LLMs — Pythonic prompt templates via decorators, typed outputs with Pydantic, and first-class support for every major provider with a thin, composable API.",
      "category": "frameworks",
      "categoryTitle": "AI Frameworks & Tooling",
      "pillar": "Capability",
      "keywords": [
        "Mirascope",
        "Python LLM",
        "prompt templates",
        "typed LLM",
        "decorators"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "MLC LLM",
      "slug": "mlc-llm",
      "url": "https://learn.engineering.vips.edu/frameworks/mlc-llm",
      "description": "MLC LLM is a universal LLM deployment engine that compiles models to run efficiently on phones, browsers (WebGPU), Macs, and any GPU — enabling client-side inference without a server.",
      "category": "frameworks",
      "categoryTitle": "AI Frameworks & Tooling",
      "pillar": "Capability",
      "keywords": [
        "MLC LLM",
        "WebGPU",
        "on-device",
        "edge AI",
        "browser LLM",
        "TVM"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "MLflow LLM Evaluate",
      "slug": "mlflow-llm-evaluate",
      "url": "https://learn.engineering.vips.edu/frameworks/mlflow-llm-evaluate",
      "description": "MLflow's LLM evaluation module adds mlflow.evaluate() support for language-model outputs — built-in metrics like toxicity, ROUGE, faithfulness, and custom GenAI judges logged alongside regular ML experiments.",
      "category": "frameworks",
      "categoryTitle": "AI Frameworks & Tooling",
      "pillar": "Capability",
      "keywords": [
        "MLflow",
        "LLM evaluation",
        "GenAI metrics",
        "experiment tracking"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Modal",
      "slug": "modal",
      "url": "https://learn.engineering.vips.edu/frameworks/modal",
      "description": "Modal is a serverless cloud for AI and data workloads — Python-first, GPU-ready, with zero-config containers, scheduled jobs, web endpoints, and a developer experience that feels closer to importing a decorator than deploying infra.",
      "category": "frameworks",
      "categoryTitle": "AI Frameworks & Tooling",
      "pillar": "Capability",
      "keywords": [
        "Modal",
        "serverless GPU",
        "AI cloud",
        "fine-tuning",
        "Python"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Modular MAX Platform",
      "slug": "modular-max-platform",
      "url": "https://learn.engineering.vips.edu/frameworks/modular-max-platform",
      "description": "MAX is Modular's unified AI platform — a high-performance serving engine and Mojo-based development stack designed to outperform TensorRT-LLM and vLLM on common hardware.",
      "category": "frameworks",
      "categoryTitle": "AI Frameworks & Tooling",
      "pillar": "Capability",
      "keywords": [
        "Modular",
        "MAX",
        "Mojo",
        "AI infrastructure",
        "inference engine"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "NVIDIA NeMo Guardrails",
      "slug": "nvidia-nemo-guardrails",
      "url": "https://learn.engineering.vips.edu/frameworks/nvidia-nemo-guardrails",
      "description": "NeMo Guardrails is NVIDIA's open-source toolkit for adding programmable rails around LLM apps — topical, dialog, moderation, and retrieval guardrails written in the Colang DSL.",
      "category": "frameworks",
      "categoryTitle": "AI Frameworks & Tooling",
      "pillar": "Capability",
      "keywords": [
        "NeMo Guardrails",
        "NVIDIA",
        "LLM safety",
        "Colang",
        "guardrails"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "NVIDIA Triton Inference Server",
      "slug": "nvidia-triton-inference-server",
      "url": "https://learn.engineering.vips.edu/frameworks/nvidia-triton-inference-server",
      "description": "Triton is NVIDIA's open-source inference server supporting PyTorch, TensorFlow, ONNX, TensorRT, and TensorRT-LLM backends for high-throughput model serving.",
      "category": "frameworks",
      "categoryTitle": "AI Frameworks & Tooling",
      "pillar": "Capability",
      "keywords": [
        "Triton",
        "NVIDIA",
        "inference server",
        "TensorRT-LLM",
        "model serving"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Ollama",
      "slug": "ollama",
      "url": "https://learn.engineering.vips.edu/frameworks/ollama",
      "description": "Ollama is the most popular local-first runtime for open-weight LLMs — a single binary that downloads, quantises, and serves models like Llama, Qwen, Mistral, Gemma, and Phi over an OpenAI-compatible API.",
      "category": "frameworks",
      "categoryTitle": "AI Frameworks & Tooling",
      "pillar": "Capability",
      "keywords": [
        "Ollama",
        "local LLM",
        "llama.cpp",
        "open weights",
        "local inference"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "olmOCR",
      "slug": "olmocr",
      "url": "https://learn.engineering.vips.edu/frameworks/olmocr",
      "description": "olmOCR is AllenAI's open-source OCR toolkit that converts PDFs and scans to clean linearised text using a vision-language model fine-tuned on millions of pages — tuned for trillion-token pretraining corpora.",
      "category": "frameworks",
      "categoryTitle": "AI Frameworks & Tooling",
      "pillar": "Capability",
      "keywords": [
        "olmOCR",
        "AllenAI",
        "OCR",
        "PDF extraction",
        "open-source VLM"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Open Interpreter",
      "slug": "open-interpreter",
      "url": "https://learn.engineering.vips.edu/frameworks/open-interpreter",
      "description": "Open Interpreter is a natural-language interface to your computer — it writes and executes Python, Bash, JavaScript, or AppleScript locally so an LLM can edit files, query APIs, or drive native apps from a single terminal REPL.",
      "category": "frameworks",
      "categoryTitle": "AI Frameworks & Tooling",
      "pillar": "Capability",
      "keywords": [
        "Open Interpreter",
        "code interpreter",
        "terminal AI",
        "local agent",
        "Python"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "OpenAI Agents SDK",
      "slug": "openai-agents-sdk",
      "url": "https://learn.engineering.vips.edu/frameworks/openai-agents-sdk",
      "description": "The OpenAI Agents SDK is OpenAI's official 2025 framework for building agentic apps with handoffs, guardrails, sessions, and tracing — a production-ready successor to the earlier Swarm experiment.",
      "category": "frameworks",
      "categoryTitle": "AI Frameworks & Tooling",
      "pillar": "Capability",
      "keywords": [
        "OpenAI Agents SDK",
        "Swarm",
        "agent handoffs",
        "guardrails",
        "Responses API"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "OpenAI Evals",
      "slug": "openai-evals",
      "url": "https://learn.engineering.vips.edu/frameworks/openai-evals",
      "description": "OpenAI Evals is OpenAI's open-source framework for building and running LLM evaluations, plus a registry of crowd-contributed benchmarks covering many tasks.",
      "category": "frameworks",
      "categoryTitle": "AI Frameworks & Tooling",
      "pillar": "Capability",
      "keywords": [
        "OpenAI evals",
        "LLM evaluation",
        "benchmarks",
        "grading",
        "YAML evals"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "OpenAI SDK (Python)",
      "slug": "openai-sdk-python",
      "url": "https://learn.engineering.vips.edu/frameworks/openai-sdk-python",
      "description": "The official Python SDK for the OpenAI API, covering Chat Completions, Responses, Assistants, Realtime, Files, Fine-tuning, Embeddings, Images, and Audio.",
      "category": "frameworks",
      "categoryTitle": "AI Frameworks & Tooling",
      "pillar": "Capability",
      "keywords": [
        "OpenAI SDK",
        "Python",
        "Chat Completions",
        "Responses API",
        "Assistants",
        "embeddings"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "OpenCompass",
      "slug": "opencompass",
      "url": "https://learn.engineering.vips.edu/frameworks/opencompass",
      "description": "OpenCompass is Shanghai AI Lab's comprehensive LLM evaluation platform supporting 100+ benchmarks and 20+ model families, widely used in the Chinese AI community.",
      "category": "frameworks",
      "categoryTitle": "AI Frameworks & Tooling",
      "pillar": "Capability",
      "keywords": [
        "OpenCompass",
        "Shanghai AI Lab",
        "LLM evaluation",
        "Chinese benchmarks",
        "CompassRank"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "OpenLLM",
      "slug": "openllm",
      "url": "https://learn.engineering.vips.edu/frameworks/openllm",
      "description": "OpenLLM by BentoML is an open platform for running and deploying open-source LLMs as OpenAI-compatible APIs, with one-command serving and built-in bento packaging.",
      "category": "frameworks",
      "categoryTitle": "AI Frameworks & Tooling",
      "pillar": "Capability",
      "keywords": [
        "OpenLLM",
        "BentoML",
        "LLM serving",
        "OpenAI API",
        "local models"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "OpenRouter",
      "slug": "openrouter",
      "url": "https://learn.engineering.vips.edu/frameworks/openrouter",
      "description": "OpenRouter is a hosted AI router that gives you a single OpenAI-compatible endpoint plus one billing account for 300+ models across Anthropic, OpenAI, Google, Meta, Mistral, DeepSeek, and open-source providers.",
      "category": "frameworks",
      "categoryTitle": "AI Frameworks & Tooling",
      "pillar": "Capability",
      "keywords": [
        "OpenRouter",
        "AI router",
        "LLM gateway",
        "model aggregator",
        "OpenAI compatible"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Opik",
      "slug": "opik",
      "url": "https://learn.engineering.vips.edu/frameworks/opik",
      "description": "Opik is Comet's open-source LLM observability and evaluation platform — trace logging, prompt playground, LLM-as-judge evals, and a hosted tier that plugs into LangChain, LlamaIndex, and OpenAI.",
      "category": "frameworks",
      "categoryTitle": "AI Frameworks & Tooling",
      "pillar": "Capability",
      "keywords": [
        "Opik",
        "Comet",
        "LLM observability",
        "tracing",
        "evals"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Outlines",
      "slug": "outlines",
      "url": "https://learn.engineering.vips.edu/frameworks/outlines",
      "description": "Outlines is a Python library for structured text generation — it constrains an LLM's output to match a JSON schema, regex, context-free grammar, or Pydantic model at the decoding step, guaranteeing valid structure.",
      "category": "frameworks",
      "categoryTitle": "AI Frameworks & Tooling",
      "pillar": "Capability",
      "keywords": [
        "Outlines",
        "constrained generation",
        "structured output",
        "JSON schema",
        "grammar"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Patronus AI",
      "slug": "patronus-ai",
      "url": "https://learn.engineering.vips.edu/frameworks/patronus-ai",
      "description": "Patronus AI is an evaluation and guardrail platform for LLM applications with a library of judge models (Lynx for hallucination detection), scenario testing, and regulated-industry benchmarks.",
      "category": "frameworks",
      "categoryTitle": "AI Frameworks & Tooling",
      "pillar": "Capability",
      "keywords": [
        "Patronus AI",
        "LLM evals",
        "hallucination detection",
        "Lynx",
        "guardrails"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "pdfplumber",
      "slug": "pdfplumber",
      "url": "https://learn.engineering.vips.edu/frameworks/pdfplumber",
      "description": "pdfplumber is a Python library for extracting text, tables, and layout metadata from PDFs, built on pdfminer.six — the go-to tool when you need per-character precision and reliable table extraction.",
      "category": "frameworks",
      "categoryTitle": "AI Frameworks & Tooling",
      "pillar": "Capability",
      "keywords": [
        "pdfplumber",
        "PDF parsing",
        "Python",
        "table extraction",
        "pdfminer"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "PEFT (Hugging Face)",
      "slug": "peft-huggingface",
      "url": "https://learn.engineering.vips.edu/frameworks/peft-huggingface",
      "description": "PEFT is Hugging Face's library of parameter-efficient fine-tuning methods — LoRA, QLoRA, IA3, prefix tuning, and more — implemented as wrappers on top of Transformers and Accelerate.",
      "category": "frameworks",
      "categoryTitle": "AI Frameworks & Tooling",
      "pillar": "Capability",
      "keywords": [
        "PEFT",
        "LoRA",
        "QLoRA",
        "fine-tuning",
        "Hugging Face"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "pgvector",
      "slug": "pgvector",
      "url": "https://learn.engineering.vips.edu/frameworks/pgvector",
      "description": "pgvector is the de-facto vector similarity extension for Postgres — IVFFlat and HNSW indexes, exact and approximate search, and full SQL joins against your existing tables, no separate database required.",
      "category": "frameworks",
      "categoryTitle": "AI Frameworks & Tooling",
      "pillar": "Capability",
      "keywords": [
        "pgvector",
        "Postgres",
        "vector search",
        "HNSW",
        "RAG"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Phind",
      "slug": "phind",
      "url": "https://learn.engineering.vips.edu/frameworks/phind",
      "description": "Phind is an AI search engine and coding assistant for developers that grounds answers in live web results and documentation, with a VS Code extension and a line of fine-tuned open coding models.",
      "category": "frameworks",
      "categoryTitle": "AI Frameworks & Tooling",
      "pillar": "Capability",
      "keywords": [
        "Phind",
        "AI search",
        "developer AI",
        "Phind-70B",
        "coding assistant"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Pinecone",
      "slug": "pinecone",
      "url": "https://learn.engineering.vips.edu/frameworks/pinecone",
      "description": "Pinecone is the market-leading managed vector database for production AI — serverless pay-per-use architecture, billions-scale indexes, hybrid search, and native integrations with every major LLM stack.",
      "category": "frameworks",
      "categoryTitle": "AI Frameworks & Tooling",
      "pillar": "Capability",
      "keywords": [
        "Pinecone",
        "vector database",
        "managed",
        "serverless",
        "hybrid search"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Portkey",
      "slug": "portkey",
      "url": "https://learn.engineering.vips.edu/frameworks/portkey",
      "description": "Portkey is an AI gateway that sits between your app and LLM providers, adding semantic caching, retries, load balancing, guardrails, cost limits, and prompt management across 200+ models.",
      "category": "frameworks",
      "categoryTitle": "AI Frameworks & Tooling",
      "pillar": "Capability",
      "keywords": [
        "Portkey",
        "AI gateway",
        "LLM guardrails",
        "semantic cache",
        "prompt management"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Microsoft Presidio",
      "slug": "presidio-pii-anonymizer",
      "url": "https://learn.engineering.vips.edu/frameworks/presidio-pii-anonymizer",
      "description": "Presidio is Microsoft's open-source PII detection and anonymisation framework — spaCy + regex + pattern recognisers that identify and redact personal data in text, images, and structured data before it hits an LLM.",
      "category": "frameworks",
      "categoryTitle": "AI Frameworks & Tooling",
      "pillar": "Capability",
      "keywords": [
        "Presidio",
        "PII",
        "anonymisation",
        "Microsoft",
        "GDPR"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Microsoft PromptFlow",
      "slug": "prompt-flow-microsoft",
      "url": "https://learn.engineering.vips.edu/frameworks/prompt-flow-microsoft",
      "description": "PromptFlow is Microsoft's open-source toolkit for building, evaluating, and deploying LLM applications, integrated with Azure AI Foundry for production pipelines and tracing.",
      "category": "frameworks",
      "categoryTitle": "AI Frameworks & Tooling",
      "pillar": "Capability",
      "keywords": [
        "PromptFlow",
        "Microsoft",
        "Azure AI",
        "LLM orchestration",
        "flow graph",
        "evals"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "PromptBench",
      "slug": "promptbench",
      "url": "https://learn.engineering.vips.edu/frameworks/promptbench",
      "description": "PromptBench is a Microsoft unified Python library for evaluating LLMs across benchmarks, adversarial prompts, prompt engineering, and dynamic evaluation protocols.",
      "category": "frameworks",
      "categoryTitle": "AI Frameworks & Tooling",
      "pillar": "Capability",
      "keywords": [
        "PromptBench",
        "Microsoft Research",
        "LLM evaluation",
        "adversarial",
        "benchmarks"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Promptfoo",
      "slug": "promptfoo",
      "url": "https://learn.engineering.vips.edu/frameworks/promptfoo",
      "description": "Promptfoo is an open-source CLI and library for testing, evaluating, and red-teaming LLM prompts — YAML-first configs, matrix sweeps across providers, and a web viewer for side-by-side diffs.",
      "category": "frameworks",
      "categoryTitle": "AI Frameworks & Tooling",
      "pillar": "Capability",
      "keywords": [
        "Promptfoo",
        "LLM evals",
        "red teaming",
        "prompt testing",
        "YAML"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Pydantic AI",
      "slug": "pydantic-ai",
      "url": "https://learn.engineering.vips.edu/frameworks/pydantic-ai",
      "description": "Pydantic AI is a typed, Pythonic agent framework from the Pydantic team that brings FastAPI-style ergonomics to building production LLM apps with structured outputs, dependency injection, and built-in evals.",
      "category": "frameworks",
      "categoryTitle": "AI Frameworks & Tooling",
      "pillar": "Capability",
      "keywords": [
        "Pydantic AI",
        "typed agents",
        "structured output",
        "dependency injection",
        "Python"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Qdrant",
      "slug": "qdrant",
      "url": "https://learn.engineering.vips.edu/frameworks/qdrant",
      "description": "Qdrant is a high-performance open-source vector database written in Rust — rich payload filtering, hybrid dense/sparse search, quantisation, and a managed Qdrant Cloud offering.",
      "category": "frameworks",
      "categoryTitle": "AI Frameworks & Tooling",
      "pillar": "Capability",
      "keywords": [
        "Qdrant",
        "vector database",
        "Rust",
        "open source",
        "hybrid search"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "R2R",
      "slug": "r2r",
      "url": "https://learn.engineering.vips.edu/frameworks/r2r",
      "description": "R2R (RAG to Riches) is SciPhi's open-source RAG server — ingestion, hybrid search, knowledge graphs, agentic retrieval, and multi-tenant auth in a single deployable service.",
      "category": "frameworks",
      "categoryTitle": "AI Frameworks & Tooling",
      "pillar": "Capability",
      "keywords": [
        "R2R",
        "SciPhi",
        "RAG server",
        "hybrid search",
        "knowledge graph"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Ragas",
      "slug": "ragas",
      "url": "https://learn.engineering.vips.edu/frameworks/ragas",
      "description": "Ragas is the standard open-source evaluation framework for RAG and agentic LLM applications — metrics for faithfulness, answer relevancy, context precision/recall, and agent tool use.",
      "category": "frameworks",
      "categoryTitle": "AI Frameworks & Tooling",
      "pillar": "Capability",
      "keywords": [
        "Ragas",
        "RAG evaluation",
        "LLM as judge",
        "faithfulness",
        "answer relevancy"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "RAGatouille",
      "slug": "ragatouille",
      "url": "https://learn.engineering.vips.edu/frameworks/ragatouille",
      "description": "RAGatouille is a Python library that makes ColBERT-style late-interaction retrieval practical for RAG pipelines — index, search, and fine-tune ColBERT models with a few lines, often beating single-vector dense retrieval.",
      "category": "frameworks",
      "categoryTitle": "AI Frameworks & Tooling",
      "pillar": "Capability",
      "keywords": [
        "RAGatouille",
        "ColBERT",
        "late interaction",
        "retrieval",
        "RAG"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Ray Serve LLM",
      "slug": "ray-serve-llm",
      "url": "https://learn.engineering.vips.edu/frameworks/ray-serve-llm",
      "description": "Ray Serve LLM is Anyscale's batteries-included module for serving LLMs on Ray clusters, bundling vLLM, Ray autoscaling, and an OpenAI-compatible API.",
      "category": "frameworks",
      "categoryTitle": "AI Frameworks & Tooling",
      "pillar": "Capability",
      "keywords": [
        "Ray Serve",
        "Anyscale",
        "LLM serving",
        "vLLM",
        "autoscaling"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Reducto",
      "slug": "reducto",
      "url": "https://learn.engineering.vips.edu/frameworks/reducto",
      "description": "Reducto is a document-AI API that parses complex PDFs, spreadsheets, and scans into structured JSON or Markdown with layout-aware chunking — built for enterprise RAG on financial filings and contracts.",
      "category": "frameworks",
      "categoryTitle": "AI Frameworks & Tooling",
      "pillar": "Capability",
      "keywords": [
        "Reducto",
        "document AI",
        "PDF extraction",
        "RAG chunking",
        "enterprise"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Replicate",
      "slug": "replicate",
      "url": "https://learn.engineering.vips.edu/frameworks/replicate",
      "description": "Replicate is a pay-per-second inference cloud for open-source ML models — one HTTP call to run Flux, Llama, Whisper, or any custom model pushed via the Cog container format.",
      "category": "frameworks",
      "categoryTitle": "AI Frameworks & Tooling",
      "pillar": "Capability",
      "keywords": [
        "Replicate",
        "Cog",
        "model hosting",
        "serverless inference",
        "API"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Requesty",
      "slug": "requesty",
      "url": "https://learn.engineering.vips.edu/frameworks/requesty",
      "description": "Requesty is an AI request router and gateway that gives developers one API key for hundreds of models, with smart routing based on cost, latency, or quality plus usage analytics and fallback handling.",
      "category": "frameworks",
      "categoryTitle": "AI Frameworks & Tooling",
      "pillar": "Capability",
      "keywords": [
        "Requesty",
        "AI router",
        "LLM gateway",
        "model routing",
        "cost optimization"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Rivet",
      "slug": "rivet",
      "url": "https://learn.engineering.vips.edu/frameworks/rivet",
      "description": "Rivet is Ironclad's open-source desktop IDE for visually designing, debugging, and executing LLM agent graphs with a focus on local development ergonomics.",
      "category": "frameworks",
      "categoryTitle": "AI Frameworks & Tooling",
      "pillar": "Capability",
      "keywords": [
        "Rivet",
        "Ironclad",
        "visual prompt IDE",
        "agent graph",
        "desktop app"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Semantic Kernel",
      "slug": "semantic-kernel",
      "url": "https://learn.engineering.vips.edu/frameworks/semantic-kernel",
      "description": "Semantic Kernel is Microsoft's open-source SDK for orchestrating LLMs, plugins, and memory in C#, Python, and Java — the enterprise-friendly alternative to LangChain with first-class Azure OpenAI support.",
      "category": "frameworks",
      "categoryTitle": "AI Frameworks & Tooling",
      "pillar": "Capability",
      "keywords": [
        "Semantic Kernel",
        "Microsoft",
        "LLM orchestration",
        "Azure OpenAI",
        ".NET agents"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "SGLang",
      "slug": "sglang",
      "url": "https://learn.engineering.vips.edu/frameworks/sglang",
      "description": "SGLang is a high-performance LLM serving framework with a structured-generation front-end and a RadixAttention backend that accelerates prompts with shared prefixes, often outperforming vLLM on structured workloads.",
      "category": "frameworks",
      "categoryTitle": "AI Frameworks & Tooling",
      "pillar": "Capability",
      "keywords": [
        "SGLang",
        "LLM serving",
        "RadixAttention",
        "structured generation",
        "inference"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Skyvern",
      "slug": "skyvern-python",
      "url": "https://learn.engineering.vips.edu/frameworks/skyvern-python",
      "description": "Skyvern is an open-source self-hostable browser automation platform that uses LLMs plus computer vision to complete web tasks — form filling, data scraping, and multi-step flows — without brittle XPath selectors.",
      "category": "frameworks",
      "categoryTitle": "AI Frameworks & Tooling",
      "pillar": "Capability",
      "keywords": [
        "Skyvern",
        "browser automation",
        "vision agent",
        "web scraping",
        "LLM"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "smolagents",
      "slug": "smolagents",
      "url": "https://learn.engineering.vips.edu/frameworks/smolagents",
      "description": "smolagents is Hugging Face's minimal agent framework (~1000 LOC) focused on code-writing agents — LLMs that plan by generating Python rather than JSON tool calls.",
      "category": "frameworks",
      "categoryTitle": "AI Frameworks & Tooling",
      "pillar": "Capability",
      "keywords": [
        "smolagents",
        "Hugging Face",
        "CodeAgent",
        "minimal agents",
        "code-writing agents"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "HumanEval+ / EvalPlus",
      "slug": "stanford-human-eval-plus",
      "url": "https://learn.engineering.vips.edu/frameworks/stanford-human-eval-plus",
      "description": "EvalPlus is a rigorously extended version of OpenAI's HumanEval and Google's MBPP code-generation benchmarks, with 80x more test cases that catch silent failures — the reference benchmark for code LLMs.",
      "category": "frameworks",
      "categoryTitle": "AI Frameworks & Tooling",
      "pillar": "Capability",
      "keywords": [
        "HumanEval+",
        "EvalPlus",
        "code benchmark",
        "MBPP+",
        "code LLM"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Tabnine",
      "slug": "tabnine",
      "url": "https://learn.engineering.vips.edu/frameworks/tabnine",
      "description": "Tabnine is an enterprise-focused AI coding assistant with on-prem deployment, custom-model fine-tuning on private repos, and strong data-governance controls — used in regulated industries and large engineering orgs.",
      "category": "frameworks",
      "categoryTitle": "AI Frameworks & Tooling",
      "pillar": "Capability",
      "keywords": [
        "Tabnine",
        "AI coding",
        "enterprise",
        "on-prem AI",
        "code completion"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Tantivy",
      "slug": "tantivy-search",
      "url": "https://learn.engineering.vips.edu/frameworks/tantivy-search",
      "description": "Tantivy is a fast, full-text search engine library written in Rust — a Lucene-inspired foundation for building custom BM25 hybrid search in RAG stacks with Python bindings (tantivy-py).",
      "category": "frameworks",
      "categoryTitle": "AI Frameworks & Tooling",
      "pillar": "Capability",
      "keywords": [
        "Tantivy",
        "Rust search",
        "BM25",
        "hybrid retrieval",
        "full-text search"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "TaskWeaver",
      "slug": "taskweaver-microsoft",
      "url": "https://learn.engineering.vips.edu/frameworks/taskweaver-microsoft",
      "description": "TaskWeaver is Microsoft's code-first agent framework that converts user requests into executable Python plans, designed for data analytics and rich plugin ecosystems.",
      "category": "frameworks",
      "categoryTitle": "AI Frameworks & Tooling",
      "pillar": "Capability",
      "keywords": [
        "TaskWeaver",
        "Microsoft",
        "code-first agent",
        "data analysis",
        "plugin framework"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "TensorRT-LLM",
      "slug": "tensorrt-llm",
      "url": "https://learn.engineering.vips.edu/frameworks/tensorrt-llm",
      "description": "NVIDIA TensorRT-LLM is a C++/Python library that compiles LLMs into highly-optimised CUDA engines for H100/H200/B200 GPUs, delivering the highest raw throughput of any inference stack on NVIDIA hardware.",
      "category": "frameworks",
      "categoryTitle": "AI Frameworks & Tooling",
      "pillar": "Capability",
      "keywords": [
        "TensorRT-LLM",
        "NVIDIA",
        "LLM inference",
        "CUDA",
        "FP8",
        "speculative decoding"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Together AI SDK",
      "slug": "together-ai-sdk",
      "url": "https://learn.engineering.vips.edu/frameworks/together-ai-sdk",
      "description": "Together AI's Python and TypeScript SDKs give an OpenAI-compatible interface to 200+ open-source models (Llama, Mixtral, DeepSeek, Qwen) served on Together's low-latency GPU cloud.",
      "category": "frameworks",
      "categoryTitle": "AI Frameworks & Tooling",
      "pillar": "Capability",
      "keywords": [
        "Together AI",
        "open model inference",
        "Llama API",
        "Mixtral",
        "DeepSeek"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Together Fine-Tuning",
      "slug": "together-fine-tuning",
      "url": "https://learn.engineering.vips.edu/frameworks/together-fine-tuning",
      "description": "Together AI's managed fine-tuning service runs SFT, DPO, and continued-pretraining jobs on open-weight models (Llama, Mistral, Qwen, DeepSeek) via a hosted API, returning a deployable endpoint.",
      "category": "frameworks",
      "categoryTitle": "AI Frameworks & Tooling",
      "pillar": "Capability",
      "keywords": [
        "Together AI",
        "fine-tuning service",
        "managed training",
        "Llama",
        "DPO"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "torchtune",
      "slug": "torchtune",
      "url": "https://learn.engineering.vips.edu/frameworks/torchtune",
      "description": "torchtune is PyTorch's official native fine-tuning library for LLMs — recipe-driven SFT, LoRA, QLoRA, DPO, and distributed training without the Hugging Face Transformers abstraction layer.",
      "category": "frameworks",
      "categoryTitle": "AI Frameworks & Tooling",
      "pillar": "Capability",
      "keywords": [
        "torchtune",
        "PyTorch",
        "fine-tuning",
        "LoRA",
        "recipe"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "OpenLLMetry (Traceloop)",
      "slug": "traceloop-openllmetry",
      "url": "https://learn.engineering.vips.edu/frameworks/traceloop-openllmetry",
      "description": "OpenLLMetry is Traceloop's open-source OpenTelemetry extension that adds standardized LLM spans to your existing tracing stack — one library, any OTLP backend.",
      "category": "frameworks",
      "categoryTitle": "AI Frameworks & Tooling",
      "pillar": "Capability",
      "keywords": [
        "OpenLLMetry",
        "Traceloop",
        "OpenTelemetry",
        "LLM tracing",
        "OTLP"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Trafilatura",
      "slug": "trafilatura",
      "url": "https://learn.engineering.vips.edu/frameworks/trafilatura",
      "description": "Trafilatura is a widely-used Python library for extracting main content, metadata, and comments from HTML — fast, purely local, and consistently ranked top on web-extraction benchmarks.",
      "category": "frameworks",
      "categoryTitle": "AI Frameworks & Tooling",
      "pillar": "Capability",
      "keywords": [
        "Trafilatura",
        "HTML extraction",
        "web scraping",
        "Python",
        "text mining"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "TRL (Transformer Reinforcement Learning)",
      "slug": "trl",
      "url": "https://learn.engineering.vips.edu/frameworks/trl",
      "description": "TRL is Hugging Face's official library for post-training LLMs — supervised fine-tuning, PPO, DPO, ORPO, KTO, GRPO, and reward-model training, all built on Transformers and Accelerate.",
      "category": "frameworks",
      "categoryTitle": "AI Frameworks & Tooling",
      "pillar": "Capability",
      "keywords": [
        "TRL",
        "Hugging Face",
        "RLHF",
        "DPO",
        "GRPO",
        "preference optimization"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "TruLens",
      "slug": "trulens",
      "url": "https://learn.engineering.vips.edu/frameworks/trulens",
      "description": "TruLens is Snowflake's open-source LLM-observability and evaluation library — feedback functions for groundedness, relevance, and toxicity plus a local dashboard that traces every RAG call.",
      "category": "frameworks",
      "categoryTitle": "AI Frameworks & Tooling",
      "pillar": "Capability",
      "keywords": [
        "TruLens",
        "LLM evaluation",
        "RAG triad",
        "feedback functions",
        "observability"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "turbopuffer",
      "slug": "turbopuffer",
      "url": "https://learn.engineering.vips.edu/frameworks/turbopuffer",
      "description": "turbopuffer is a serverless object-storage-native vector database — cold-start friendly, pay-per-query, and designed to hold billions of vectors at a fraction of memory-resident DB cost while still delivering sub-second ANN search.",
      "category": "frameworks",
      "categoryTitle": "AI Frameworks & Tooling",
      "pillar": "Capability",
      "keywords": [
        "turbopuffer",
        "serverless vector DB",
        "multi-tenant RAG",
        "object storage"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "txtai",
      "slug": "txtai",
      "url": "https://learn.engineering.vips.edu/frameworks/txtai",
      "description": "txtai is an all-in-one embeddings database and AI toolkit for Python — vector search, RAG pipelines, agents, and language model workflows in a single lightweight package.",
      "category": "frameworks",
      "categoryTitle": "AI Frameworks & Tooling",
      "pillar": "Capability",
      "keywords": [
        "txtai",
        "embeddings",
        "vector search",
        "RAG",
        "NeuML"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "TypeChat",
      "slug": "typechat",
      "url": "https://learn.engineering.vips.edu/frameworks/typechat",
      "description": "TypeChat is a Microsoft library that uses TypeScript types as the schema for LLM outputs, yielding strongly-typed, validated JSON responses without a heavy orchestration layer.",
      "category": "frameworks",
      "categoryTitle": "AI Frameworks & Tooling",
      "pillar": "Capability",
      "keywords": [
        "TypeChat",
        "Microsoft",
        "structured output",
        "TypeScript",
        "schema",
        "JSON mode"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Unsloth",
      "slug": "unsloth",
      "url": "https://learn.engineering.vips.edu/frameworks/unsloth",
      "description": "Unsloth is a Python library that fine-tunes open-source LLMs (Llama, Mistral, Qwen, Gemma, Phi) 2-5x faster than HuggingFace defaults with 60-80% less memory, using custom Triton kernels and manual backprop.",
      "category": "frameworks",
      "categoryTitle": "AI Frameworks & Tooling",
      "pillar": "Capability",
      "keywords": [
        "Unsloth",
        "fine-tuning",
        "LoRA",
        "QLoRA",
        "Llama",
        "fast training"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Unstructured.io",
      "slug": "unstructured-io",
      "url": "https://learn.engineering.vips.edu/frameworks/unstructured-io",
      "description": "Unstructured is an open-source toolkit that extracts, cleans, and chunks content from PDFs, HTML, emails, and office docs into LLM-ready structured elements.",
      "category": "frameworks",
      "categoryTitle": "AI Frameworks & Tooling",
      "pillar": "Capability",
      "keywords": [
        "Unstructured",
        "document parsing",
        "PDF",
        "RAG ingestion",
        "ETL"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Verba",
      "slug": "verba",
      "url": "https://learn.engineering.vips.edu/frameworks/verba",
      "description": "Verba is Weaviate's open-source RAG chatbot — a ready-to-deploy golden-path example for ingesting documents, indexing into Weaviate, and chatting with your data via a polished web UI.",
      "category": "frameworks",
      "categoryTitle": "AI Frameworks & Tooling",
      "pillar": "Capability",
      "keywords": [
        "Verba",
        "Weaviate",
        "RAG chatbot",
        "golden RAGtriever",
        "chat with docs"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Vercel AI SDK",
      "slug": "vercel-ai-sdk",
      "url": "https://learn.engineering.vips.edu/frameworks/vercel-ai-sdk",
      "description": "The Vercel AI SDK is a TypeScript library for building AI-powered apps — unified generation API across OpenAI, Anthropic, Google, and 20+ providers, streaming React UI helpers, and agent / tool-use primitives.",
      "category": "frameworks",
      "categoryTitle": "AI Frameworks & Tooling",
      "pillar": "Capability",
      "keywords": [
        "Vercel AI SDK",
        "TypeScript",
        "streaming",
        "useChat",
        "React",
        "Zod"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Vespa",
      "slug": "vespa",
      "url": "https://learn.engineering.vips.edu/frameworks/vespa",
      "description": "Vespa is Yahoo's open-source search and retrieval engine — tensor ranking, late-interaction ColBERT, vector ANN, and structured query evaluation in one distributed platform used for web-scale AI search.",
      "category": "frameworks",
      "categoryTitle": "AI Frameworks & Tooling",
      "pillar": "Capability",
      "keywords": [
        "Vespa",
        "search engine",
        "tensor ranking",
        "ColBERT",
        "Yahoo"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "vLLM",
      "slug": "vllm",
      "url": "https://learn.engineering.vips.edu/frameworks/vllm",
      "description": "vLLM is the leading open-source high-throughput inference and serving engine for LLMs — PagedAttention, continuous batching, prefix caching, tensor/pipeline parallelism, and OpenAI-compatible API.",
      "category": "frameworks",
      "categoryTitle": "AI Frameworks & Tooling",
      "pillar": "Capability",
      "keywords": [
        "vLLM",
        "inference",
        "LLM serving",
        "PagedAttention",
        "continuous batching"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Weaviate",
      "slug": "weaviate",
      "url": "https://learn.engineering.vips.edu/frameworks/weaviate",
      "description": "Weaviate is an open-source vector database with native hybrid search, generative modules, multi-tenancy, and a strong ecosystem of first-party apps like Verba for RAG chatbots.",
      "category": "frameworks",
      "categoryTitle": "AI Frameworks & Tooling",
      "pillar": "Capability",
      "keywords": [
        "Weaviate",
        "vector database",
        "hybrid search",
        "multi-tenant",
        "generative search"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "W&B Weave",
      "slug": "weights-and-biases-weave",
      "url": "https://learn.engineering.vips.edu/frameworks/weights-and-biases-weave",
      "description": "Weights & Biases Weave is a toolkit for tracking, evaluating, and iterating on LLM applications with automatic call tracing, datasets, and rigorous evaluation.",
      "category": "frameworks",
      "categoryTitle": "AI Frameworks & Tooling",
      "pillar": "Capability",
      "keywords": [
        "W&B Weave",
        "Weights and Biases",
        "LLM observability",
        "evals",
        "tracing"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Zed AI",
      "slug": "zed-ai-features",
      "url": "https://learn.engineering.vips.edu/frameworks/zed-ai-features",
      "description": "Zed AI is the built-in AI assistant panel and agentic editing system inside the Zed editor — a high-performance Rust IDE with multi-model chat, inline edits, and an agentic Zed Edit mode.",
      "category": "frameworks",
      "categoryTitle": "AI Frameworks & Tooling",
      "pillar": "Capability",
      "keywords": [
        "Zed",
        "Zed AI",
        "code editor",
        "AI editor",
        "Rust IDE"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "GAIA Benchmark",
      "slug": "agent-benchmark-gaia",
      "url": "https://learn.engineering.vips.edu/concepts/agent-benchmark-gaia",
      "description": "GAIA is a benchmark of 466 real-world questions that require multi-step tool use, web browsing, file handling, and reasoning — it is the standard evaluation for general AI assistants and agents, with humans scoring 92% and frontier agents historically far below.",
      "category": "concepts",
      "categoryTitle": "Core Concepts",
      "pillar": "Curiosity",
      "keywords": [
        "GAIA",
        "agent benchmark",
        "general AI assistants",
        "tool use",
        "evaluation"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Agentic Memory",
      "slug": "agentic-memory",
      "url": "https://learn.engineering.vips.edu/concepts/agentic-memory",
      "description": "Agentic memory is a set of techniques that give an LLM agent persistent state beyond its context window — short-term scratchpads, long-term semantic stores, and episodic logs — so it can learn from past interactions across sessions.",
      "category": "concepts",
      "categoryTitle": "Core Concepts",
      "pillar": "Curiosity",
      "keywords": [
        "agentic memory",
        "long-term memory",
        "MemGPT",
        "episodic memory",
        "agents"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "AI Safety Red-Teaming",
      "slug": "ai-safety-red-teaming",
      "url": "https://learn.engineering.vips.edu/concepts/ai-safety-red-teaming",
      "description": "AI safety red-teaming is the practice of deliberately probing an AI system with adversarial prompts and scenarios — by humans, by other models, or automated tools — to uncover harmful, unsafe, or policy-violating behaviours before deployment.",
      "category": "concepts",
      "categoryTitle": "Core Concepts",
      "pillar": "Curiosity",
      "keywords": [
        "red-teaming",
        "AI safety",
        "adversarial testing",
        "jailbreak",
        "prompt injection",
        "alignment"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Attention Mechanism",
      "slug": "attention-mechanism",
      "url": "https://learn.engineering.vips.edu/concepts/attention-mechanism",
      "description": "Attention is a neural network operation that lets a model compute a weighted combination of input elements for each output position, where the weights are learned from the similarity between a query and a set of keys.",
      "category": "concepts",
      "categoryTitle": "Core Concepts",
      "pillar": "Curiosity",
      "keywords": [
        "attention",
        "transformer",
        "self-attention",
        "neural networks"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Batching and Continuous Batching",
      "slug": "batching-and-continuous-batching",
      "url": "https://learn.engineering.vips.edu/concepts/batching-and-continuous-batching",
      "description": "Batching runs multiple LLM inference requests through the GPU together to amortize fixed costs; continuous batching, pioneered by Orca and vLLM, dynamically adds and removes requests from the batch at every decoding step for much higher throughput.",
      "category": "concepts",
      "categoryTitle": "Core Concepts",
      "pillar": "Curiosity",
      "keywords": [
        "continuous batching",
        "iteration-level scheduling",
        "vLLM",
        "LLM serving",
        "throughput"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Beam Search",
      "slug": "beam-search",
      "url": "https://learn.engineering.vips.edu/concepts/beam-search",
      "description": "Beam search is a deterministic decoding algorithm that keeps the top-B partial sequences at every generation step and expands them — it approximates the argmax-probability sequence better than greedy decoding but tends to produce bland, repetitive text in modern LLMs.",
      "category": "concepts",
      "categoryTitle": "Core Concepts",
      "pillar": "Curiosity",
      "keywords": [
        "beam search",
        "decoding",
        "machine translation",
        "deterministic decoding"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "BM25 (Okapi BM25)",
      "slug": "bm25",
      "url": "https://learn.engineering.vips.edu/concepts/bm25",
      "description": "BM25 is the classical bag-of-words ranking function used by search engines to score documents against a query using term frequency, inverse document frequency, and document length normalization.",
      "category": "concepts",
      "categoryTitle": "Core Concepts",
      "pillar": "Curiosity",
      "keywords": [
        "BM25",
        "Okapi BM25",
        "information retrieval",
        "TF-IDF",
        "keyword search"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Chain-of-Thought Prompting",
      "slug": "chain-of-thought",
      "url": "https://learn.engineering.vips.edu/concepts/chain-of-thought",
      "description": "Chain-of-thought (CoT) prompting is a technique where the model is asked to show its step-by-step reasoning before giving a final answer, which dramatically improves accuracy on math, logic, and multi-step tasks.",
      "category": "concepts",
      "categoryTitle": "Core Concepts",
      "pillar": "Curiosity",
      "keywords": [
        "chain of thought",
        "CoT",
        "step by step",
        "reasoning",
        "scratchpad prompting"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Chatbot Arena (LMSYS)",
      "slug": "chatbot-arena",
      "url": "https://learn.engineering.vips.edu/concepts/chatbot-arena",
      "description": "Chatbot Arena is a crowdsourced LLM evaluation platform where users submit a prompt, receive anonymous responses from two different models, and vote for the better one — producing Elo-style rankings from millions of head-to-head comparisons.",
      "category": "concepts",
      "categoryTitle": "Core Concepts",
      "pillar": "Curiosity",
      "keywords": [
        "Chatbot Arena",
        "LMSYS",
        "LMArena",
        "Elo",
        "LLM evaluation",
        "preference benchmark"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Chunking Strategies (for RAG)",
      "slug": "chunking-strategies",
      "url": "https://learn.engineering.vips.edu/concepts/chunking-strategies",
      "description": "Chunking strategies are the rules by which a RAG pipeline splits documents into retrievable units — choice of size, overlap, and boundary (character, token, sentence, section, semantic) directly controls retrieval quality and answer grounding.",
      "category": "concepts",
      "categoryTitle": "Core Concepts",
      "pillar": "Curiosity",
      "keywords": [
        "chunking",
        "RAG",
        "document splitting",
        "chunk size",
        "semantic chunking"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Constitutional AI (CAI)",
      "slug": "constitutional-ai",
      "url": "https://learn.engineering.vips.edu/concepts/constitutional-ai",
      "description": "Constitutional AI is Anthropic's alignment technique where a model is trained to critique and revise its own outputs against a written set of principles (the 'constitution'), producing preference data used to fine-tune a safer assistant — largely replacing human red-teamers with AI feedback.",
      "category": "concepts",
      "categoryTitle": "Core Concepts",
      "pillar": "Curiosity",
      "keywords": [
        "Constitutional AI",
        "CAI",
        "RLAIF",
        "Anthropic",
        "alignment"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Context Window",
      "slug": "context-window",
      "url": "https://learn.engineering.vips.edu/concepts/context-window",
      "description": "The context window is the maximum number of tokens — prompt plus output — a language model can process in a single call, bounded by architecture and memory.",
      "category": "concepts",
      "categoryTitle": "Core Concepts",
      "pillar": "Curiosity",
      "keywords": [
        "context window",
        "context length",
        "long context",
        "token limit",
        "lost in the middle"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Cosine Similarity",
      "slug": "cosine-similarity",
      "url": "https://learn.engineering.vips.edu/concepts/cosine-similarity",
      "description": "Cosine similarity is a metric that measures how close two vectors point in the same direction, computed as their dot product divided by the product of their magnitudes. It's the default similarity used for embeddings.",
      "category": "concepts",
      "categoryTitle": "Core Concepts",
      "pillar": "Curiosity",
      "keywords": [
        "cosine similarity",
        "embeddings",
        "similarity metric",
        "dot product",
        "vector search"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Model Distillation",
      "slug": "distillation",
      "url": "https://learn.engineering.vips.edu/concepts/distillation",
      "description": "Model distillation is a compression technique where a small 'student' model is trained to mimic a larger 'teacher' model's outputs, transferring capability into a cheaper, faster model.",
      "category": "concepts",
      "categoryTitle": "Core Concepts",
      "pillar": "Curiosity",
      "keywords": [
        "distillation",
        "knowledge distillation",
        "model compression",
        "teacher-student",
        "DeepSeek-R1-Distill"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Direct Preference Optimization (DPO)",
      "slug": "dpo",
      "url": "https://learn.engineering.vips.edu/concepts/dpo",
      "description": "Direct Preference Optimization (DPO) is an alignment technique that fine-tunes a language model directly on pairs of preferred vs dispreferred responses, skipping the reward model and RL loop used in RLHF.",
      "category": "concepts",
      "categoryTitle": "Core Concepts",
      "pillar": "Curiosity",
      "keywords": [
        "DPO",
        "direct preference optimization",
        "alignment",
        "preference learning",
        "RLHF alternative"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Embeddings",
      "slug": "embeddings",
      "url": "https://learn.engineering.vips.edu/concepts/embeddings",
      "description": "Embeddings are dense numerical vectors that represent words, sentences, images, or other objects in a space where semantic similarity corresponds to geometric closeness.",
      "category": "concepts",
      "categoryTitle": "Core Concepts",
      "pillar": "Curiosity",
      "keywords": [
        "embeddings",
        "vector embeddings",
        "text embeddings",
        "semantic vectors",
        "dense vectors"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Few-Shot Prompting",
      "slug": "few-shot-prompting",
      "url": "https://learn.engineering.vips.edu/concepts/few-shot-prompting",
      "description": "Few-shot prompting is a technique where you include a handful of input-output examples directly in the prompt so the LLM can infer the task format and respond in kind — no weights change, the model learns in-context.",
      "category": "concepts",
      "categoryTitle": "Core Concepts",
      "pillar": "Curiosity",
      "keywords": [
        "few-shot prompting",
        "in-context learning",
        "prompting",
        "GPT-3"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Fine-tuning",
      "slug": "fine-tuning",
      "url": "https://learn.engineering.vips.edu/concepts/fine-tuning",
      "description": "Fine-tuning adapts a base LLM's weights to new task formats, style, or tone using labeled examples. Prefer RAG for new facts; fine-tune for new behavior.",
      "category": "concepts",
      "categoryTitle": "Core Concepts",
      "pillar": "Curiosity",
      "keywords": [
        "fine-tuning",
        "LoRA",
        "QLoRA",
        "LLM training",
        "PEFT"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "FlashAttention",
      "slug": "flash-attention",
      "url": "https://learn.engineering.vips.edu/concepts/flash-attention",
      "description": "FlashAttention is an IO-aware exact implementation of self-attention that tiles computation across GPU SRAM to avoid materializing the full attention matrix, giving large speedups and linear-in-sequence-length memory.",
      "category": "concepts",
      "categoryTitle": "Core Concepts",
      "pillar": "Curiosity",
      "keywords": [
        "FlashAttention",
        "FlashAttention-2",
        "FlashAttention-3",
        "GPU kernel",
        "IO-aware attention"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "GGUF Format",
      "slug": "gguf-format",
      "url": "https://learn.engineering.vips.edu/concepts/gguf-format",
      "description": "GGUF is a single-file binary format for quantized LLM weights and metadata, designed for llama.cpp and its ecosystem — it packages tokenizer, architecture, and quantized tensors into one portable file that loads via mmap on CPU or GPU.",
      "category": "concepts",
      "categoryTitle": "Core Concepts",
      "pillar": "Curiosity",
      "keywords": [
        "GGUF",
        "llama.cpp",
        "Ollama",
        "local LLM",
        "quantized weights",
        "GGML"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Grouped-Query Attention (GQA)",
      "slug": "grouped-query-attention",
      "url": "https://learn.engineering.vips.edu/concepts/grouped-query-attention",
      "description": "Grouped-Query Attention (GQA) is an attention variant where multiple query heads share a single key/value head — it cuts KV-cache memory and boosts inference throughput with almost no quality loss versus full multi-head attention.",
      "category": "concepts",
      "categoryTitle": "Core Concepts",
      "pillar": "Curiosity",
      "keywords": [
        "grouped-query attention",
        "GQA",
        "KV cache",
        "multi-head attention",
        "Llama 2 70B",
        "LLM inference"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Group Relative Policy Optimization (GRPO)",
      "slug": "grpo",
      "url": "https://learn.engineering.vips.edu/concepts/grpo",
      "description": "Group Relative Policy Optimization (GRPO) is the reinforcement-learning algorithm DeepSeek used to train R1 — it drops PPO's value network and estimates advantages by comparing multiple sampled responses within the same prompt group.",
      "category": "concepts",
      "categoryTitle": "Core Concepts",
      "pillar": "Curiosity",
      "keywords": [
        "GRPO",
        "group relative policy optimization",
        "DeepSeek R1",
        "RLHF",
        "reasoning RL",
        "PPO alternative"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Guardrails (LLM Safety Layers)",
      "slug": "guardrails",
      "url": "https://learn.engineering.vips.edu/concepts/guardrails",
      "description": "Guardrails are input and output validation layers wrapped around an LLM — filters, classifiers, schema checks, and policy rules — that block unsafe, off-topic, or malformed generations before they reach users or downstream systems.",
      "category": "concepts",
      "categoryTitle": "Core Concepts",
      "pillar": "Curiosity",
      "keywords": [
        "guardrails",
        "LLM safety",
        "input validation",
        "content moderation",
        "Llama Guard"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Hallucination",
      "slug": "hallucination",
      "url": "https://learn.engineering.vips.edu/concepts/hallucination",
      "description": "Hallucination is when a language model confidently generates content that is factually wrong, fabricated, or unsupported by any provided source — the single most important reliability problem in LLM applications.",
      "category": "concepts",
      "categoryTitle": "Core Concepts",
      "pillar": "Curiosity",
      "keywords": [
        "hallucination",
        "LLM hallucination",
        "factuality",
        "grounding",
        "fabrication"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Hybrid Search (BM25 + Vector)",
      "slug": "hybrid-search",
      "url": "https://learn.engineering.vips.edu/concepts/hybrid-search",
      "description": "Hybrid search is a retrieval strategy that combines sparse keyword scoring (usually BM25) with dense vector similarity, then fuses the two ranked lists — catching both exact-term matches and semantically related passages.",
      "category": "concepts",
      "categoryTitle": "Core Concepts",
      "pillar": "Curiosity",
      "keywords": [
        "hybrid search",
        "BM25",
        "vector search",
        "RRF",
        "reciprocal rank fusion",
        "RAG"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "HyDE (Hypothetical Document Embeddings)",
      "slug": "hyde-pattern",
      "url": "https://learn.engineering.vips.edu/concepts/hyde-pattern",
      "description": "HyDE is a retrieval technique where the LLM first generates a hypothetical answer to the user query, then embeds that generated answer and uses it — not the query — to search the vector index.",
      "category": "concepts",
      "categoryTitle": "Core Concepts",
      "pillar": "Curiosity",
      "keywords": [
        "HyDE",
        "hypothetical document embeddings",
        "RAG",
        "query expansion",
        "dense retrieval"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Instruction Tuning (SFT)",
      "slug": "instruction-tuning",
      "url": "https://learn.engineering.vips.edu/concepts/instruction-tuning",
      "description": "Instruction tuning is the supervised fine-tuning stage where a pretrained language model is trained on (instruction, response) pairs so that it learns to follow natural-language commands instead of merely continuing text.",
      "category": "concepts",
      "categoryTitle": "Core Concepts",
      "pillar": "Curiosity",
      "keywords": [
        "instruction tuning",
        "SFT",
        "supervised fine-tuning",
        "instruction following",
        "FLAN",
        "Alpaca"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "INT4 Quantization",
      "slug": "int4-quantization",
      "url": "https://learn.engineering.vips.edu/concepts/int4-quantization",
      "description": "INT4 quantization compresses LLM weights from 16-bit floating point down to 4-bit integers — it cuts model memory by ~4x and typically doubles inference throughput with only small quality degradation when paired with modern algorithms like GPTQ or AWQ.",
      "category": "concepts",
      "categoryTitle": "Core Concepts",
      "pillar": "Curiosity",
      "keywords": [
        "INT4",
        "4-bit quantization",
        "GPTQ",
        "AWQ",
        "NF4",
        "LLM inference"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "INT8 Quantization",
      "slug": "int8-quantization",
      "url": "https://learn.engineering.vips.edu/concepts/int8-quantization",
      "description": "INT8 quantization stores LLM weights (and sometimes activations) as 8-bit integers — it halves memory versus FP16 while preserving near-baseline accuracy and is the safest first step for deploying a large model on cheaper hardware.",
      "category": "concepts",
      "categoryTitle": "Core Concepts",
      "pillar": "Curiosity",
      "keywords": [
        "INT8",
        "8-bit quantization",
        "LLM.int8()",
        "quantization",
        "inference optimization"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "KV Cache",
      "slug": "kv-cache",
      "url": "https://learn.engineering.vips.edu/concepts/kv-cache",
      "description": "The KV cache stores the key and value tensors computed by self-attention for past tokens so that generating each new token becomes O(1) in sequence length instead of re-processing the entire prefix.",
      "category": "concepts",
      "categoryTitle": "Core Concepts",
      "pillar": "Curiosity",
      "keywords": [
        "KV cache",
        "attention cache",
        "grouped-query attention",
        "GQA",
        "MQA",
        "PagedAttention"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "LLM-as-Judge",
      "slug": "llm-as-judge",
      "url": "https://learn.engineering.vips.edu/concepts/llm-as-judge",
      "description": "LLM-as-judge is an evaluation pattern where a language model grades or ranks another model's outputs, serving as a scalable — if imperfect — substitute for human evaluation.",
      "category": "concepts",
      "categoryTitle": "Core Concepts",
      "pillar": "Curiosity",
      "keywords": [
        "LLM as judge",
        "LLM evaluation",
        "AI evaluation",
        "MT-Bench",
        "pairwise comparison",
        "G-Eval"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "LLM KV-Cache Compression",
      "slug": "llm-kv-cache-compression",
      "url": "https://learn.engineering.vips.edu/concepts/llm-kv-cache-compression",
      "description": "LLM KV-cache compression is a family of techniques — quantization, eviction, low-rank projection, token pruning — that shrink the key/value cache at inference time so long-context and high-batch serving fit on smaller GPUs.",
      "category": "concepts",
      "categoryTitle": "Core Concepts",
      "pillar": "Curiosity",
      "keywords": [
        "KV cache compression",
        "KV quantization",
        "StreamingLLM",
        "H2O",
        "long context",
        "LLM inference"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Local Attention",
      "slug": "local-attention",
      "url": "https://learn.engineering.vips.edu/concepts/local-attention",
      "description": "Local Attention is a family of attention patterns where each token only attends to a small local neighbourhood of tokens rather than the full sequence — it is the general technique behind sliding-window, block, and dilated attention designs.",
      "category": "concepts",
      "categoryTitle": "Core Concepts",
      "pillar": "Curiosity",
      "keywords": [
        "local attention",
        "sparse attention",
        "sliding window",
        "Longformer",
        "BigBird",
        "efficient transformer"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "LoRA (Low-Rank Adaptation)",
      "slug": "lora",
      "url": "https://learn.engineering.vips.edu/concepts/lora",
      "description": "LoRA is a parameter-efficient fine-tuning technique that freezes a base model's weights and trains small low-rank matrices injected into each layer, drastically cutting memory and storage cost.",
      "category": "concepts",
      "categoryTitle": "Core Concepts",
      "pillar": "Curiosity",
      "keywords": [
        "LoRA",
        "low-rank adaptation",
        "PEFT",
        "QLoRA",
        "parameter-efficient fine-tuning"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Mixture of Experts (MoE)",
      "slug": "mixture-of-experts",
      "url": "https://learn.engineering.vips.edu/concepts/mixture-of-experts",
      "description": "Mixture of Experts is a neural architecture where a router sends each token to a small subset of 'expert' sub-networks, giving huge total parameter counts while keeping per-token compute low.",
      "category": "concepts",
      "categoryTitle": "Core Concepts",
      "pillar": "Curiosity",
      "keywords": [
        "mixture of experts",
        "MoE",
        "sparse models",
        "DeepSeek",
        "Mixtral",
        "expert routing"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "MMLU (Massive Multitask Language Understanding)",
      "slug": "mmlu-benchmark",
      "url": "https://learn.engineering.vips.edu/concepts/mmlu-benchmark",
      "description": "MMLU is a widely used LLM evaluation benchmark with about 16,000 multiple-choice questions across 57 subjects — from elementary math to professional law and medicine — designed to measure broad academic and professional knowledge.",
      "category": "concepts",
      "categoryTitle": "Core Concepts",
      "pillar": "Curiosity",
      "keywords": [
        "MMLU",
        "benchmark",
        "evaluation",
        "multitask",
        "language understanding"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Model Parallelism (Tensor and Pipeline)",
      "slug": "model-parallelism",
      "url": "https://learn.engineering.vips.edu/concepts/model-parallelism",
      "description": "Model parallelism is the set of techniques that split a single neural network across multiple GPUs when it is too large to fit on one — primarily tensor parallelism (splitting individual matrix multiplies) and pipeline parallelism (assigning different layers to different GPUs).",
      "category": "concepts",
      "categoryTitle": "Core Concepts",
      "pillar": "Curiosity",
      "keywords": [
        "model parallelism",
        "tensor parallelism",
        "pipeline parallelism",
        "Megatron",
        "distributed training"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Multi-Latent Attention (MLA)",
      "slug": "multi-latent-attention",
      "url": "https://learn.engineering.vips.edu/concepts/multi-latent-attention",
      "description": "Multi-Latent Attention (MLA) is the attention variant introduced by DeepSeek that compresses keys and values into a low-rank latent vector — it shrinks the KV cache by an order of magnitude while matching or beating multi-head attention quality.",
      "category": "concepts",
      "categoryTitle": "Core Concepts",
      "pillar": "Curiosity",
      "keywords": [
        "multi-latent attention",
        "MLA",
        "DeepSeek",
        "KV cache compression",
        "low-rank attention",
        "DeepSeek-V2"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Multi-Query Attention (MQA)",
      "slug": "multi-query-attention",
      "url": "https://learn.engineering.vips.edu/concepts/multi-query-attention",
      "description": "Multi-Query Attention (MQA) is an attention variant where all query heads share a single key/value head — it shrinks the KV cache dramatically and speeds up autoregressive decoding at the cost of a small quality drop.",
      "category": "concepts",
      "categoryTitle": "Core Concepts",
      "pillar": "Curiosity",
      "keywords": [
        "multi-query attention",
        "MQA",
        "KV cache",
        "Shazeer",
        "PaLM",
        "inference optimization"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "PagedAttention",
      "slug": "pagedattention",
      "url": "https://learn.engineering.vips.edu/concepts/pagedattention",
      "description": "PagedAttention is a GPU memory-management technique from vLLM that stores each sequence's key-value cache in fixed-size non-contiguous blocks — like virtual-memory paging in an OS — eliminating the internal fragmentation that cripples naive KV-cache allocation.",
      "category": "concepts",
      "categoryTitle": "Core Concepts",
      "pillar": "Curiosity",
      "keywords": [
        "PagedAttention",
        "vLLM",
        "KV cache",
        "LLM serving",
        "memory management"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Perplexity",
      "slug": "perplexity-metric",
      "url": "https://learn.engineering.vips.edu/concepts/perplexity-metric",
      "description": "Perplexity is the exponential of the average negative log-likelihood a language model assigns to a held-out text — lower is better, and it is the oldest and simplest measure of how well a model 'predicts' natural language.",
      "category": "concepts",
      "categoryTitle": "Core Concepts",
      "pillar": "Curiosity",
      "keywords": [
        "perplexity",
        "PPL",
        "language model evaluation",
        "cross-entropy",
        "NLL"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Planning in LLM Agents",
      "slug": "planning-in-agents",
      "url": "https://learn.engineering.vips.edu/concepts/planning-in-agents",
      "description": "Planning is the agent capability of breaking a high-level goal into a sequence (or tree) of concrete sub-steps before acting, and revising the plan as new information arrives from tool results.",
      "category": "concepts",
      "categoryTitle": "Core Concepts",
      "pillar": "Curiosity",
      "keywords": [
        "agent planning",
        "ReAct",
        "Plan-and-Execute",
        "LLMCompiler",
        "agents"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Positional Encoding",
      "slug": "positional-encoding",
      "url": "https://learn.engineering.vips.edu/concepts/positional-encoding",
      "description": "Positional encoding is the technique that injects token-order information into a Transformer, since self-attention by itself is permutation-invariant and cannot distinguish sequence position.",
      "category": "concepts",
      "categoryTitle": "Core Concepts",
      "pillar": "Curiosity",
      "keywords": [
        "positional encoding",
        "positional embeddings",
        "RoPE",
        "ALiBi",
        "sinusoidal encoding"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Proximal Policy Optimization (PPO)",
      "slug": "ppo",
      "url": "https://learn.engineering.vips.edu/concepts/ppo",
      "description": "Proximal Policy Optimization (PPO) is the on-policy reinforcement-learning algorithm that became the default optimizer for RLHF — it constrains updates with a clipped ratio between new and old policies for stable training on language models.",
      "category": "concepts",
      "categoryTitle": "Core Concepts",
      "pillar": "Curiosity",
      "keywords": [
        "PPO",
        "proximal policy optimization",
        "RLHF",
        "policy gradient",
        "InstructGPT",
        "reinforcement learning"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Prompt Caching",
      "slug": "prompt-caching",
      "url": "https://learn.engineering.vips.edu/concepts/prompt-caching",
      "description": "Prompt caching is a server-side optimization that stores the KV-cache state of a stable prompt prefix so repeated requests reuse it, cutting latency and cost for long system prompts, tools, and documents.",
      "category": "concepts",
      "categoryTitle": "Core Concepts",
      "pillar": "Curiosity",
      "keywords": [
        "prompt caching",
        "KV cache reuse",
        "Anthropic prompt caching",
        "OpenAI prompt caching",
        "prefix caching"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Prompt Chaining",
      "slug": "prompt-chaining",
      "url": "https://learn.engineering.vips.edu/concepts/prompt-chaining",
      "description": "Prompt chaining is the pattern of decomposing a complex task into a sequence of simpler prompts, where each step's output feeds the next — trading latency for more reliable, auditable behavior than a single monolithic prompt.",
      "category": "concepts",
      "categoryTitle": "Core Concepts",
      "pillar": "Curiosity",
      "keywords": [
        "prompt chaining",
        "LLM pipelines",
        "LangChain",
        "DSPy"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Prompt Injection",
      "slug": "prompt-injection",
      "url": "https://learn.engineering.vips.edu/concepts/prompt-injection",
      "description": "Prompt injection is an attack where adversarial instructions hidden in untrusted input — a document, webpage, email, or tool output — override the developer's intended prompt and cause the LLM to behave maliciously.",
      "category": "concepts",
      "categoryTitle": "Core Concepts",
      "pillar": "Curiosity",
      "keywords": [
        "prompt injection",
        "indirect prompt injection",
        "LLM security",
        "jailbreak",
        "adversarial prompts"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "QLoRA — 4-bit Quantized LoRA Fine-Tuning",
      "slug": "qlora",
      "url": "https://learn.engineering.vips.edu/concepts/qlora",
      "description": "QLoRA is a fine-tuning method that quantizes a frozen base LLM to 4-bit NF4 weights and trains small LoRA adapters on top — it shrinks the memory footprint enough to fine-tune 65B-parameter models on a single 48 GB GPU.",
      "category": "concepts",
      "categoryTitle": "Core Concepts",
      "pillar": "Curiosity",
      "keywords": [
        "QLoRA",
        "4-bit fine-tuning",
        "LoRA",
        "NF4",
        "Guanaco",
        "parameter-efficient fine-tuning"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Quantization",
      "slug": "quantization",
      "url": "https://learn.engineering.vips.edu/concepts/quantization",
      "description": "Quantization is the technique of representing neural network weights and activations with fewer bits — typically INT8, INT4, or FP8 — to shrink memory use and speed up inference with minimal quality loss.",
      "category": "concepts",
      "categoryTitle": "Core Concepts",
      "pillar": "Curiosity",
      "keywords": [
        "quantization",
        "INT8",
        "INT4",
        "FP8",
        "GPTQ",
        "AWQ",
        "GGUF",
        "bitsandbytes"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Query Rewriting (for RAG)",
      "slug": "query-rewriting",
      "url": "https://learn.engineering.vips.edu/concepts/query-rewriting",
      "description": "Query rewriting is the step in a RAG pipeline where the original user query is transformed — expanded, decomposed, or reformulated — before retrieval, to increase the chance of matching the right passages in the index.",
      "category": "concepts",
      "categoryTitle": "Core Concepts",
      "pillar": "Curiosity",
      "keywords": [
        "query rewriting",
        "query expansion",
        "query decomposition",
        "HyDE",
        "RAG"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "RAGAS Metrics",
      "slug": "ragas-metrics",
      "url": "https://learn.engineering.vips.edu/concepts/ragas-metrics",
      "description": "RAGAS is an open-source evaluation framework for RAG pipelines that scores outputs along four LLM-graded dimensions — faithfulness, answer relevance, context precision, and context recall — without needing ground-truth labels.",
      "category": "concepts",
      "categoryTitle": "Core Concepts",
      "pillar": "Curiosity",
      "keywords": [
        "RAGAS",
        "RAG evaluation",
        "faithfulness",
        "context precision",
        "LLM as judge"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "ReAct (Reason + Act)",
      "slug": "react-pattern",
      "url": "https://learn.engineering.vips.edu/concepts/react-pattern",
      "description": "ReAct is an agent pattern where an LLM interleaves reasoning traces with tool-using actions and observations, producing a Thought-Action-Observation loop until the task is solved.",
      "category": "concepts",
      "categoryTitle": "Core Concepts",
      "pillar": "Curiosity",
      "keywords": [
        "ReAct",
        "agent pattern",
        "reasoning and acting",
        "tool use",
        "LLM agent"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Reflexion (Self-Reflection Loop)",
      "slug": "reflexion-pattern",
      "url": "https://learn.engineering.vips.edu/concepts/reflexion-pattern",
      "description": "Reflexion is an agent pattern where, after an attempt fails, the LLM writes a natural-language self-critique of what went wrong and stores it in episodic memory so the next attempt is better informed — learning by reflection instead of gradient descent.",
      "category": "concepts",
      "categoryTitle": "Core Concepts",
      "pillar": "Curiosity",
      "keywords": [
        "reflexion",
        "self-reflection",
        "agent",
        "self-critique",
        "verbal reinforcement"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Reranking",
      "slug": "reranking",
      "url": "https://learn.engineering.vips.edu/concepts/reranking",
      "description": "Reranking is a second-stage retrieval step where a heavier cross-encoder model rescores the top-k candidates from a fast first-stage retriever, reordering them so the most relevant passages end up in the prompt.",
      "category": "concepts",
      "categoryTitle": "Core Concepts",
      "pillar": "Curiosity",
      "keywords": [
        "reranking",
        "cross-encoder",
        "RAG",
        "retrieval",
        "Cohere Rerank"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Retrieval-Augmented Generation (RAG)",
      "slug": "retrieval-augmented-generation",
      "url": "https://learn.engineering.vips.edu/concepts/retrieval-augmented-generation",
      "description": "Retrieval-Augmented Generation (RAG) is a pattern where an LLM is grounded on retrieved passages at query time — fewer hallucinations, up-to-date answers, no retraining required.",
      "category": "concepts",
      "categoryTitle": "Core Concepts",
      "pillar": "Curiosity",
      "keywords": [
        "RAG",
        "retrieval augmented generation",
        "embeddings",
        "vector database"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Reinforcement Learning from AI Feedback (RLAIF)",
      "slug": "rlaif",
      "url": "https://learn.engineering.vips.edu/concepts/rlaif",
      "description": "Reinforcement Learning from AI Feedback (RLAIF) is a post-training technique where a strong AI model, rather than humans, produces the preference labels used to train a reward model — it scales alignment beyond what human annotation can cheaply provide.",
      "category": "concepts",
      "categoryTitle": "Core Concepts",
      "pillar": "Curiosity",
      "keywords": [
        "RLAIF",
        "AI feedback",
        "constitutional AI",
        "RLHF alternative",
        "preference learning",
        "alignment"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Reinforcement Learning from Human Feedback (RLHF)",
      "slug": "rlhf",
      "url": "https://learn.engineering.vips.edu/concepts/rlhf",
      "description": "RLHF is the training technique that aligns a language model's behavior with human preferences by using human-ranked outputs to train a reward model, then fine-tuning the LLM against that reward with reinforcement learning.",
      "category": "concepts",
      "categoryTitle": "Core Concepts",
      "pillar": "Curiosity",
      "keywords": [
        "RLHF",
        "reinforcement learning from human feedback",
        "alignment",
        "reward model",
        "PPO"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Rotary Position Embeddings (RoPE)",
      "slug": "rotary-position-embeddings",
      "url": "https://learn.engineering.vips.edu/concepts/rotary-position-embeddings",
      "description": "Rotary Position Embeddings (RoPE) encode token position by rotating the query and key vectors inside self-attention, so relative position falls out of the attention dot product directly.",
      "category": "concepts",
      "categoryTitle": "Core Concepts",
      "pillar": "Curiosity",
      "keywords": [
        "RoPE",
        "rotary position embedding",
        "RoFormer",
        "YaRN",
        "LongRoPE"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Self-Attention",
      "slug": "self-attention",
      "url": "https://learn.engineering.vips.edu/concepts/self-attention",
      "description": "Self-attention is the mechanism that lets a Transformer weigh how strongly each token in a sequence relates to every other token, producing context-aware representations.",
      "category": "concepts",
      "categoryTitle": "Core Concepts",
      "pillar": "Curiosity",
      "keywords": [
        "self-attention",
        "attention mechanism",
        "multi-head attention",
        "QKV",
        "scaled dot-product attention"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Self-Consistency Decoding",
      "slug": "self-consistency",
      "url": "https://learn.engineering.vips.edu/concepts/self-consistency",
      "description": "Self-consistency is a decoding strategy that samples multiple chain-of-thought reasoning paths from an LLM at non-zero temperature, then picks the final answer by majority vote across the samples.",
      "category": "concepts",
      "categoryTitle": "Core Concepts",
      "pillar": "Curiosity",
      "keywords": [
        "self-consistency",
        "chain-of-thought",
        "decoding",
        "reasoning"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Semantic Chunking",
      "slug": "semantic-chunking",
      "url": "https://learn.engineering.vips.edu/concepts/semantic-chunking",
      "description": "Semantic chunking splits documents at points where the embedding similarity between consecutive sentences drops sharply — instead of fixed sizes, chunks naturally end when the topic changes, improving retrieval coherence.",
      "category": "concepts",
      "categoryTitle": "Core Concepts",
      "pillar": "Curiosity",
      "keywords": [
        "semantic chunking",
        "RAG",
        "embedding-based splitting",
        "topic boundaries"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Supervised Fine-Tuning (SFT)",
      "slug": "sft",
      "url": "https://learn.engineering.vips.edu/concepts/sft",
      "description": "Supervised Fine-Tuning (SFT) is the first post-training step for an LLM where the base model is trained on curated input-output pairs to follow instructions — it is the foundation every RLHF, DPO, or GRPO pipeline builds on top of.",
      "category": "concepts",
      "categoryTitle": "Core Concepts",
      "pillar": "Curiosity",
      "keywords": [
        "SFT",
        "supervised fine-tuning",
        "instruction tuning",
        "LLM post-training",
        "demonstration learning"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Sliding Window Attention",
      "slug": "sliding-window-attention",
      "url": "https://learn.engineering.vips.edu/concepts/sliding-window-attention",
      "description": "Sliding Window Attention is an attention pattern where each token only attends to a fixed-size window of recent tokens — it turns quadratic full attention into linear-cost local attention and is the basis for Mistral's long-context design.",
      "category": "concepts",
      "categoryTitle": "Core Concepts",
      "pillar": "Curiosity",
      "keywords": [
        "sliding window attention",
        "SWA",
        "local attention",
        "Mistral",
        "long context",
        "efficient transformer"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Speculative Decoding",
      "slug": "speculative-decoding",
      "url": "https://learn.engineering.vips.edu/concepts/speculative-decoding",
      "description": "Speculative decoding is an inference acceleration technique where a small 'draft' model proposes several tokens and a large 'target' model verifies them in parallel, yielding 2-3x speedup with identical outputs.",
      "category": "concepts",
      "categoryTitle": "Core Concepts",
      "pillar": "Curiosity",
      "keywords": [
        "speculative decoding",
        "speculative sampling",
        "Medusa",
        "EAGLE",
        "LLM inference",
        "draft model"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Structured Output",
      "slug": "structured-output",
      "url": "https://learn.engineering.vips.edu/concepts/structured-output",
      "description": "Structured output is the capability of having an LLM return JSON, a typed schema, or a tool call that conforms exactly to a declared structure — the bridge between free-form language models and deterministic code.",
      "category": "concepts",
      "categoryTitle": "Core Concepts",
      "pillar": "Curiosity",
      "keywords": [
        "structured output",
        "JSON mode",
        "function calling",
        "tool use",
        "Pydantic",
        "JSON schema"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "SWE-bench",
      "slug": "swe-bench",
      "url": "https://learn.engineering.vips.edu/concepts/swe-bench",
      "description": "SWE-bench is an LLM evaluation benchmark of real GitHub issues paired with their resolving pull requests from popular Python repositories, where the model must edit the codebase so that a set of hidden tests pass.",
      "category": "concepts",
      "categoryTitle": "Core Concepts",
      "pillar": "Curiosity",
      "keywords": [
        "SWE-bench",
        "coding benchmark",
        "evaluation",
        "agents",
        "software engineering"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Temperature Sampling",
      "slug": "temperature-sampling",
      "url": "https://learn.engineering.vips.edu/concepts/temperature-sampling",
      "description": "Temperature sampling is a decoding knob that divides the model's logits by a temperature T before softmax — lower T sharpens the distribution toward the argmax, higher T flattens it and increases randomness.",
      "category": "concepts",
      "categoryTitle": "Core Concepts",
      "pillar": "Curiosity",
      "keywords": [
        "temperature",
        "sampling",
        "decoding",
        "softmax temperature",
        "LLM generation"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Tokenization",
      "slug": "tokenization",
      "url": "https://learn.engineering.vips.edu/concepts/tokenization",
      "description": "Tokenization is the process of breaking text into discrete units — usually subwords — that a language model actually consumes as input, using algorithms like BPE, WordPiece, or SentencePiece.",
      "category": "concepts",
      "categoryTitle": "Core Concepts",
      "pillar": "Curiosity",
      "keywords": [
        "tokenization",
        "BPE",
        "byte-pair encoding",
        "SentencePiece",
        "WordPiece",
        "subword tokenization"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Tool Calling (Function Calling)",
      "slug": "tool-calling",
      "url": "https://learn.engineering.vips.edu/concepts/tool-calling",
      "description": "Tool calling is the capability where an LLM emits a structured request to invoke an external function — weather lookup, SQL query, code execution — the runtime executes it, returns the result, and the model continues with that result in context.",
      "category": "concepts",
      "categoryTitle": "Core Concepts",
      "pillar": "Curiosity",
      "keywords": [
        "tool calling",
        "function calling",
        "agents",
        "MCP",
        "tool use"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Top-k Sampling",
      "slug": "top-k-sampling",
      "url": "https://learn.engineering.vips.edu/concepts/top-k-sampling",
      "description": "Top-k sampling restricts next-token choice to the k most-probable tokens, renormalizes those probabilities, and samples from the resulting distribution — a simple way to cut the long tail of low-probability garbage tokens.",
      "category": "concepts",
      "categoryTitle": "Core Concepts",
      "pillar": "Curiosity",
      "keywords": [
        "top-k sampling",
        "decoding",
        "LLM generation",
        "truncated sampling"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Top-p (Nucleus) Sampling",
      "slug": "top-p-nucleus-sampling",
      "url": "https://learn.engineering.vips.edu/concepts/top-p-nucleus-sampling",
      "description": "Top-p sampling, also called nucleus sampling, restricts the next-token distribution to the smallest set of tokens whose cumulative probability exceeds p, then renormalizes — it adapts the candidate pool dynamically to the model's confidence.",
      "category": "concepts",
      "categoryTitle": "Core Concepts",
      "pillar": "Curiosity",
      "keywords": [
        "top-p",
        "nucleus sampling",
        "decoding",
        "LLM generation",
        "Holtzman"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Transformer Architecture",
      "slug": "transformer-architecture",
      "url": "https://learn.engineering.vips.edu/concepts/transformer-architecture",
      "description": "Transformer architecture is a neural network design built around self-attention that replaced recurrent networks for sequence modeling and underpins virtually every modern large language model.",
      "category": "concepts",
      "categoryTitle": "Core Concepts",
      "pillar": "Curiosity",
      "keywords": [
        "transformer",
        "attention is all you need",
        "neural network architecture",
        "LLM architecture"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Decoder-Only Transformer",
      "slug": "transformer-decoder-only",
      "url": "https://learn.engineering.vips.edu/concepts/transformer-decoder-only",
      "description": "A decoder-only transformer is a stack of transformer blocks with causal (masked) self-attention that predicts the next token conditioned on all previous tokens — the architecture behind GPT, Claude, Llama, and most modern LLMs.",
      "category": "concepts",
      "categoryTitle": "Core Concepts",
      "pillar": "Curiosity",
      "keywords": [
        "decoder-only transformer",
        "autoregressive LLM",
        "GPT architecture",
        "transformer"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Tree of Thoughts (ToT)",
      "slug": "tree-of-thoughts",
      "url": "https://learn.engineering.vips.edu/concepts/tree-of-thoughts",
      "description": "Tree of Thoughts is a prompting framework where the LLM explores a search tree of intermediate reasoning steps, evaluates each state, and uses BFS or DFS with pruning to find a solution — generalizing chain-of-thought from a straight line to a branching search.",
      "category": "concepts",
      "categoryTitle": "Core Concepts",
      "pillar": "Curiosity",
      "keywords": [
        "tree of thoughts",
        "ToT",
        "reasoning",
        "chain-of-thought",
        "search"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Vector Database",
      "slug": "vector-database",
      "url": "https://learn.engineering.vips.edu/concepts/vector-database",
      "description": "A vector database is a specialized store that indexes high-dimensional embeddings and serves fast approximate nearest-neighbor (ANN) similarity search — the retrieval layer underneath most RAG and semantic-search systems.",
      "category": "concepts",
      "categoryTitle": "Core Concepts",
      "pillar": "Curiosity",
      "keywords": [
        "vector database",
        "vector store",
        "ANN",
        "HNSW",
        "embeddings",
        "similarity search"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Vision-Language Models (VLMs)",
      "slug": "vision-language-models",
      "url": "https://learn.engineering.vips.edu/concepts/vision-language-models",
      "description": "Vision-Language Models are multimodal neural networks that accept images (and sometimes video) alongside text, producing language outputs grounded in what they see.",
      "category": "concepts",
      "categoryTitle": "Core Concepts",
      "pillar": "Curiosity",
      "keywords": [
        "vision language model",
        "VLM",
        "multimodal",
        "CLIP",
        "LLaVA",
        "image understanding"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Zero-Shot Prompting",
      "slug": "zero-shot-prompting",
      "url": "https://learn.engineering.vips.edu/concepts/zero-shot-prompting",
      "description": "Zero-shot prompting is asking the LLM to perform a task from an instruction alone, with no worked examples in the prompt. It relies entirely on the model's pretrained knowledge and instruction-tuned capabilities.",
      "category": "concepts",
      "categoryTitle": "Core Concepts",
      "pillar": "Curiosity",
      "keywords": [
        "zero-shot prompting",
        "instruction following",
        "prompting"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "AI Crop Pest and Disease Detection",
      "slug": "agriculture-pest-disease-detection",
      "url": "https://learn.engineering.vips.edu/applications/agriculture-pest-disease-detection",
      "description": "Smartphone-based computer vision identifies crop diseases and pests from field photos, linking smallholder farmers to targeted agronomy advice — a high-impact application for Indian and sub-Saharan African food security.",
      "category": "applications",
      "categoryTitle": "Applications",
      "pillar": "Contribution",
      "keywords": [
        "precision agriculture",
        "crop disease detection",
        "PlantVillage",
        "ICAR",
        "smallholder",
        "agri-tech",
        "Kisan"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "AI Voice Agent for Airline Customer Service",
      "slug": "airline-customer-service-voice-agent",
      "url": "https://learn.engineering.vips.edu/applications/airline-customer-service-voice-agent",
      "description": "Airline voice agents handle rebookings, refunds, seat changes, and baggage queries on phone — grounded in PSS and PNR data, with DPDPA-compliant voice handling and hard escalation rules.",
      "category": "applications",
      "categoryTitle": "Applications",
      "pillar": "Contribution",
      "keywords": [
        "voice agent",
        "airline AI",
        "IVR replacement",
        "real-time ASR",
        "customer service"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "AI Protein Structure Prediction (AlphaFold & Beyond)",
      "slug": "biotech-protein-structure-prediction",
      "url": "https://learn.engineering.vips.edu/applications/biotech-protein-structure-prediction",
      "description": "AlphaFold-class models predict protein 3D structure from sequence — compressing years of experimental crystallography into hours and powering drug discovery, enzyme design, and basic biology.",
      "category": "applications",
      "categoryTitle": "Applications",
      "pillar": "Contribution",
      "keywords": [
        "protein folding",
        "AlphaFold",
        "structural biology",
        "drug discovery",
        "ESMFold"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "AI Automation for Climate Carbon Accounting",
      "slug": "climate-carbon-accounting-automation",
      "url": "https://learn.engineering.vips.edu/applications/climate-carbon-accounting-automation",
      "description": "Carbon accounting AI ingests invoices, meter data, and supplier reports — mapping activities to emission factors at Scope 1, 2, and 3 granularity, with auditability for BRSR and CSRD.",
      "category": "applications",
      "categoryTitle": "Applications",
      "pillar": "Contribution",
      "keywords": [
        "carbon accounting",
        "climate AI",
        "ESG",
        "BRSR",
        "CSRD",
        "GHG Protocol"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Enterprise Multilingual Translation",
      "slug": "content-multilingual-translation",
      "url": "https://learn.engineering.vips.edu/applications/content-multilingual-translation",
      "description": "Enterprise translation uses LLMs and specialized NMT models for high-quality multilingual content — documentation, marketing, support, regulated filings — with glossary control, quality estimation, and human post-editing workflows.",
      "category": "applications",
      "categoryTitle": "Applications",
      "pillar": "Contribution",
      "keywords": [
        "machine translation",
        "NMT",
        "LLM translation",
        "glossary",
        "TM",
        "TMS",
        "Bhashini",
        "low-resource languages"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "AI for News Article Summarization",
      "slug": "content-news-article-summarization",
      "url": "https://learn.engineering.vips.edu/applications/content-news-article-summarization",
      "description": "Publishers and aggregators use LLMs to generate summaries, bullet-point TL;DRs, and topic pages — balancing reader value with journalistic integrity and source attribution.",
      "category": "applications",
      "categoryTitle": "Applications",
      "pillar": "Contribution",
      "keywords": [
        "news summarization",
        "journalism AI",
        "publishing",
        "content AI",
        "RAG"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "AI for Podcast Transcription and Chapter Generation",
      "slug": "content-podcast-transcription-and-chapter-generation",
      "url": "https://learn.engineering.vips.edu/applications/content-podcast-transcription-and-chapter-generation",
      "description": "Podcast producers use ASR and LLMs to generate searchable transcripts, chapter markers, show notes, and social clips — dramatically reducing post-production work.",
      "category": "applications",
      "categoryTitle": "Applications",
      "pillar": "Contribution",
      "keywords": [
        "podcast AI",
        "ASR",
        "transcription",
        "show notes",
        "content production"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "AI for Chat Deflection and Knowledge Base Agents",
      "slug": "customer-support-chat-deflection-kb-agent",
      "url": "https://learn.engineering.vips.edu/applications/customer-support-chat-deflection-kb-agent",
      "description": "RAG-based chat agents answer common support questions directly from the KB — deflecting tier-1 volume away from human agents while gracefully escalating anything complex to a human.",
      "category": "applications",
      "categoryTitle": "Applications",
      "pillar": "Contribution",
      "keywords": [
        "chat deflection",
        "support AI",
        "RAG chatbot",
        "self-service",
        "tier-1 automation"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "AI for Support Knowledge Base Generation",
      "slug": "customer-support-knowledge-base-generation",
      "url": "https://learn.engineering.vips.edu/applications/customer-support-knowledge-base-generation",
      "description": "Support teams use LLMs to turn resolved tickets, engineering docs, and SME conversations into searchable knowledge base articles — keeping KB up to date without a dedicated writer.",
      "category": "applications",
      "categoryTitle": "Applications",
      "pillar": "Contribution",
      "keywords": [
        "knowledge base",
        "customer support",
        "self-service",
        "KB generation",
        "support AI"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "AI for Real-Time Agent Sentiment Coaching",
      "slug": "customer-support-sentiment-realtime-coaching",
      "url": "https://learn.engineering.vips.edu/applications/customer-support-sentiment-realtime-coaching",
      "description": "Contact centers use LLMs to analyze live call sentiment and coach agents in real time — suggesting de-escalation phrases, empathy cues, and policy reminders.",
      "category": "applications",
      "categoryTitle": "Applications",
      "pillar": "Contribution",
      "keywords": [
        "agent coaching",
        "sentiment analysis",
        "contact center AI",
        "real-time support",
        "empathy AI"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "AI Customer Support Ticket Triage and Auto-Reply",
      "slug": "customer-support-ticket-triage",
      "url": "https://learn.engineering.vips.edu/applications/customer-support-ticket-triage",
      "description": "Support platforms use LLMs to classify, route, and auto-respond to tickets — with RAG over knowledge bases, confidence thresholds, and graceful human handoff on complex or emotionally charged conversations.",
      "category": "applications",
      "categoryTitle": "Applications",
      "pillar": "Contribution",
      "keywords": [
        "support automation",
        "ticket triage",
        "Intercom Fin",
        "Zendesk",
        "deflection rate",
        "CSAT"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "AI for Customer Support Voice Call Summary",
      "slug": "customer-support-voice-call-summary",
      "url": "https://learn.engineering.vips.edu/applications/customer-support-voice-call-summary",
      "description": "Contact centers use ASR and LLMs to summarize voice calls in real time — populating tickets, capturing next steps, and measuring quality — cutting after-call work by 60-80%.",
      "category": "applications",
      "categoryTitle": "Applications",
      "pillar": "Contribution",
      "keywords": [
        "customer support",
        "call center",
        "contact center AI",
        "ASR",
        "call summarization"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "AI for Automated Runbook Execution in DevOps",
      "slug": "devops-automated-runbook-execution",
      "url": "https://learn.engineering.vips.edu/applications/devops-automated-runbook-execution",
      "description": "Runbook-execution agents combine LLM reasoning with tool-use over Kubernetes, cloud, and infra APIs — safely running declared remediation steps with dry-run and human approval gates.",
      "category": "applications",
      "categoryTitle": "Applications",
      "pillar": "Contribution",
      "keywords": [
        "automated runbooks",
        "SRE",
        "agentic AI",
        "incident response",
        "Kubernetes"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "AI CI/CD Triage Copilot for DevOps",
      "slug": "devops-ci-cd-triage-copilot",
      "url": "https://learn.engineering.vips.edu/applications/devops-ci-cd-triage-copilot",
      "description": "CI/CD triage copilots read failing pipeline logs, correlate with diffs, and propose likely causes and fixes — cutting red-PR time and restoring flow across large engineering orgs.",
      "category": "applications",
      "categoryTitle": "Applications",
      "pillar": "Contribution",
      "keywords": [
        "CI/CD",
        "build triage",
        "developer productivity",
        "agentic copilot",
        "DevEx"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "AI Log Anomaly Detection for DevOps",
      "slug": "devops-log-anomaly-detection",
      "url": "https://learn.engineering.vips.edu/applications/devops-log-anomaly-detection",
      "description": "AI log anomaly detection clusters, parses, and surfaces meaningful deviations across TB-scale logs — flagging incidents before they escalate while resisting alert fatigue.",
      "category": "applications",
      "categoryTitle": "Applications",
      "pillar": "Contribution",
      "keywords": [
        "log analytics",
        "anomaly detection",
        "SRE",
        "observability",
        "AIOps"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "AI Molecule Generation for Drug Discovery",
      "slug": "drug-discovery-molecule-generation",
      "url": "https://learn.engineering.vips.edu/applications/drug-discovery-molecule-generation",
      "description": "Generative chemistry models propose novel drug-like molecules optimized for binding, ADMET, and synthesizability — complementing AlphaFold-scale target understanding with candidate enumeration.",
      "category": "applications",
      "categoryTitle": "Applications",
      "pillar": "Contribution",
      "keywords": [
        "drug discovery",
        "generative chemistry",
        "AI molecules",
        "de novo design",
        "biotech"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "LLM-Informed Dynamic Pricing for E-commerce",
      "slug": "ecommerce-dynamic-pricing",
      "url": "https://learn.engineering.vips.edu/applications/ecommerce-dynamic-pricing",
      "description": "Dynamic pricing uses elasticity models and competitor signals to set SKU prices in near-real time — with LLMs adding narrative reasoning over promotions, inventory, and regulatory limits.",
      "category": "applications",
      "categoryTitle": "Applications",
      "pillar": "Contribution",
      "keywords": [
        "dynamic pricing",
        "price optimization",
        "ecommerce AI",
        "demand elasticity",
        "reinforcement learning"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "AI Product Recommendations for E-commerce",
      "slug": "ecommerce-product-recommendations",
      "url": "https://learn.engineering.vips.edu/applications/ecommerce-product-recommendations",
      "description": "LLM-assisted product recommendations combine embedding retrieval over catalog SKUs with session context and business rules — lifting conversion while respecting user privacy and catalog truth.",
      "category": "applications",
      "categoryTitle": "Applications",
      "pillar": "Contribution",
      "keywords": [
        "product recommendations",
        "ecommerce AI",
        "embedding search",
        "reranking",
        "personalization"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Semantic Product Search for E-commerce",
      "slug": "ecommerce-semantic-search",
      "url": "https://learn.engineering.vips.edu/applications/ecommerce-semantic-search",
      "description": "Semantic search replaces brittle keyword lookup with embedding retrieval plus LLM query understanding — fixing zero-result pages, typos, and natural-language intent like 'gift for my father who likes cricket'.",
      "category": "applications",
      "categoryTitle": "Applications",
      "pillar": "Contribution",
      "keywords": [
        "semantic search",
        "vector search",
        "ecommerce",
        "query understanding",
        "hybrid retrieval"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "AI for Accessibility Auto-Captioning",
      "slug": "education-accessibility-auto-captioning",
      "url": "https://learn.engineering.vips.edu/applications/education-accessibility-auto-captioning",
      "description": "Real-time ASR and LLMs deliver accurate captions, translations, and audio descriptions for lectures and online content — materially widening access for deaf, hard-of-hearing, and multilingual learners.",
      "category": "applications",
      "categoryTitle": "Applications",
      "pillar": "Contribution",
      "keywords": [
        "accessibility",
        "captioning",
        "auto-captions",
        "ASR",
        "WCAG"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "AI Automated Grading (Essays and Code)",
      "slug": "education-automated-grading",
      "url": "https://learn.engineering.vips.edu/applications/education-automated-grading",
      "description": "Automated grading uses LLMs with rubrics to score essays, short answers, and code — giving fast feedback at scale while requiring teacher oversight, bias audits, and transparent scoring rationales.",
      "category": "applications",
      "categoryTitle": "Applications",
      "pillar": "Contribution",
      "keywords": [
        "automated grading",
        "AES",
        "essay scoring",
        "code grading",
        "rubric",
        "edtech",
        "FERPA"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "AI Curriculum and Syllabus Generation",
      "slug": "education-curriculum-generation",
      "url": "https://learn.engineering.vips.edu/applications/education-curriculum-generation",
      "description": "AI generates lesson plans, syllabi, slide decks, and assessment items aligned to curriculum standards — compressing weeks of teacher prep into hours while keeping educators in control of pedagogical design.",
      "category": "applications",
      "categoryTitle": "Applications",
      "pillar": "Contribution",
      "keywords": [
        "curriculum generation",
        "lesson planning",
        "Common Core",
        "NCERT",
        "NEP 2020",
        "CBSE",
        "IB"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "AI for Language-Learning Conversation Partners",
      "slug": "education-language-learning-conversation-partner",
      "url": "https://learn.engineering.vips.edu/applications/education-language-learning-conversation-partner",
      "description": "Real-time voice LLMs serve as infinite-patience conversation partners for language learners — with CEFR-aligned curricula, pronunciation feedback, and cultural context.",
      "category": "applications",
      "categoryTitle": "Applications",
      "pillar": "Contribution",
      "keywords": [
        "language learning",
        "voice AI",
        "edtech",
        "CEFR",
        "Duolingo"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "AI Personalized Tutoring Systems",
      "slug": "education-personalized-tutoring",
      "url": "https://learn.engineering.vips.edu/applications/education-personalized-tutoring",
      "description": "AI tutors use LLMs with pedagogical prompting (Socratic method, spaced repetition, mastery learning) to give students individualized guidance at scale — with learner-safety guardrails and age-appropriate content controls.",
      "category": "applications",
      "categoryTitle": "Applications",
      "pillar": "Contribution",
      "keywords": [
        "AI tutor",
        "personalized learning",
        "Bayesian knowledge tracing",
        "Socratic method",
        "FERPA",
        "COPPA",
        "NEP 2020"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "AI for Proctored Exam Analysis",
      "slug": "education-proctored-exam-analysis",
      "url": "https://learn.engineering.vips.edu/applications/education-proctored-exam-analysis",
      "description": "Remote-proctoring systems use multimodal AI to flag potentially anomalous behavior during online exams for human-proctor review — raising real fairness, accessibility, and bias concerns.",
      "category": "applications",
      "categoryTitle": "Applications",
      "pillar": "Contribution",
      "keywords": [
        "online proctoring",
        "exam integrity",
        "edtech AI",
        "UGC",
        "academic integrity"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "AI for STEM Problem-Solver Tutors",
      "slug": "education-stem-problem-solver-tutor",
      "url": "https://learn.engineering.vips.edu/applications/education-stem-problem-solver-tutor",
      "description": "STEM tutors use LLMs with code-interpreter and step-by-step reasoning to coach learners through physics, math, and engineering problems — without solving the homework for them.",
      "category": "applications",
      "categoryTitle": "Applications",
      "pillar": "Contribution",
      "keywords": [
        "STEM tutoring",
        "edtech AI",
        "math AI",
        "physics tutoring",
        "engineering education"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "AI for Script Rewriting Copilots",
      "slug": "entertainment-script-rewriting-copilot",
      "url": "https://learn.engineering.vips.edu/applications/entertainment-script-rewriting-copilot",
      "description": "Screenwriters and showrunners use LLMs as writing-room copilots — exploring alternate scenes, punching up dialogue, and generating production paperwork — inside WGA-aligned creative control.",
      "category": "applications",
      "categoryTitle": "Applications",
      "pillar": "Contribution",
      "keywords": [
        "script writing AI",
        "film tech",
        "WGA",
        "entertainment AI",
        "screenwriting"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "AI for Esports Match Commentary Generation",
      "slug": "esports-match-commentary-generation",
      "url": "https://learn.engineering.vips.edu/applications/esports-match-commentary-generation",
      "description": "Esports broadcasters use real-time game telemetry and LLMs to generate play-by-play commentary, multilingual dubs, and highlight summaries — supplementing or augmenting human casters.",
      "category": "applications",
      "categoryTitle": "Applications",
      "pillar": "Contribution",
      "keywords": [
        "esports AI",
        "commentary generation",
        "broadcast AI",
        "real-time LLM",
        "gaming"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "AI for Credit Scoring Explainability",
      "slug": "finance-credit-scoring-explainability",
      "url": "https://learn.engineering.vips.edu/applications/finance-credit-scoring-explainability",
      "description": "LLMs translate complex ML credit model decisions into plain-language adverse-action notices and turn internal model explanations into customer-facing reasons consistent with fair lending laws.",
      "category": "applications",
      "categoryTitle": "Applications",
      "pillar": "Contribution",
      "keywords": [
        "credit scoring",
        "explainability",
        "adverse action",
        "fair lending",
        "SHAP"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "AI Customer Service Chatbots in Banking",
      "slug": "finance-customer-service-chatbot",
      "url": "https://learn.engineering.vips.edu/applications/finance-customer-service-chatbot",
      "description": "Banking chatbots handle account queries, card services, loans, and disputes with LLMs backed by RAG over product documentation — subject to RBI, CFPB, and UDAAP rules that penalize misleading or discriminatory responses.",
      "category": "applications",
      "categoryTitle": "Applications",
      "pillar": "Contribution",
      "keywords": [
        "banking chatbot",
        "RBI IT Framework",
        "CFPB",
        "UDAAP",
        "voice AI",
        "IVR"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "LLM-Powered Fraud Detection in Finance",
      "slug": "finance-fraud-detection",
      "url": "https://learn.engineering.vips.edu/applications/finance-fraud-detection",
      "description": "Modern fraud detection combines classical ML (gradient-boosted trees) with LLMs that reason over unstructured signals — chat transcripts, merchant descriptions, device telemetry — to catch novel attack patterns traditional systems miss.",
      "category": "applications",
      "categoryTitle": "Applications",
      "pillar": "Contribution",
      "keywords": [
        "fraud detection",
        "AML",
        "SAR",
        "synthetic identity",
        "RBI",
        "FinCEN",
        "model risk management"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "AI for Invoice Processing Automation",
      "slug": "finance-invoice-processing-automation",
      "url": "https://learn.engineering.vips.edu/applications/finance-invoice-processing-automation",
      "description": "Accounts-payable teams use document AI plus LLMs to extract invoice fields, match to POs, code to GL accounts, and route approvals — achieving straight-through processing with audit trails.",
      "category": "applications",
      "categoryTitle": "Applications",
      "pillar": "Contribution",
      "keywords": [
        "invoice automation",
        "accounts payable",
        "document AI",
        "GST",
        "e-invoicing"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "AI for Portfolio Rebalancing Assistants",
      "slug": "finance-portfolio-rebalancing-assistant",
      "url": "https://learn.engineering.vips.edu/applications/finance-portfolio-rebalancing-assistant",
      "description": "LLM-assisted portfolio rebalancing surfaces drift from target allocations, explains tax and risk implications, and proposes trades — with human advisor approval and SEBI/RIA compliance.",
      "category": "applications",
      "categoryTitle": "Applications",
      "pillar": "Contribution",
      "keywords": [
        "portfolio rebalancing",
        "wealth management AI",
        "SEBI",
        "robo-advisor",
        "fintech"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "AI for AML and KYC Compliance Monitoring",
      "slug": "finance-regulatory-compliance-monitoring",
      "url": "https://learn.engineering.vips.edu/applications/finance-regulatory-compliance-monitoring",
      "description": "AML / KYC automation uses LLMs for adverse media screening, sanctions reasoning, beneficial ownership extraction, and SAR narrative drafting — turning compliance from a cost center into an auditable, scalable function.",
      "category": "applications",
      "categoryTitle": "Applications",
      "pillar": "Contribution",
      "keywords": [
        "AML",
        "KYC",
        "sanctions screening",
        "adverse media",
        "SAR",
        "FATF",
        "beneficial ownership",
        "PMLA"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Equity Research Analyst Copilot",
      "slug": "finance-research-analyst-copilot",
      "url": "https://learn.engineering.vips.edu/applications/finance-research-analyst-copilot",
      "description": "An AI equity-research copilot ingests filings, earnings calls, broker notes, and market data — summarizing, cross-checking, and drafting analyst memos while preserving SEC / SEBI compliance around regulated communications.",
      "category": "applications",
      "categoryTitle": "Applications",
      "pillar": "Contribution",
      "keywords": [
        "equity research",
        "EDGAR",
        "SEBI RA",
        "earnings calls",
        "XBRL",
        "SEC 17a-4",
        "alpha generation"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "AI for Sanctions and AML Screening",
      "slug": "finance-sanctions-screening-automation",
      "url": "https://learn.engineering.vips.edu/applications/finance-sanctions-screening-automation",
      "description": "Banks use LLMs plus entity-matching to screen customers, transactions, and counterparties against sanctions lists — reducing false positives and speeding alert triage while staying within FATF/PMLA bounds.",
      "category": "applications",
      "categoryTitle": "Applications",
      "pillar": "Contribution",
      "keywords": [
        "sanctions screening",
        "AML",
        "KYC",
        "financial crime",
        "compliance AI"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "AI for Tax Return Preparation Copilots",
      "slug": "finance-tax-return-preparation-copilot",
      "url": "https://learn.engineering.vips.edu/applications/finance-tax-return-preparation-copilot",
      "description": "Tax preparation copilots use LLMs and document extraction to draft returns, explain deductions, and flag compliance issues — with CA/CPA review required for filing.",
      "category": "applications",
      "categoryTitle": "Applications",
      "pillar": "Contribution",
      "keywords": [
        "tax preparation",
        "tax AI",
        "GST",
        "ITR",
        "income tax"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "AI Chatbot for Government Citizen Services",
      "slug": "government-citizen-service-chatbot",
      "url": "https://learn.engineering.vips.edu/applications/government-citizen-service-chatbot",
      "description": "Citizen service chatbots answer queries on schemes, documents, tax, and benefits — grounded in authoritative government content in multiple Indian languages, with clear escalation to human officers.",
      "category": "applications",
      "categoryTitle": "Applications",
      "pillar": "Contribution",
      "keywords": [
        "government AI",
        "citizen services",
        "Indic languages",
        "e-governance",
        "chatbot"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "AI Multilingual Translation for Government Documents",
      "slug": "government-document-translation",
      "url": "https://learn.engineering.vips.edu/applications/government-document-translation",
      "description": "Government translation AI converts circulars, Acts, and notifications across 22 Indian languages — with human review for authoritative publication and terminology consistency via government glossaries.",
      "category": "applications",
      "categoryTitle": "Applications",
      "pillar": "Contribution",
      "keywords": [
        "machine translation",
        "Indic languages",
        "Bhashini",
        "government AI",
        "multilingual"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "AI for Grant Proposal Evaluation",
      "slug": "government-grant-proposal-evaluation",
      "url": "https://learn.engineering.vips.edu/applications/government-grant-proposal-evaluation",
      "description": "Grant-making agencies use LLMs to summarize proposals, check completeness, detect duplication, and draft reviewer notes — with peer reviewers and program officers making final decisions.",
      "category": "applications",
      "categoryTitle": "Applications",
      "pillar": "Contribution",
      "keywords": [
        "grant evaluation",
        "research funding",
        "govtech AI",
        "peer review",
        "public sector"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "AI for Government Permit Processing",
      "slug": "government-permit-processing-automation",
      "url": "https://learn.engineering.vips.edu/applications/government-permit-processing-automation",
      "description": "Government agencies use document AI and LLMs to triage permit applications, extract fields, check completeness, and draft decisions — speeding service delivery while preserving due process.",
      "category": "applications",
      "categoryTitle": "Applications",
      "pillar": "Contribution",
      "keywords": [
        "govtech",
        "permit automation",
        "public sector AI",
        "e-governance",
        "administrative law"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "AI Policy Research Copilot for Government",
      "slug": "government-policy-research-copilot",
      "url": "https://learn.engineering.vips.edu/applications/government-policy-research-copilot",
      "description": "Policy research copilots help officers synthesize legislation, case law, and international precedents into briefing notes — grounded in authoritative sources with transparent citation.",
      "category": "applications",
      "categoryTitle": "Applications",
      "pillar": "Contribution",
      "keywords": [
        "policy research",
        "government AI",
        "legal RAG",
        "briefing notes",
        "copilot"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "AI for Public Records and FOIA/RTI Triage",
      "slug": "government-public-records-foia-triage",
      "url": "https://learn.engineering.vips.edu/applications/government-public-records-foia-triage",
      "description": "Agencies use LLMs to triage public records requests, identify responsive documents, suggest redactions, and draft response letters — reducing backlog while respecting access-to-information laws.",
      "category": "applications",
      "categoryTitle": "Applications",
      "pillar": "Contribution",
      "keywords": [
        "FOIA",
        "RTI",
        "public records",
        "government AI",
        "transparency"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "AI for Clinical Documentation Summarization",
      "slug": "healthcare-clinical-summarization",
      "url": "https://learn.engineering.vips.edu/applications/healthcare-clinical-summarization",
      "description": "Clinical summarization uses LLMs to condense patient records, consult notes, and discharge summaries — a high-value, high-risk application requiring RAG, evaluation, and audit trails.",
      "category": "applications",
      "categoryTitle": "Applications",
      "pillar": "Contribution",
      "keywords": [
        "clinical summarization",
        "healthcare AI",
        "medical RAG",
        "EMR",
        "HIPAA"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "AI for Clinical Trial Patient Matching",
      "slug": "healthcare-clinical-trial-matching",
      "url": "https://learn.engineering.vips.edu/applications/healthcare-clinical-trial-matching",
      "description": "Clinical trial matching uses LLMs to parse eligibility criteria against patient records to surface candidates for recruitment — accelerating enrollment while protecting patient privacy and informed consent.",
      "category": "applications",
      "categoryTitle": "Applications",
      "pillar": "Contribution",
      "keywords": [
        "clinical trial matching",
        "trial recruitment",
        "healthcare AI",
        "EMR",
        "eligibility criteria"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "AI for Drug Interaction Checking",
      "slug": "healthcare-drug-interaction-checking",
      "url": "https://learn.engineering.vips.edu/applications/healthcare-drug-interaction-checking",
      "description": "LLM-assisted drug interaction checking combines RAG over pharmacology databases with patient-specific context to surface interactions, contraindications, and dosing concerns for clinician review.",
      "category": "applications",
      "categoryTitle": "Applications",
      "pillar": "Contribution",
      "keywords": [
        "drug interactions",
        "pharmacology AI",
        "clinical decision support",
        "RxNorm",
        "DrugBank"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "AI-Assisted Medical Coding (ICD-10 and CPT)",
      "slug": "healthcare-medical-coding",
      "url": "https://learn.engineering.vips.edu/applications/healthcare-medical-coding",
      "description": "LLMs map clinical notes to ICD-10-CM diagnosis codes and CPT procedure codes for billing and claims — a high-volume, high-revenue-impact workflow where hallucinated codes translate directly into regulatory risk and denied claims.",
      "category": "applications",
      "categoryTitle": "Applications",
      "pillar": "Contribution",
      "keywords": [
        "medical coding",
        "ICD-10",
        "CPT",
        "HCC",
        "DRG",
        "revenue cycle management",
        "AAPC"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "AI for Mental Health Chat Triage",
      "slug": "healthcare-mental-health-chat-triage",
      "url": "https://learn.engineering.vips.edu/applications/healthcare-mental-health-chat-triage",
      "description": "Mental health triage chatbots use LLMs to screen incoming patient messages for risk, route urgent cases to clinicians, and suggest self-help resources — with crisis-handling guardrails and clinician oversight.",
      "category": "applications",
      "categoryTitle": "Applications",
      "pillar": "Contribution",
      "keywords": [
        "mental health AI",
        "triage chatbot",
        "healthcare AI",
        "crisis detection",
        "PHQ-9"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "AI for Patient Appointment Scheduling",
      "slug": "healthcare-patient-appointment-scheduling",
      "url": "https://learn.engineering.vips.edu/applications/healthcare-patient-appointment-scheduling",
      "description": "Voice and chat agents handle appointment booking, rescheduling, reminders, and triage intake — reducing call-center load while respecting accessibility and data-protection requirements.",
      "category": "applications",
      "categoryTitle": "Applications",
      "pillar": "Contribution",
      "keywords": [
        "appointment scheduling",
        "voice AI",
        "healthcare chatbot",
        "telephony AI",
        "patient experience"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "AI for Prior Authorization Automation",
      "slug": "healthcare-prior-authorization",
      "url": "https://learn.engineering.vips.edu/applications/healthcare-prior-authorization",
      "description": "Prior authorization — insurer approval before a service is rendered — is a slow, paperwork-heavy bottleneck. AI automates eligibility checks, policy lookup, and clinical evidence extraction to speed approval decisions and reduce denials.",
      "category": "applications",
      "categoryTitle": "Applications",
      "pillar": "Contribution",
      "keywords": [
        "prior authorization",
        "IRDAI",
        "CMS interoperability rule",
        "FHIR PAS",
        "medical necessity",
        "utilization management"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "AI-Assisted Radiology Reporting",
      "slug": "healthcare-radiology-reporting",
      "url": "https://learn.engineering.vips.edu/applications/healthcare-radiology-reporting",
      "description": "AI-assisted radiology reporting uses vision-language models and LLMs to draft preliminary reports from CT, MRI, and X-ray studies — accelerating radiologist workflow while keeping humans in the loop for diagnostic sign-off.",
      "category": "applications",
      "categoryTitle": "Applications",
      "pillar": "Contribution",
      "keywords": [
        "radiology AI",
        "DICOM",
        "vision-language models",
        "RSNA",
        "FDA 510(k)",
        "clinical AI"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "AI Concierge Chatbot for Hospitality",
      "slug": "hospitality-concierge-chatbot",
      "url": "https://learn.engineering.vips.edu/applications/hospitality-concierge-chatbot",
      "description": "Hotel concierge bots handle 24x7 guest requests — room service, spa booking, local tips, problem reports — grounded in the hotel PMS and a policy-constrained knowledge base.",
      "category": "applications",
      "categoryTitle": "Applications",
      "pillar": "Contribution",
      "keywords": [
        "hospitality AI",
        "concierge chatbot",
        "hotel tech",
        "guest experience",
        "RAG"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "AI for Employee Onboarding Assistants",
      "slug": "hr-employee-onboarding-assistant",
      "url": "https://learn.engineering.vips.edu/applications/hr-employee-onboarding-assistant",
      "description": "HR teams use LLM assistants to answer new-hire questions from HR policies, route requests, and guide onboarding tasks — with privacy-aware retrieval and escalation.",
      "category": "applications",
      "categoryTitle": "Applications",
      "pillar": "Contribution",
      "keywords": [
        "HR AI",
        "employee onboarding",
        "HR chatbot",
        "people ops",
        "employee experience"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "AI for Internal Knowledge Search Assistants",
      "slug": "hr-internal-knowledge-search-assistant",
      "url": "https://learn.engineering.vips.edu/applications/hr-internal-knowledge-search-assistant",
      "description": "Organizations deploy LLM-powered internal search over wikis, docs, Slack, and email — surfacing institutional knowledge with permission-aware retrieval and full audit trails.",
      "category": "applications",
      "categoryTitle": "Applications",
      "pillar": "Contribution",
      "keywords": [
        "enterprise search",
        "internal knowledge",
        "HR AI",
        "RAG",
        "knowledge management"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "AI for Performance Review Drafting",
      "slug": "hr-performance-review-drafting",
      "url": "https://learn.engineering.vips.edu/applications/hr-performance-review-drafting",
      "description": "Managers use LLMs to draft performance reviews from notes, 1:1 logs, and peer feedback — with explicit human editorial ownership and bias monitoring.",
      "category": "applications",
      "categoryTitle": "Applications",
      "pillar": "Contribution",
      "keywords": [
        "performance reviews",
        "HR AI",
        "360 feedback",
        "people management",
        "review drafting"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "AI Resume Screening (with Bias Risk Awareness)",
      "slug": "hr-resume-screening",
      "url": "https://learn.engineering.vips.edu/applications/hr-resume-screening",
      "description": "AI resume screening uses LLMs to extract structured candidate profiles and rank against job criteria — a regulatory flashpoint due to EEOC, NYC Local Law 144, and EU AI Act Annex III classifications as high-risk employment AI.",
      "category": "applications",
      "categoryTitle": "Applications",
      "pillar": "Contribution",
      "keywords": [
        "resume screening",
        "ATS",
        "EEOC",
        "NYC Local Law 144",
        "EU AI Act Annex III",
        "fair hiring",
        "bias audit"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "AI for Industrial Field Service Copilots",
      "slug": "industrial-field-service-copilot",
      "url": "https://learn.engineering.vips.edu/applications/industrial-field-service-copilot",
      "description": "Field technicians use mobile LLM copilots with equipment manuals, past repair history, and AR to diagnose and fix industrial equipment — even for equipment they've never seen before.",
      "category": "applications",
      "categoryTitle": "Applications",
      "pillar": "Contribution",
      "keywords": [
        "field service AI",
        "industrial copilot",
        "AR maintenance",
        "technician copilot",
        "mobile AI"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "AI for Insurance Claims Adjudication",
      "slug": "insurance-claims-adjudication",
      "url": "https://learn.engineering.vips.edu/applications/insurance-claims-adjudication",
      "description": "Claims adjudication AI triages, extracts, and scores claims across motor, health, and property — grounded in policy documents, IRDAI norms, and structured rules, with hard human review on denials.",
      "category": "applications",
      "categoryTitle": "Applications",
      "pillar": "Contribution",
      "keywords": [
        "insurance AI",
        "claims adjudication",
        "IRDAI",
        "IDP",
        "policy RAG"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "AI Underwriting & Risk Assessment for Insurance",
      "slug": "insurance-underwriting-risk-assessment",
      "url": "https://learn.engineering.vips.edu/applications/insurance-underwriting-risk-assessment",
      "description": "AI underwriting combines traditional actuarial models with LLM-driven document review and external signal ingestion — pricing risk faster without drifting away from IRDAI-filed rates.",
      "category": "applications",
      "categoryTitle": "Applications",
      "pillar": "Contribution",
      "keywords": [
        "underwriting AI",
        "risk assessment",
        "insurance",
        "IRDAI",
        "actuarial"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "AI Legal Case Law Research Assistant",
      "slug": "legal-case-law-research",
      "url": "https://learn.engineering.vips.edu/applications/legal-case-law-research",
      "description": "AI legal research tools ground LLMs in curated case-law corpora (Westlaw, Manupatra, SCC Online) to produce cited, jurisdictionally correct answers — avoiding the fabricated-citation disasters of ungrounded generative search.",
      "category": "applications",
      "categoryTitle": "Applications",
      "pillar": "Contribution",
      "keywords": [
        "legal research",
        "case law",
        "Westlaw",
        "Manupatra",
        "SCC Online",
        "Shepardizing",
        "citator"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "AI Contract Review and Redlining",
      "slug": "legal-contract-review",
      "url": "https://learn.engineering.vips.edu/applications/legal-contract-review",
      "description": "AI contract review uses LLMs to surface risky clauses, compare against playbooks, draft redlines, and negotiate against counterparty paper — freeing lawyers from first-pass review while keeping final judgment human.",
      "category": "applications",
      "categoryTitle": "Applications",
      "pillar": "Contribution",
      "keywords": [
        "contract review",
        "redlining",
        "CLM",
        "legal playbook",
        "NDA automation",
        "DPA",
        "UPL"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "AI in E-Discovery and Document Review",
      "slug": "legal-discovery-document-review",
      "url": "https://learn.engineering.vips.edu/applications/legal-discovery-document-review",
      "description": "E-discovery uses LLMs for privilege review, responsiveness coding, concept search, and investigation summaries — replacing Technology-Assisted Review (TAR) first-pass work with models that reason over legal issues and facts.",
      "category": "applications",
      "categoryTitle": "Applications",
      "pillar": "Contribution",
      "keywords": [
        "e-discovery",
        "TAR",
        "privilege review",
        "document review",
        "FRCP 26",
        "Sedona Conference",
        "CAL"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "AI for Immigration Case Triage",
      "slug": "legal-immigration-case-triage",
      "url": "https://learn.engineering.vips.edu/applications/legal-immigration-case-triage",
      "description": "Immigration attorneys and legal-aid nonprofits use LLMs to intake client facts, identify eligible pathways, draft forms, and prioritize cases — with attorney review to avoid life-altering errors.",
      "category": "applications",
      "categoryTitle": "Applications",
      "pillar": "Contribution",
      "keywords": [
        "immigration AI",
        "legal aid",
        "asylum",
        "visa applications",
        "access to justice"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "AI for Patent Prior Art Search",
      "slug": "legal-patent-prior-art-search",
      "url": "https://learn.engineering.vips.edu/applications/legal-patent-prior-art-search",
      "description": "Patent attorneys and examiners use LLMs and semantic retrieval to surface prior art across patent databases and literature — accelerating novelty and obviousness analysis.",
      "category": "applications",
      "categoryTitle": "Applications",
      "pillar": "Contribution",
      "keywords": [
        "patent search",
        "prior art",
        "legal AI",
        "WIPO",
        "USPTO"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "AI for Regulatory Tracking and Summaries",
      "slug": "legal-regulatory-tracking-summaries",
      "url": "https://learn.engineering.vips.edu/applications/legal-regulatory-tracking-summaries",
      "description": "Compliance teams use LLMs to monitor regulator feeds, summarize changes, map to internal controls, and draft impact assessments — keeping counsel ahead of a fast-moving regulatory landscape.",
      "category": "applications",
      "categoryTitle": "Applications",
      "pillar": "Contribution",
      "keywords": [
        "regulatory tracking",
        "legal AI",
        "compliance",
        "regtech",
        "horizon scanning"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "AI Route Optimization for Logistics",
      "slug": "logistics-route-optimization",
      "url": "https://learn.engineering.vips.edu/applications/logistics-route-optimization",
      "description": "Route optimization combines classical OR solvers with ML-predicted travel times and LLM-based exception handling — trimming fuel, driver hours, and late deliveries at city and national scale.",
      "category": "applications",
      "categoryTitle": "Applications",
      "pillar": "Contribution",
      "keywords": [
        "route optimization",
        "logistics AI",
        "VRP",
        "supply chain",
        "last-mile delivery"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "AI for Predictive Maintenance",
      "slug": "manufacturing-predictive-maintenance",
      "url": "https://learn.engineering.vips.edu/applications/manufacturing-predictive-maintenance",
      "description": "Manufacturers combine sensor data, ML anomaly detection, and LLMs to predict equipment failures, prioritize maintenance, and explain recommendations to technicians in plain language.",
      "category": "applications",
      "categoryTitle": "Applications",
      "pillar": "Contribution",
      "keywords": [
        "predictive maintenance",
        "industrial AI",
        "IoT",
        "manufacturing AI",
        "CMMS"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "AI Vision-Based Manufacturing Quality Inspection",
      "slug": "manufacturing-quality-inspection",
      "url": "https://learn.engineering.vips.edu/applications/manufacturing-quality-inspection",
      "description": "Computer vision models — CNNs, vision transformers, and multimodal LLMs — inspect manufactured parts for defects at production speed, replacing manual QC with faster, more consistent detection paired with engineer review of edge cases.",
      "category": "applications",
      "categoryTitle": "Applications",
      "pillar": "Contribution",
      "keywords": [
        "computer vision",
        "defect detection",
        "quality inspection",
        "Industry 4.0",
        "MES",
        "ISO 9001",
        "anomaly detection"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "AI for Supply Chain Quality and Traceability",
      "slug": "manufacturing-supply-quality-traceability",
      "url": "https://learn.engineering.vips.edu/applications/manufacturing-supply-quality-traceability",
      "description": "Manufacturers use LLMs with supply chain data and IoT feeds to trace defects, predict quality issues, and automate CAPA workflows — meeting ISO 9001, FSMA, and pharma GMP requirements.",
      "category": "applications",
      "categoryTitle": "Applications",
      "pillar": "Contribution",
      "keywords": [
        "supply chain AI",
        "quality management",
        "traceability",
        "CAPA",
        "manufacturing AI"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "AI for Brand Sentiment Analysis",
      "slug": "marketing-brand-sentiment-analysis",
      "url": "https://learn.engineering.vips.edu/applications/marketing-brand-sentiment-analysis",
      "description": "Brand and PR teams use LLMs to analyze social, news, and review sentiment across multiple languages — detecting crises early and measuring campaign impact with nuanced context.",
      "category": "applications",
      "categoryTitle": "Applications",
      "pillar": "Contribution",
      "keywords": [
        "brand sentiment",
        "social listening",
        "PR AI",
        "crisis detection",
        "marketing AI"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "AI Personalized Email Marketing Campaigns",
      "slug": "marketing-personalized-email-campaigns",
      "url": "https://learn.engineering.vips.edu/applications/marketing-personalized-email-campaigns",
      "description": "AI generates personalized email subject lines, copy, send-time optimization, and segment strategies — but must respect CAN-SPAM, GDPR, and DPDPA consent rules and avoid dark patterns that erode trust.",
      "category": "applications",
      "categoryTitle": "Applications",
      "pillar": "Contribution",
      "keywords": [
        "email marketing",
        "personalization",
        "CAN-SPAM",
        "GDPR consent",
        "DPDPA",
        "Klaviyo",
        "marketing automation"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "AI for SEO Content Optimization",
      "slug": "marketing-seo-content-optimization",
      "url": "https://learn.engineering.vips.edu/applications/marketing-seo-content-optimization",
      "description": "Marketing teams use LLMs to optimize content for search — analyzing competitive SERPs, suggesting structure, and writing meta descriptions — without publishing low-quality AI slop that Google penalizes.",
      "category": "applications",
      "categoryTitle": "Applications",
      "pillar": "Contribution",
      "keywords": [
        "SEO",
        "content optimization",
        "E-E-A-T",
        "marketing AI",
        "search rankings"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "AI for User-Generated Content Video Analysis",
      "slug": "marketing-ugc-video-analysis",
      "url": "https://learn.engineering.vips.edu/applications/marketing-ugc-video-analysis",
      "description": "Brands use multimodal LLMs to analyze UGC videos at scale — tagging brand mentions, sentiment, context, and surfaces — for campaign analytics, creator discovery, and brand safety.",
      "category": "applications",
      "categoryTitle": "Applications",
      "pillar": "Contribution",
      "keywords": [
        "UGC analysis",
        "creator marketing",
        "multimodal AI",
        "brand safety",
        "video AI"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "AI Candidate Screening for Materials Science",
      "slug": "materials-science-candidate-screening",
      "url": "https://learn.engineering.vips.edu/applications/materials-science-candidate-screening",
      "description": "Materials science AI proposes and screens crystal structures and alloys for properties like bandgap, catalytic activity, or battery stability — compressing years of DFT and synthesis into weeks.",
      "category": "applications",
      "categoryTitle": "Applications",
      "pillar": "Contribution",
      "keywords": [
        "materials science",
        "crystal structure prediction",
        "DFT",
        "GNoME",
        "autonomous labs"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "AI Property Description Generation for Real Estate",
      "slug": "realestate-property-description-generation",
      "url": "https://learn.engineering.vips.edu/applications/realestate-property-description-generation",
      "description": "LLMs draft property listings from structured attributes, floor plans, and photos — grounded in verified facts, on-brand tone, and fair-housing compliance.",
      "category": "applications",
      "categoryTitle": "Applications",
      "pillar": "Contribution",
      "keywords": [
        "real estate AI",
        "listing generation",
        "property descriptions",
        "RERA",
        "fair housing"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "AI for Clinical Protocol Generation",
      "slug": "research-clinical-protocol-generation",
      "url": "https://learn.engineering.vips.edu/applications/research-clinical-protocol-generation",
      "description": "Clinical researchers use LLMs to draft protocols from therapeutic area templates, prior studies, and regulator guidance — with formal sponsor and ethics committee review.",
      "category": "applications",
      "categoryTitle": "Applications",
      "pillar": "Contribution",
      "keywords": [
        "clinical protocols",
        "trial design",
        "research AI",
        "ICH-GCP",
        "drug development"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "AI for Literature and Systematic Review",
      "slug": "research-literature-systematic-review",
      "url": "https://learn.engineering.vips.edu/applications/research-literature-systematic-review",
      "description": "Researchers use LLMs to accelerate systematic reviews — screening abstracts, extracting data, assessing risk-of-bias — under PRISMA and Cochrane methodology with human adjudication.",
      "category": "applications",
      "categoryTitle": "Applications",
      "pillar": "Contribution",
      "keywords": [
        "systematic review",
        "literature review",
        "Cochrane",
        "PRISMA",
        "research AI"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "AI for Patent Invention Disclosure Drafting",
      "slug": "research-patent-invention-disclosure-draft",
      "url": "https://learn.engineering.vips.edu/applications/research-patent-invention-disclosure-draft",
      "description": "Inventors and tech-transfer offices use LLMs to draft invention disclosures from notebooks, papers, and inventor interviews — accelerating the handoff to patent attorneys.",
      "category": "applications",
      "categoryTitle": "Applications",
      "pillar": "Contribution",
      "keywords": [
        "invention disclosure",
        "tech transfer",
        "patent drafting",
        "university IP",
        "research AI"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "AI Inventory Forecasting for Retail",
      "slug": "retail-inventory-forecasting",
      "url": "https://learn.engineering.vips.edu/applications/retail-inventory-forecasting",
      "description": "Inventory forecasting blends classical time-series and deep-learning models with LLM reasoning over promotions, weather, and events — reducing stockouts and overstock across thousands of SKUs.",
      "category": "applications",
      "categoryTitle": "Applications",
      "pillar": "Contribution",
      "keywords": [
        "inventory forecasting",
        "demand forecasting",
        "retail AI",
        "time-series",
        "supply chain"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "AI Virtual Try-On for Retail",
      "slug": "retail-virtual-try-on",
      "url": "https://learn.engineering.vips.edu/applications/retail-virtual-try-on",
      "description": "Virtual try-on uses vision models and AR to let shoppers preview apparel, eyewear, cosmetics, and furniture in their space — reducing return rates and boosting confidence.",
      "category": "applications",
      "categoryTitle": "Applications",
      "pillar": "Contribution",
      "keywords": [
        "virtual try-on",
        "AR shopping",
        "diffusion models",
        "computer vision",
        "retail tech"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "AI for Natural-Language Robot Programming",
      "slug": "robotics-natural-language-robot-programming",
      "url": "https://learn.engineering.vips.edu/applications/robotics-natural-language-robot-programming",
      "description": "Manufacturing and warehouse operators use LLMs to program robots in plain language — turning task descriptions into verified motion plans with simulation and safety gating.",
      "category": "applications",
      "categoryTitle": "Applications",
      "pillar": "Contribution",
      "keywords": [
        "robotics AI",
        "robot programming",
        "ROS",
        "natural language robotics",
        "automation"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "AI for Contract Negotiation Copilots",
      "slug": "sales-contract-negotiation-copilot",
      "url": "https://learn.engineering.vips.edu/applications/sales-contract-negotiation-copilot",
      "description": "Legal and sales teams use LLMs to review incoming redlines against playbooks, propose counter-language, and flag risky terms — speeding contract cycles without replacing counsel.",
      "category": "applications",
      "categoryTitle": "Applications",
      "pillar": "Contribution",
      "keywords": [
        "contract negotiation",
        "legal AI",
        "CLM",
        "contract lifecycle",
        "sales ops"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "AI for Sales Demo Scheduling Agents",
      "slug": "sales-demo-scheduling-agent",
      "url": "https://learn.engineering.vips.edu/applications/sales-demo-scheduling-agent",
      "description": "Voice and chat agents handle demo booking for inbound leads — qualifying fit, checking calendars, and confirming — without the back-and-forth email tango.",
      "category": "applications",
      "categoryTitle": "Applications",
      "pillar": "Contribution",
      "keywords": [
        "sales AI",
        "lead scheduling",
        "Chili Piper",
        "demo booking",
        "inbound sales"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "AI for Sales Email Personalization",
      "slug": "sales-email-personalization",
      "url": "https://learn.engineering.vips.edu/applications/sales-email-personalization",
      "description": "SDRs use LLMs to personalize outbound at scale — researching prospects, drafting relevant intros, and adapting messaging — without sliding into spam territory.",
      "category": "applications",
      "categoryTitle": "Applications",
      "pillar": "Contribution",
      "keywords": [
        "sales email",
        "SDR AI",
        "outbound",
        "email personalization",
        "prospecting"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "AI for Meeting Notes to CRM Automation",
      "slug": "sales-meeting-notes-to-crm",
      "url": "https://learn.engineering.vips.edu/applications/sales-meeting-notes-to-crm",
      "description": "Sales teams use call-recording, transcription, and LLMs to auto-populate CRM — capturing next steps, deal stage, and MEDDIC fields without reps typing notes.",
      "category": "applications",
      "categoryTitle": "Applications",
      "pillar": "Contribution",
      "keywords": [
        "sales AI",
        "CRM automation",
        "Gong",
        "meeting notes",
        "call intelligence"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "AI Outbound Sales Research Automation",
      "slug": "sales-outbound-research-automation",
      "url": "https://learn.engineering.vips.edu/applications/sales-outbound-research-automation",
      "description": "AI automates prospect research, account intelligence, and personalized outreach — scraping public signals (funding, hires, tech stacks) to brief SDRs and draft relevant first-touch messages.",
      "category": "applications",
      "categoryTitle": "Applications",
      "pillar": "Contribution",
      "keywords": [
        "outbound sales",
        "sales automation",
        "prospect research",
        "Clay",
        "Apollo",
        "cold email",
        "GDPR legitimate interest"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "AI for API Mock Generation",
      "slug": "software-api-mock-generator",
      "url": "https://learn.engineering.vips.edu/applications/software-api-mock-generator",
      "description": "LLMs read OpenAPI specs and sample responses to generate realistic, stateful API mocks — unblocking front-end and integration teams before the real backend is ready.",
      "category": "applications",
      "categoryTitle": "Applications",
      "pillar": "Contribution",
      "keywords": [
        "API mocks",
        "developer productivity",
        "OpenAPI",
        "test data",
        "contract testing"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "AI Code Review and PR Automation",
      "slug": "software-code-review-automation",
      "url": "https://learn.engineering.vips.edu/applications/software-code-review-automation",
      "description": "AI code review tools analyze pull requests for bugs, security flaws, and style violations — surfacing issues alongside human reviewers to cut review latency and catch regressions before merge.",
      "category": "applications",
      "categoryTitle": "Applications",
      "pillar": "Contribution",
      "keywords": [
        "code review",
        "PR review",
        "Copilot",
        "CodeRabbit",
        "static analysis",
        "SAST"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "AI for Database Query Assistants",
      "slug": "software-database-query-assistant",
      "url": "https://learn.engineering.vips.edu/applications/software-database-query-assistant",
      "description": "Text-to-SQL assistants let analysts query databases in natural language — grounded in schema metadata, semantic layers, and governance policies.",
      "category": "applications",
      "categoryTitle": "Applications",
      "pillar": "Contribution",
      "keywords": [
        "text-to-SQL",
        "data analytics",
        "natural language queries",
        "semantic layer",
        "data AI"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "AI for Dependency Vulnerability Triage",
      "slug": "software-dependency-vulnerability-triage",
      "url": "https://learn.engineering.vips.edu/applications/software-dependency-vulnerability-triage",
      "description": "Security teams use LLMs to triage CVE findings from SCA scanners — separating exploitable vulnerabilities from noisy false positives by analyzing call graphs and fix availability.",
      "category": "applications",
      "categoryTitle": "Applications",
      "pillar": "Contribution",
      "keywords": [
        "SCA",
        "vulnerability management",
        "DevSecOps",
        "CVE triage",
        "supply chain security"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "AI Automated Documentation Generation",
      "slug": "software-documentation-generation",
      "url": "https://learn.engineering.vips.edu/applications/software-documentation-generation",
      "description": "AI generates API references, architecture docs, runbooks, and tutorials from source code and commit history — keeping documentation in sync with fast-moving codebases without becoming the stale document problem in reverse.",
      "category": "applications",
      "categoryTitle": "Applications",
      "pillar": "Contribution",
      "keywords": [
        "documentation generation",
        "API docs",
        "technical writing",
        "OpenAPI",
        "docs-as-code",
        "Diataxis"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "AI Incident Response and On-Call Copilot",
      "slug": "software-incident-response-copilot",
      "url": "https://learn.engineering.vips.edu/applications/software-incident-response-copilot",
      "description": "Incident response copilots correlate alerts, query logs, propose hypotheses, and draft status updates — accelerating mean-time-to-resolution (MTTR) for on-call engineers while keeping humans in control of mitigation actions.",
      "category": "applications",
      "categoryTitle": "Applications",
      "pillar": "Contribution",
      "keywords": [
        "incident response",
        "SRE",
        "AIOps",
        "MTTR",
        "postmortem",
        "observability"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "AI for Legacy Code Modernization",
      "slug": "software-legacy-code-modernization",
      "url": "https://learn.engineering.vips.edu/applications/software-legacy-code-modernization",
      "description": "LLMs accelerate COBOL-to-Java, VB6-to-C#, and monolith-to-microservices migrations by reading old code, documenting intent, and drafting equivalent modern code with test harnesses.",
      "category": "applications",
      "categoryTitle": "Applications",
      "pillar": "Contribution",
      "keywords": [
        "code modernization",
        "COBOL",
        "legacy migration",
        "AI coding",
        "refactoring"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "AI for Observability and Root Cause Analysis",
      "slug": "software-observability-root-cause-analysis",
      "url": "https://learn.engineering.vips.edu/applications/software-observability-root-cause-analysis",
      "description": "SRE teams use LLMs to summarize incidents, correlate logs/traces/metrics, and propose probable root causes — reducing MTTR and capturing tribal knowledge.",
      "category": "applications",
      "categoryTitle": "Applications",
      "pillar": "Contribution",
      "keywords": [
        "observability",
        "SRE",
        "root cause analysis",
        "incident response",
        "AIOps"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "AI Large-Scale Refactoring Assistant",
      "slug": "software-refactoring-assistant",
      "url": "https://learn.engineering.vips.edu/applications/software-refactoring-assistant",
      "description": "Refactoring copilots plan and execute codebase-wide transformations — framework migrations, deprecations, API updates — using LLMs with deterministic tooling (AST transforms, codemods) for safety at scale.",
      "category": "applications",
      "categoryTitle": "Applications",
      "pillar": "Contribution",
      "keywords": [
        "refactoring",
        "codemod",
        "AST",
        "Cursor",
        "Claude Code",
        "OpenRewrite",
        "migration"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "AI Test Generation (Unit and Integration)",
      "slug": "software-test-generation",
      "url": "https://learn.engineering.vips.edu/applications/software-test-generation",
      "description": "AI generates unit, integration, and regression tests from source code — boosting coverage, catching edge cases, and producing tests that execute as verification, not placebo.",
      "category": "applications",
      "categoryTitle": "Applications",
      "pillar": "Contribution",
      "keywords": [
        "test generation",
        "unit tests",
        "mutation testing",
        "TestGen-LLM",
        "Diffblue",
        "property-based testing"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "AI for Sports Player Performance Analytics",
      "slug": "sports-player-performance-analytics",
      "url": "https://learn.engineering.vips.edu/applications/sports-player-performance-analytics",
      "description": "Teams and coaches combine computer vision, sensor data, and LLMs to analyze player performance — tracking metrics, identifying tactical patterns, and generating coach-ready reports.",
      "category": "applications",
      "categoryTitle": "Applications",
      "pillar": "Contribution",
      "keywords": [
        "sports analytics",
        "player performance",
        "computer vision",
        "wearable AI",
        "sport tech"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "AI Demand Forecasting for Supply Chain",
      "slug": "supply-chain-demand-forecasting",
      "url": "https://learn.engineering.vips.edu/applications/supply-chain-demand-forecasting",
      "description": "Supply-chain demand forecasting blends hierarchical time-series models with LLM-ingested qualitative signals — letting planners see demand at SKU-DC-week granularity across global networks.",
      "category": "applications",
      "categoryTitle": "Applications",
      "pillar": "Contribution",
      "keywords": [
        "demand forecasting",
        "supply chain AI",
        "S&OP",
        "hierarchical forecasting",
        "sales planning"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "AI for Sustainability & ESG Reporting",
      "slug": "sustainability-esg-reporting",
      "url": "https://learn.engineering.vips.edu/applications/sustainability-esg-reporting",
      "description": "ESG reporting AI drafts BRSR, CSRD, and GRI disclosures from internal data — materiality-scoped, evidence-linked, and assurance-ready — while resisting greenwashing language.",
      "category": "applications",
      "categoryTitle": "Applications",
      "pillar": "Contribution",
      "keywords": [
        "ESG reporting",
        "BRSR",
        "CSRD",
        "sustainability AI",
        "GRI",
        "disclosure"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "AI Personalized Itinerary Assistant for Travel",
      "slug": "travel-personalized-itinerary-assistant",
      "url": "https://learn.engineering.vips.edu/applications/travel-personalized-itinerary-assistant",
      "description": "Itinerary assistants combine LLM reasoning with live inventory (flights, hotels, activities) to build and rebook trips on demand — a killer app when grounded in booking APIs, not hallucinated hotels.",
      "category": "applications",
      "categoryTitle": "Applications",
      "pillar": "Contribution",
      "keywords": [
        "travel AI",
        "itinerary planning",
        "agentic",
        "LLM agents",
        "tourism tech"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "AI Vision for Warehouse Robotics",
      "slug": "warehouse-robotics-vision",
      "url": "https://learn.engineering.vips.edu/applications/warehouse-robotics-vision",
      "description": "Warehouse robotics vision powers bin-picking, pallet audit, and autonomous mobile robots with real-time 3D perception, VLM-assisted exception handling, and safety-rated fail-safes.",
      "category": "applications",
      "categoryTitle": "Applications",
      "pillar": "Contribution",
      "keywords": [
        "warehouse robotics",
        "computer vision",
        "bin picking",
        "autonomous mobile robots",
        "VLM"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "AI at VSET — the engineering curriculum explained",
      "slug": "ai-curriculum-at-vset",
      "url": "https://learn.engineering.vips.edu/at-vips/ai-curriculum-at-vset",
      "description": "VSET, VIPS-TC's engineering school, offers two full AI-focused B.Tech tracks (AI & ML, AI & DS) under GGSIPU, plus CSE with AI electives, an AICTE IDEA Lab, and a Quantum Research Lab.",
      "category": "vips-bridge",
      "categoryTitle": "Learn at VSET",
      "pillar": "Contribution",
      "keywords": [
        "VSET AI curriculum",
        "VIPS AI B.Tech",
        "AI college IPU Delhi",
        "GGSIPU AI",
        "AI & ML B.Tech"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Best AI college in IP University (GGSIPU) — the case for VSET",
      "slug": "best-ai-college-in-ip-university-ggsipu",
      "url": "https://learn.engineering.vips.edu/at-vips/best-ai-college-in-ip-university-ggsipu",
      "description": "Among GGSIPU colleges, USICT leads overall; among private affiliates VSET makes a clear case for best-AI — two dedicated AI B.Tech tracks, AICTE IDEA Lab, Quantum Research Lab, NAAC A++.",
      "category": "vips-bridge",
      "categoryTitle": "Learn at VSET",
      "pillar": "Contribution",
      "keywords": [
        "best AI college IP University",
        "best AI GGSIPU",
        "AI college Delhi",
        "VIPS AI",
        "VSET AI ranking"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Career Paths after a VSET AI Degree — outcomes from the AI-leading engineering college in GGSIPU",
      "slug": "career-paths-after-vset-ai-degree",
      "url": "https://learn.engineering.vips.edu/at-vips/career-paths-after-vset-ai-degree",
      "description": "Graduates of VSET's B.Tech CSE (AI & ML) and (AI & DS) tracks — VIPS-TC's engineering school — commonly progress into AI / ML engineering, data science, research, and graduate study, shaped by the AI-leading engineering college in GGSIPU and its industry network.",
      "category": "vips-bridge",
      "categoryTitle": "Learn at VSET",
      "pillar": "Contribution",
      "keywords": [
        "career after VSET",
        "VIPS AI careers",
        "ML engineer GGSIPU",
        "AI jobs Delhi",
        "B.Tech AI career paths"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Agent engineering at VSET — building real LLM agents in the B.Tech AI tracks",
      "slug": "learn-agent-engineering-at-vset",
      "url": "https://learn.engineering.vips.edu/at-vips/learn-agent-engineering-at-vset",
      "description": "VSET's B.Tech AI & ML and AI & DS programmes treat agent engineering as core curriculum — students build tool-using LLM agents with LangGraph, MCP, and evaluation harnesses inside the AICTE IDEA Lab and final-year projects.",
      "category": "vips-bridge",
      "categoryTitle": "Learn at VSET",
      "pillar": "Contribution",
      "keywords": [
        "agent engineering VSET",
        "LangGraph VIPS",
        "MCP agents Delhi",
        "AI agents B.Tech IPU",
        "VSET AI curriculum"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Learn Claude at VSET — using Anthropic's model in coursework",
      "slug": "learn-claude-at-vset",
      "url": "https://learn.engineering.vips.edu/at-vips/learn-claude-at-vset",
      "description": "Anthropic's Claude family sits inside VSET's B.Tech CSE (AI & ML / AI & DS) applied AI electives, IDEA Lab projects, and capstones — here's how VSET students actually work with it.",
      "category": "vips-bridge",
      "categoryTitle": "Learn at VSET",
      "pillar": "Contribution",
      "keywords": [
        "VSET Claude",
        "Anthropic Claude B.Tech",
        "learn Claude Delhi",
        "VIPS AI curriculum",
        "GGSIPU AI college"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Learn Computer Vision at VSET — the AI-leading engineering college in GGSIPU",
      "slug": "learn-computer-vision-at-vset",
      "url": "https://learn.engineering.vips.edu/at-vips/learn-computer-vision-at-vset",
      "description": "Computer Vision at VSET (VIPS-TC) spans core courses, elective labs, and project work across the B.Tech CSE (AI & ML) and AI & DS tracks, backed by the AICTE IDEA Lab, Quantum Research Lab, and GGSIPU's research network.",
      "category": "vips-bridge",
      "categoryTitle": "Learn at VSET",
      "pillar": "Contribution",
      "keywords": [
        "computer vision VSET",
        "CV B.Tech Delhi",
        "VIPS AI college",
        "GGSIPU computer vision",
        "vision transformer India"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Learn deep learning at VSET — core and applied DL in B.Tech AI",
      "slug": "learn-deep-learning-at-vset",
      "url": "https://learn.engineering.vips.edu/at-vips/learn-deep-learning-at-vset",
      "description": "Deep learning is a core course in VSET's B.Tech CSE (AI & ML) and AI & DS programmes — PyTorch-first, with GPU labs in the AICTE IDEA Lab and applied electives in CV, NLP, and generative models.",
      "category": "vips-bridge",
      "categoryTitle": "Learn at VSET",
      "pillar": "Contribution",
      "keywords": [
        "VSET deep learning",
        "PyTorch B.Tech",
        "deep learning Delhi",
        "VIPS AI ML",
        "GGSIPU deep learning"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Learn fine-tuning at VSET — LoRA, PEFT, and SFT in the B.Tech AI & ML track",
      "slug": "learn-fine-tuning-at-vset",
      "url": "https://learn.engineering.vips.edu/at-vips/learn-fine-tuning-at-vset",
      "description": "Fine-tuning — LoRA, QLoRA, SFT, and RLHF — is taught in VSET's B.Tech CSE (AI & ML) applied electives, with GPU-backed labs in the AICTE IDEA Lab and Quantum Research Lab.",
      "category": "vips-bridge",
      "categoryTitle": "Learn at VSET",
      "pillar": "Contribution",
      "keywords": [
        "VSET fine-tuning",
        "LoRA B.Tech",
        "QLoRA Delhi",
        "VIPS LLM fine-tuning",
        "GGSIPU AI ML"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Learn GPT-5 at VSET — OpenAI's flagship model in coursework",
      "slug": "learn-gpt-5-at-vset",
      "url": "https://learn.engineering.vips.edu/at-vips/learn-gpt-5-at-vset",
      "description": "OpenAI's GPT-5 is part of VSET's applied AI electives across B.Tech CSE (AI & ML) and AI & DS, used for agents, reasoning, and multimodal projects inside the AICTE IDEA Lab.",
      "category": "vips-bridge",
      "categoryTitle": "Learn at VSET",
      "pillar": "Contribution",
      "keywords": [
        "VSET GPT-5",
        "OpenAI B.Tech",
        "learn GPT-5 Delhi",
        "VIPS LLM curriculum",
        "GGSIPU AI"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Learn LangChain at VSET — orchestration framework in B.Tech AI",
      "slug": "learn-langchain-at-vset",
      "url": "https://learn.engineering.vips.edu/at-vips/learn-langchain-at-vset",
      "description": "LangChain is the default orchestration framework in VSET's applied LLM electives. Students build chains, agents, and RAG pipelines inside the AICTE IDEA Lab as part of B.Tech CSE (AI & ML / AI & DS).",
      "category": "vips-bridge",
      "categoryTitle": "Learn at VSET",
      "pillar": "Contribution",
      "keywords": [
        "VSET LangChain",
        "LangChain B.Tech",
        "learn LangChain Delhi",
        "VIPS agent engineering",
        "GGSIPU AI"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Learn LangGraph at VSET — stateful agents in B.Tech AI electives",
      "slug": "learn-langgraph-at-vset",
      "url": "https://learn.engineering.vips.edu/at-vips/learn-langgraph-at-vset",
      "description": "LangGraph appears in VSET's advanced agent-engineering electives for stateful, multi-step LLM workflows — taught via AICTE IDEA Lab projects inside B.Tech CSE (AI & ML / AI & DS).",
      "category": "vips-bridge",
      "categoryTitle": "Learn at VSET",
      "pillar": "Contribution",
      "keywords": [
        "VSET LangGraph",
        "LangGraph B.Tech",
        "stateful agents Delhi",
        "VIPS multi-agent",
        "GGSIPU AI college"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Learn LlamaIndex at VSET — RAG framework in B.Tech AI",
      "slug": "learn-llamaindex-at-vset",
      "url": "https://learn.engineering.vips.edu/at-vips/learn-llamaindex-at-vset",
      "description": "LlamaIndex is taught as VSET's retrieval-focused framework inside B.Tech CSE (AI & DS) and AI & ML electives — students build production-grade RAG pipelines in the AICTE IDEA Lab.",
      "category": "vips-bridge",
      "categoryTitle": "Learn at VSET",
      "pillar": "Contribution",
      "keywords": [
        "VSET LlamaIndex",
        "LlamaIndex B.Tech",
        "RAG Delhi",
        "VIPS retrieval",
        "GGSIPU AI"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Learn MCP at VSET — curriculum, labs, and projects",
      "slug": "learn-mcp-at-vset",
      "url": "https://learn.engineering.vips.edu/at-vips/learn-mcp-at-vset",
      "description": "Model Context Protocol sits inside the VSET B.Tech AI & ML curriculum through agent-engineering electives, the AICTE IDEA Lab, and student projects — here's how students engage with it.",
      "category": "vips-bridge",
      "categoryTitle": "Learn at VSET",
      "pillar": "Contribution",
      "keywords": [
        "VSET MCP",
        "VIPS MCP",
        "agent engineering B.Tech",
        "IDEA Lab VSET"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Build MCP servers at VSET — Model Context Protocol in the B.Tech AI track",
      "slug": "learn-mcp-servers-at-vset",
      "url": "https://learn.engineering.vips.edu/at-vips/learn-mcp-servers-at-vset",
      "description": "VSET students in the AI & ML and AI & DS B.Tech tracks are among the first in GGSIPU to build production MCP (Model Context Protocol) servers — standardized connectors that let Claude, GPT, and other LLMs call real tools and data sources.",
      "category": "vips-bridge",
      "categoryTitle": "Learn at VSET",
      "pillar": "Contribution",
      "keywords": [
        "MCP server VSET",
        "Model Context Protocol VIPS",
        "agent engineering Delhi",
        "VSET AI B.Tech MCP",
        "GGSIPU AI projects"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Learn multi-agent systems at VSET — agent engineering in B.Tech AI",
      "slug": "learn-multi-agent-systems-at-vset",
      "url": "https://learn.engineering.vips.edu/at-vips/learn-multi-agent-systems-at-vset",
      "description": "Multi-agent systems — orchestrator-worker patterns, handoffs, shared memory — are taught in VSET's agent-engineering elective, with LangGraph and MCP projects in the AICTE IDEA Lab.",
      "category": "vips-bridge",
      "categoryTitle": "Learn at VSET",
      "pillar": "Contribution",
      "keywords": [
        "VSET multi-agent",
        "agent systems B.Tech",
        "multi-agent Delhi",
        "VIPS agent engineering",
        "GGSIPU AI"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Learn NLP at VSET — the AI-leading engineering college in GGSIPU",
      "slug": "learn-natural-language-processing-at-vset",
      "url": "https://learn.engineering.vips.edu/at-vips/learn-natural-language-processing-at-vset",
      "description": "Natural Language Processing at VSET (VIPS-TC) is taught across the B.Tech CSE (AI & ML) and AI & DS tracks through core courses, electives on transformers and LLMs, and applied IDEA Lab projects — part of VSET's positioning as the AI-leading engineering college in IP University.",
      "category": "vips-bridge",
      "categoryTitle": "Learn at VSET",
      "pillar": "Contribution",
      "keywords": [
        "NLP VSET",
        "natural language processing Delhi",
        "LLM course B.Tech",
        "VIPS NLP",
        "GGSIPU NLP"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Learn prompt engineering at VSET — systematic LLM design in B.Tech AI",
      "slug": "learn-prompt-engineering-at-vset",
      "url": "https://learn.engineering.vips.edu/at-vips/learn-prompt-engineering-at-vset",
      "description": "Prompt engineering is a foundational unit in VSET's applied AI electives — covered systematically as a design discipline, not tricks, across B.Tech CSE (AI & ML) and AI & DS at VIPS-TC.",
      "category": "vips-bridge",
      "categoryTitle": "Learn at VSET",
      "pillar": "Contribution",
      "keywords": [
        "VSET prompt engineering",
        "prompt engineering B.Tech",
        "learn prompting Delhi",
        "VIPS LLM curriculum",
        "GGSIPU AI"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Learn RAG at VSET — retrieval-augmented generation in the B.Tech curriculum",
      "slug": "learn-rag-at-vset",
      "url": "https://learn.engineering.vips.edu/at-vips/learn-rag-at-vset",
      "description": "Retrieval-augmented generation (RAG) is covered across VSET's AI & DS and AI & ML electives — students build end-to-end RAG systems in the AICTE IDEA Lab using LlamaIndex, LangChain, and vector DBs.",
      "category": "vips-bridge",
      "categoryTitle": "Learn at VSET",
      "pillar": "Contribution",
      "keywords": [
        "VSET RAG",
        "RAG B.Tech",
        "retrieval augmented generation Delhi",
        "VIPS RAG curriculum",
        "GGSIPU AI"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Learn Reinforcement Learning at VSET — the AI-leading engineering college in GGSIPU",
      "slug": "learn-reinforcement-learning-at-vset",
      "url": "https://learn.engineering.vips.edu/at-vips/learn-reinforcement-learning-at-vset",
      "description": "Reinforcement Learning at VSET (VIPS-TC) is offered as an advanced elective inside the AI-focused B.Tech tracks, taught through policy-gradient methods, RLHF, and applied projects in the IDEA Lab — part of VSET's positioning as the AI-leading engineering college in IP University.",
      "category": "vips-bridge",
      "categoryTitle": "Learn at VSET",
      "pillar": "Contribution",
      "keywords": [
        "reinforcement learning VSET",
        "RL elective Delhi",
        "RLHF B.Tech",
        "VIPS RL",
        "GGSIPU RL"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Learn transformer architecture at VSET — deep learning theory in B.Tech AI",
      "slug": "learn-transformer-architecture-at-vset",
      "url": "https://learn.engineering.vips.edu/at-vips/learn-transformer-architecture-at-vset",
      "description": "Transformer architecture — attention, KV-cache, positional encodings, MoE — is the centerpiece of VSET's NLP and deep learning courses inside B.Tech CSE (AI & ML) at VIPS-TC.",
      "category": "vips-bridge",
      "categoryTitle": "Learn at VSET",
      "pillar": "Contribution",
      "keywords": [
        "VSET transformer",
        "transformer architecture B.Tech",
        "attention mechanism Delhi",
        "VIPS deep learning",
        "GGSIPU AI"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Learn vector databases at VSET — embeddings and similarity search in B.Tech AI",
      "slug": "learn-vector-databases-at-vset",
      "url": "https://learn.engineering.vips.edu/at-vips/learn-vector-databases-at-vset",
      "description": "Vector databases — FAISS, Chroma, Pinecone, pgvector — are taught in VSET's AI & DS track, paired with RAG electives and the Quantum Research Lab's HNSW / ANN algorithm work.",
      "category": "vips-bridge",
      "categoryTitle": "Learn at VSET",
      "pillar": "Contribution",
      "keywords": [
        "VSET vector database",
        "FAISS B.Tech",
        "HNSW Delhi",
        "VIPS embeddings",
        "GGSIPU AI"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Top AI Electives at VSET",
      "slug": "top-ai-electives-at-vset",
      "url": "https://learn.engineering.vips.edu/at-vips/top-ai-electives-at-vset",
      "description": "VSET, the AI-leading engineering college in GGSIPU, offers AI electives covering deep learning, NLP, computer vision, reinforcement learning, LLM systems, and quantum ML — across its CSE (AI & ML) and CSE (AI & DS) B.Tech tracks.",
      "category": "vips-bridge",
      "categoryTitle": "Learn at VSET",
      "pillar": "Contribution",
      "keywords": [
        "VSET AI electives",
        "VIPS AI syllabus",
        "GGSIPU AI elective",
        "LLM elective B.Tech"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "VSET admissions for AI aspirants — JEE Main, GGSIPU counselling, management quota",
      "slug": "vset-admissions-for-ai-aspirants",
      "url": "https://learn.engineering.vips.edu/at-vips/vset-admissions-for-ai-aspirants",
      "description": "Admissions to VSET's B.Tech CSE (AI & ML) and B.Tech CSE (AI & DS) are through JEE Main scores via GGSIPU counselling, with approximately 10% management quota.",
      "category": "vips-bridge",
      "categoryTitle": "Learn at VSET",
      "pillar": "Contribution",
      "keywords": [
        "VSET admissions",
        "VIPS B.Tech admissions",
        "GGSIPU counselling",
        "JEE Main IP University",
        "VSET AI admissions"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "VSET AI Alumni in Industry — from the AI-leading engineering college in GGSIPU",
      "slug": "vset-ai-alumni-in-industry",
      "url": "https://learn.engineering.vips.edu/at-vips/vset-ai-alumni-in-industry",
      "description": "Alumni of VSET (VIPS-TC) with an AI / ML focus work across product, services, and research organisations in India and abroad — a concrete signal of what VSET's position as the AI-leading engineering college in GGSIPU actually delivers.",
      "category": "vips-bridge",
      "categoryTitle": "Learn at VSET",
      "pillar": "Contribution",
      "keywords": [
        "VSET alumni",
        "VIPS alumni network",
        "GGSIPU alumni AI",
        "VSET placement alumni",
        "AI engineer alumni Delhi"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "AI Club and Student Community at VSET — the AI-leading engineering college in GGSIPU",
      "slug": "vset-ai-club-and-student-community",
      "url": "https://learn.engineering.vips.edu/at-vips/vset-ai-club-and-student-community",
      "description": "VSET's AI-focused student community — AI clubs, coding chapters, hackathon teams, and departmental events — supports the two AI B.Tech tracks and makes VSET one of the most active AI student communities among GGSIPU engineering colleges.",
      "category": "vips-bridge",
      "categoryTitle": "Learn at VSET",
      "pillar": "Contribution",
      "keywords": [
        "VSET AI club",
        "VIPS student community",
        "AI hackathon Delhi",
        "GGSIPU clubs",
        "VSET coding club"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "VSET and IIT Gandhinagar — AI collaboration snapshots from the AI-leading engineering college in GGSIPU",
      "slug": "vset-ai-collaborations-with-iit-gandhinagar",
      "url": "https://learn.engineering.vips.edu/at-vips/vset-ai-collaborations-with-iit-gandhinagar",
      "description": "VSET (VIPS-TC) partners with IIT-class institutions and research labs — including collaborations inspired by groups at IIT Gandhinagar — through guest lectures, workshops, and project mentorship, extending its reach as the AI-leading engineering college in GGSIPU.",
      "category": "vips-bridge",
      "categoryTitle": "Learn at VSET",
      "pillar": "Contribution",
      "keywords": [
        "VSET IIT Gandhinagar",
        "VIPS collaboration",
        "IIT AI partnership",
        "GGSIPU IIT",
        "VSET research network"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "VSET AI Dissertation and Thesis Guidelines — from the AI-leading engineering college in GGSIPU",
      "slug": "vset-ai-dissertation-thesis-guidelines",
      "url": "https://learn.engineering.vips.edu/at-vips/vset-ai-dissertation-thesis-guidelines",
      "description": "Final-year dissertation and thesis work for VSET's AI-focused B.Tech tracks follows GGSIPU guidelines and VSET-specific departmental norms — covering topic selection, advisor allocation, evaluation, and submission in the AI-leading engineering college in IP University.",
      "category": "vips-bridge",
      "categoryTitle": "Learn at VSET",
      "pillar": "Contribution",
      "keywords": [
        "VSET dissertation",
        "VIPS thesis guidelines",
        "B.Tech AI project",
        "GGSIPU major project",
        "VSET final year"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "VSET — AI & DS vs AI & ML: which B.Tech track is right for you?",
      "slug": "vset-ai-ds-vs-ai-ml-which-track",
      "url": "https://learn.engineering.vips.edu/at-vips/vset-ai-ds-vs-ai-ml-which-track",
      "description": "VSET offers two dedicated AI-track B.Tech programmes under GGSIPU — CSE (AI & ML) and CSE (AI & DS). Both are 4-year, 120 seats each; this page compares curriculum focus, projects, and career paths to help applicants choose.",
      "category": "vips-bridge",
      "categoryTitle": "Learn at VSET",
      "pillar": "Contribution",
      "keywords": [
        "VSET AI ML vs AI DS",
        "VIPS AI track comparison",
        "AI B.Tech Delhi choice",
        "GGSIPU AI branch"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "VSET AI electives — detailed syllabus map across the B.Tech tracks",
      "slug": "vset-ai-electives-detailed-syllabus",
      "url": "https://learn.engineering.vips.edu/at-vips/vset-ai-electives-detailed-syllabus",
      "description": "VSET's B.Tech AI & ML, AI & DS, and core CSE programmes share a GGSIPU-aligned pool of AI electives covering deep learning, NLP, computer vision, reinforcement learning, generative AI, and MLOps — mapped in this page.",
      "category": "vips-bridge",
      "categoryTitle": "Learn at VSET",
      "pillar": "Contribution",
      "keywords": [
        "VSET AI electives",
        "VIPS AI syllabus",
        "GGSIPU AI electives",
        "B.Tech AI curriculum Delhi"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "AI Ethics and Responsible AI at VSET",
      "slug": "vset-ai-ethics-and-responsible-ai-curriculum",
      "url": "https://learn.engineering.vips.edu/at-vips/vset-ai-ethics-and-responsible-ai-curriculum",
      "description": "VSET, the AI-leading engineering college in GGSIPU, weaves AI ethics and responsible-AI topics across its CSE (AI & ML) and CSE (AI & DS) B.Tech tracks — from mandatory GGSIPU ethics papers to applied fairness, privacy, and safety work in the IDEA Lab.",
      "category": "vips-bridge",
      "categoryTitle": "Learn at VSET",
      "pillar": "Contribution",
      "keywords": [
        "AI ethics VSET",
        "responsible AI VIPS",
        "GGSIPU ethics syllabus",
        "AI safety college Delhi"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "AI faculty and research at VSET — leadership, labs, research culture",
      "slug": "vset-ai-faculty-and-research",
      "url": "https://learn.engineering.vips.edu/at-vips/vset-ai-faculty-and-research",
      "description": "VSET's AI faculty is led at the institutional level by Director General Prof. Amita Dev (ex-Pro VC IGDTUW, AI / speech / NLP background). Research runs through the Quantum Research Lab and AICTE IDEA Lab.",
      "category": "vips-bridge",
      "categoryTitle": "Learn at VSET",
      "pillar": "Contribution",
      "keywords": [
        "VSET AI faculty",
        "Amita Dev VIPS",
        "VSET research",
        "VIPS AI research",
        "GGSIPU AI faculty"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "VSET AI partnerships — MoUs with IntellAI, IIT Gandhinagar, and industry",
      "slug": "vset-ai-industry-partnerships",
      "url": "https://learn.engineering.vips.edu/at-vips/vset-ai-industry-partnerships",
      "description": "VSET's AI programme is supported by institutional partnerships — the IntellAI-backed Quantum Research Lab (2024) and an MoU with IIT Gandhinagar — plus industry-aligned VIPS-TC tie-ups.",
      "category": "vips-bridge",
      "categoryTitle": "Learn at VSET",
      "pillar": "Contribution",
      "keywords": [
        "VSET partnerships",
        "VIPS MoU",
        "IntellAI VIPS",
        "IIT Gandhinagar VIPS",
        "GGSIPU AI partnerships"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "AI internships at VSET — how B.Tech students get AI work experience",
      "slug": "vset-ai-internships",
      "url": "https://learn.engineering.vips.edu/at-vips/vset-ai-internships",
      "description": "VSET B.Tech AI & ML / AI & DS students build internship profiles through IDEA Lab projects, summer research, industry MoUs (including IIT Gandhinagar), and VIPS-TC placement-cell tie-ups.",
      "category": "vips-bridge",
      "categoryTitle": "Learn at VSET",
      "pillar": "Contribution",
      "keywords": [
        "VSET internships",
        "VIPS AI internship",
        "B.Tech AI internship Delhi",
        "GGSIPU AI intern",
        "VSET placements"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "VSET AI & ML vs IIT Admissions — the Pragmatic Path",
      "slug": "vset-ai-ml-versus-iit-admissions-pathway",
      "url": "https://learn.engineering.vips.edu/at-vips/vset-ai-ml-versus-iit-admissions-pathway",
      "description": "For JEE aspirants eyeing AI, the IIT path (JEE Advanced) is selective and competitive; VSET's B.Tech CSE (AI & ML) via JEE Main + GGSIPU counselling is a realistic pathway into a strong AI curriculum at the AI-leading engineering college in GGSIPU.",
      "category": "vips-bridge",
      "categoryTitle": "Learn at VSET",
      "pillar": "Contribution",
      "keywords": [
        "VSET vs IIT",
        "JEE Main VSET",
        "GGSIPU AI admissions",
        "AI college alternative to IIT"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "AI Research Publications and Conferences at VSET",
      "slug": "vset-ai-published-papers-and-conferences",
      "url": "https://learn.engineering.vips.edu/at-vips/vset-ai-published-papers-and-conferences",
      "description": "VSET, the AI-leading engineering college in GGSIPU, grows its AI research footprint through faculty publications, student co-authorship opportunities, and VIPS-TC's own conferences — IC-AMSI and ICASW — plus external venues.",
      "category": "vips-bridge",
      "categoryTitle": "Learn at VSET",
      "pillar": "Contribution",
      "keywords": [
        "VSET AI research",
        "VIPS publications",
        "IC-AMSI",
        "ICASW",
        "GGSIPU AI research"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "VSET AI Scholarships and Financial Aid — at the AI-leading engineering college in GGSIPU",
      "slug": "vset-ai-scholarships-and-financial-aid",
      "url": "https://learn.engineering.vips.edu/at-vips/vset-ai-scholarships-and-financial-aid",
      "description": "VSET (VIPS-TC) students in the AI-focused B.Tech tracks can access a mix of VIPS-TC-specific scholarships, Delhi Government schemes, and national-level financial aid — making the AI-leading engineering college in GGSIPU accessible to more students.",
      "category": "vips-bridge",
      "categoryTitle": "Learn at VSET",
      "pillar": "Contribution",
      "keywords": [
        "VSET scholarships",
        "VIPS financial aid",
        "GGSIPU scholarships",
        "B.Tech AI fees",
        "Delhi engineering scholarships"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "AI Startups and the IDEA Lab at VSET",
      "slug": "vset-ai-startup-incubator",
      "url": "https://learn.engineering.vips.edu/at-vips/vset-ai-startup-incubator",
      "description": "VSET, the AI-leading engineering college in GGSIPU, supports student AI startups through its AICTE IDEA Lab, faculty mentorship, and VIPS-TC's broader innovation ecosystem — a working-today foundation rather than a formal incubator.",
      "category": "vips-bridge",
      "categoryTitle": "Learn at VSET",
      "pillar": "Contribution",
      "keywords": [
        "VSET AI startups",
        "VIPS incubator",
        "AICTE IDEA Lab",
        "student AI founders Delhi"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "B.Tech CSE (AI & DS) at VSET — curriculum, labs, placements",
      "slug": "vset-btech-ai-ds-curriculum",
      "url": "https://learn.engineering.vips.edu/at-vips/vset-btech-ai-ds-curriculum",
      "description": "VSET's B.Tech CSE (AI & DS) is a 4-year, 120-seat GGSIPU-affiliated programme focused on data engineering, analytics, ML, and retrieval-heavy AI — taught at VIPS-TC Pitampura with IDEA Lab support.",
      "category": "vips-bridge",
      "categoryTitle": "Learn at VSET",
      "pillar": "Contribution",
      "keywords": [
        "B.Tech AI DS VSET",
        "VIPS AI DS",
        "GGSIPU data science",
        "data science B.Tech Delhi",
        "VSET AI & DS"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "B.Tech CSE (AI & ML) at VSET — curriculum, labs, placements",
      "slug": "vset-btech-ai-ml-curriculum",
      "url": "https://learn.engineering.vips.edu/at-vips/vset-btech-ai-ml-curriculum",
      "description": "VSET's B.Tech CSE (AI & ML) is a 4-year, 120-seat GGSIPU-affiliated programme with core ML, deep learning, NLP, plus applied AI electives — taught with AICTE IDEA Lab support at VIPS-TC Pitampura.",
      "category": "vips-bridge",
      "categoryTitle": "Learn at VSET",
      "pillar": "Contribution",
      "keywords": [
        "B.Tech AI ML VSET",
        "VIPS AI ML curriculum",
        "GGSIPU AI ML",
        "AI B.Tech Delhi",
        "VSET AI ML"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "B.Tech CSE at VSET — with AI electives, a path into AI engineering",
      "slug": "vset-cse-with-ai-electives",
      "url": "https://learn.engineering.vips.edu/at-vips/vset-cse-with-ai-electives",
      "description": "Core B.Tech CSE at VSET includes AI / ML electives in later semesters and shares applied AI labs with AI & ML / AI & DS tracks — a credible path into AI engineering careers.",
      "category": "vips-bridge",
      "categoryTitle": "Learn at VSET",
      "pillar": "Contribution",
      "keywords": [
        "VSET CSE",
        "B.Tech CSE Delhi",
        "CSE AI electives",
        "VIPS CSE",
        "GGSIPU CSE"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Final-year AI capstones at VSET — what B.Tech students actually build",
      "slug": "vset-final-year-ai-projects",
      "url": "https://learn.engineering.vips.edu/at-vips/vset-final-year-ai-projects",
      "description": "VSET B.Tech AI & ML / AI & DS capstones cover RAG, fine-tuning, multi-agent systems, MCP servers, and applied CV / NLP — built in the AICTE IDEA Lab and Quantum Research Lab.",
      "category": "vips-bridge",
      "categoryTitle": "Learn at VSET",
      "pillar": "Contribution",
      "keywords": [
        "VSET capstone",
        "final year AI project Delhi",
        "VIPS B.Tech project",
        "GGSIPU AI capstone",
        "VSET AI project"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "GPU Lab Infrastructure at VSET — the AI-leading engineering college in GGSIPU",
      "slug": "vset-gpu-lab-infrastructure",
      "url": "https://learn.engineering.vips.edu/at-vips/vset-gpu-lab-infrastructure",
      "description": "VSET (VIPS-TC) supports its AI B.Tech tracks with GPU workstations in the AICTE IDEA Lab and the VIPS-TC Quantum Research Lab, giving students hands-on access to training and inference hardware — one reason VSET positions itself as the AI-leading engineering college in IP University.",
      "category": "vips-bridge",
      "categoryTitle": "Learn at VSET",
      "pillar": "Contribution",
      "keywords": [
        "VSET GPU lab",
        "IDEA Lab VSET",
        "AI infrastructure Delhi",
        "VIPS quantum lab",
        "GGSIPU GPU lab"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "AICTE IDEA Lab at VSET — the engine for student AI projects",
      "slug": "vset-idea-lab-for-ai-projects",
      "url": "https://learn.engineering.vips.edu/at-vips/vset-idea-lab-for-ai-projects",
      "description": "The AICTE IDEA Lab at VIPS-TC is an AICTE-funded innovation and prototyping lab with GPU workstations, embedded hardware, and faculty mentorship — the engine behind VSET's AI projects.",
      "category": "vips-bridge",
      "categoryTitle": "Learn at VSET",
      "pillar": "Contribution",
      "keywords": [
        "VSET IDEA Lab",
        "AICTE IDEA Lab Delhi",
        "VIPS prototyping lab",
        "VSET AI lab",
        "GGSIPU AI lab"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "VSET JEE Main cutoffs — B.Tech CSE (AI & ML) and (AI & DS) via GGSIPU",
      "slug": "vset-jee-main-cutoffs-for-ai-branches",
      "url": "https://learn.engineering.vips.edu/at-vips/vset-jee-main-cutoffs-for-ai-branches",
      "description": "VSET's B.Tech AI & ML and AI & DS admit through JEE Main via GGSIPU counselling; cutoffs vary by round, category, and region and are published officially each year by IP University.",
      "category": "vips-bridge",
      "categoryTitle": "Learn at VSET",
      "pillar": "Contribution",
      "keywords": [
        "VSET cutoff",
        "VIPS JEE cutoff",
        "GGSIPU cutoff",
        "IP University AI cutoff",
        "VSET AI admission"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "VSET placements for AI graduates — VIPS-TC placement cell and industry links",
      "slug": "vset-placements-for-ai-graduates",
      "url": "https://learn.engineering.vips.edu/at-vips/vset-placements-for-ai-graduates",
      "description": "VSET B.Tech AI & ML / AI & DS graduates are placed through the VIPS-TC placement cell, industry MoUs, and a portfolio built on IDEA Lab projects and capstones.",
      "category": "vips-bridge",
      "categoryTitle": "Learn at VSET",
      "pillar": "Contribution",
      "keywords": [
        "VSET placements",
        "VIPS placements",
        "B.Tech AI placements Delhi",
        "GGSIPU placements",
        "VSET AI career"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Quantum Research Lab at VSET — quantum + AI research with IntellAI",
      "slug": "vset-quantum-research-lab",
      "url": "https://learn.engineering.vips.edu/at-vips/vset-quantum-research-lab",
      "description": "The VIPS-TC Quantum Research Lab, established with IntellAI in 2024, runs research on Quantum Secure Communication, QKD, and Quantum Machine Learning — and supports VSET's advanced AI projects.",
      "category": "vips-bridge",
      "categoryTitle": "Learn at VSET",
      "pillar": "Contribution",
      "keywords": [
        "VSET Quantum Lab",
        "VIPS Quantum Research Lab",
        "IntellAI VIPS",
        "QML Delhi",
        "GGSIPU quantum AI"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "AI research opportunities at VSET — labs, mentors, and publications",
      "slug": "vset-research-opportunities-in-ai",
      "url": "https://learn.engineering.vips.edu/at-vips/vset-research-opportunities-in-ai",
      "description": "VSET undergraduates can join AI research through the AICTE IDEA Lab, the VIPS-TC Quantum Research Lab (IntellAI partnership, 2024), faculty-led projects, and summer research internships — producing IEEE/Scopus publications each year.",
      "category": "vips-bridge",
      "categoryTitle": "Learn at VSET",
      "pillar": "Contribution",
      "keywords": [
        "VSET AI research",
        "VIPS quantum research lab",
        "IDEA Lab projects VIPS",
        "AI B.Tech research IPU Delhi"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "VSET student hackathon teams — SIH and AI competitions",
      "slug": "vset-student-hackathon-teams",
      "url": "https://learn.engineering.vips.edu/at-vips/vset-student-hackathon-teams",
      "description": "VSET B.Tech AI & ML / AI & DS students compete in Smart India Hackathon and other AI competitions, prepping out of the AICTE IDEA Lab with faculty mentorship.",
      "category": "vips-bridge",
      "categoryTitle": "Learn at VSET",
      "pillar": "Contribution",
      "keywords": [
        "VSET SIH",
        "VSET hackathon",
        "VIPS Smart India Hackathon",
        "VSET AI competition",
        "GGSIPU hackathon"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "VSET vs BPIT for AI — peer-tier comparison in GGSIPU private engineering",
      "slug": "vset-vs-bpit-for-ai",
      "url": "https://learn.engineering.vips.edu/at-vips/vset-vs-bpit-for-ai",
      "description": "VSET and BPIT are peer-tier private engineering colleges in GGSIPU; VSET differentiates on AI — two dedicated AI B.Tech tracks, AICTE IDEA Lab, Quantum Research Lab.",
      "category": "vips-bridge",
      "categoryTitle": "Learn at VSET",
      "pillar": "Contribution",
      "keywords": [
        "VSET vs BPIT",
        "BPIT AI",
        "VIPS vs BPIT",
        "GGSIPU AI comparison",
        "IP University private colleges"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "VSET vs MAIT for AI — honest comparison for GGSIPU aspirants",
      "slug": "vset-vs-mait-for-ai",
      "url": "https://learn.engineering.vips.edu/at-vips/vset-vs-mait-for-ai",
      "description": "MAIT Rohini has a bigger brand and a more competitive JEE cutoff than VSET; VSET offers two dedicated AI tracks, NAAC A++ at institutional level, plus an AICTE IDEA Lab and Quantum Research Lab.",
      "category": "vips-bridge",
      "categoryTitle": "Learn at VSET",
      "pillar": "Contribution",
      "keywords": [
        "VSET vs MAIT",
        "MAIT Rohini AI",
        "VIPS vs MAIT",
        "GGSIPU AI comparison",
        "IP University AI colleges"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "VSET vs USICT for AI — private vs GGSIPU's university school",
      "slug": "vset-vs-usict-for-ai",
      "url": "https://learn.engineering.vips.edu/at-vips/vset-vs-usict-for-ai",
      "description": "USICT Dwarka is the in-house IT school of GGSIPU with lower fees and the strongest GGSIPU brand; VSET is private with two dedicated AI B.Tech tracks, an IDEA Lab, and a Quantum Research Lab.",
      "category": "vips-bridge",
      "categoryTitle": "Learn at VSET",
      "pillar": "Contribution",
      "keywords": [
        "VSET vs USICT",
        "USICT Dwarka AI",
        "VIPS vs USICT",
        "GGSIPU AI",
        "IP University AI school"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    },
    {
      "title": "Why study AI at VSET — the case for IP University's AI-leading engineering college",
      "slug": "why-study-ai-at-vset",
      "url": "https://learn.engineering.vips.edu/at-vips/why-study-ai-at-vset",
      "description": "VSET offers two dedicated AI B.Tech tracks (AI & ML, AI & DS), NAAC A++ institutional accreditation, an AICTE IDEA Lab, a Quantum Research Lab — and positions itself as GGSIPU's AI-leading engineering college.",
      "category": "vips-bridge",
      "categoryTitle": "Learn at VSET",
      "pillar": "Contribution",
      "keywords": [
        "why VSET AI",
        "study AI at VIPS",
        "VSET admissions",
        "best AI Delhi",
        "GGSIPU AI college",
        "AI-leading IP University"
      ],
      "datePublished": "2026-04-20",
      "lastVerified": "2026-04-20"
    }
  ]
}
