<?xml version="1.0" encoding="UTF-8"?>
<rss version="2.0"
  xmlns:atom="http://www.w3.org/2005/Atom"
  xmlns:dc="http://purl.org/dc/elements/1.1/"
  xmlns:content="http://purl.org/rss/1.0/modules/content/">
  <channel>
    <title>Treeru Blog</title>
    <link>https://treeru.com</link>
    <description>Insights and practical guides on web development, IT infrastructure, and AI solutions</description>
    <language>en</language>
    <managingEditor>admin@treeru.com (Treeru)</managingEditor>
    <webMaster>admin@treeru.com (Treeru)</webMaster>
    <lastBuildDate>Sat, 09 May 2026 15:33:15 GMT</lastBuildDate>
    <generator>Treeru Next.js RSS Generator</generator>
    <docs>https://www.rssboard.org/rss-specification</docs>
    <atom:link href="https://treeru.com/rss?lang=en" rel="self" type="application/rss+xml"/>
    <image>
      <url>https://treeru.com/opengraph-image</url>
      <title>Treeru</title>
      <link>https://treeru.com</link>
      <width>144</width>
      <height>144</height>
    </image>
    <item>
      <title>Automating Full-Page Screenshots of Web Admin UIs with Playwright — 170 Pages in 8 Minutes</title>
      <link>https://treeru.com/en/blog/playwright-admin-ui-fullpage-screenshot-automation</link>
      <description>How to walk every sub-page of a firewall or router admin UI while logged in and save fullPage PNGs. A standalone Playwright script captures 170+ pages in about 8 minutes.</description>
      <pubDate>Tue, 21 Apr 2026 00:00:00 GMT</pubDate>
      <guid isPermaLink="true">https://treeru.com/en/blog/playwright-admin-ui-fullpage-screenshot-automation</guid>
      <author>admin@treeru.com (Treeru)</author>
      <dc:creator>Treeru</dc:creator>
      <category>TOOL</category>
      <category>Playwright</category>
      <category>Screenshot</category>
      <category>Automation</category>
      <category>Web Admin UI</category>
      <category>Backup</category>
      <category>CLI</category>
      <category>Node script</category>
    </item>
    <item>
      <title>CQRS Documents by Greg Young — An English Reader&apos;s Guide</title>
      <link>https://treeru.com/en/blog/cqrs-documents-greg-young-korean</link>
      <description>A chapter-by-chapter reader&apos;s guide to Greg Young&apos;s 56-page CQRS Documents. Summaries, key takeaways, and commentary on Task Based UI, CQRS, Event Sourcing, and Event Storage.</description>
      <pubDate>Tue, 14 Apr 2026 00:00:00 GMT</pubDate>
      <guid isPermaLink="true">https://treeru.com/en/blog/cqrs-documents-greg-young-korean</guid>
      <author>admin@treeru.com (Treeru)</author>
      <dc:creator>Treeru</dc:creator>
      <category>개발</category>
      <category>CQRS</category>
      <category>DDD</category>
      <category>Event Sourcing</category>
      <category>Greg Young</category>
      <category>Domain Driven Design</category>
      <category>architecture</category>
    </item>
    <item>
      <title>What If We Run EXAONE 4.5-33B on RTX PRO 6000?</title>
      <link>https://treeru.com/en/blog/exaone-45-33b-rtx-pro-6000-benchmark</link>
      <description>LG AI Research EXAONE 4.5-33B benchmarked on RTX PRO 6000 Blackwell on launch day. FP8 NaN crash, custom fork required, BF16 85 questions across 12 scenarios.</description>
      <pubDate>Fri, 10 Apr 2026 00:00:00 GMT</pubDate>
      <guid isPermaLink="true">https://treeru.com/en/blog/exaone-45-33b-rtx-pro-6000-benchmark</guid>
      <author>admin@treeru.com (Treeru)</author>
      <dc:creator>Treeru</dc:creator>
      <category>AI</category>
      <category>EXAONE</category>
      <category>LG AI Research</category>
      <category>RTX PRO 6000</category>
      <category>SGLang</category>
      <category>LLM Benchmark</category>
      <category>Korean AI</category>
      <category>VLM</category>
      <category>Local AI</category>
      <category>Blackwell</category>
    </item>
    <item>
      <title>Zellij Upgrade for Claude Code SSH — Why I Switched from tmux</title>
      <link>https://treeru.com/en/blog/zellij-claude-code-ssh-session</link>
      <description>Practical guide to setting up Zellij for Claude Code SSH workflows. zj automation script, Windows bat files, known bugs, and 4 real advantages over tmux.</description>
      <pubDate>Wed, 08 Apr 2026 00:00:00 GMT</pubDate>
      <guid isPermaLink="true">https://treeru.com/en/blog/zellij-claude-code-ssh-session</guid>
      <author>admin@treeru.com (Treeru)</author>
      <dc:creator>Treeru</dc:creator>
      <category>CLAUDE-CODE</category>
      <category>zellij</category>
      <category>tmux</category>
      <category>Claude Code</category>
      <category>SSH</category>
      <category>terminal</category>
      <category>session management</category>
      <category>Linux</category>
      <category>server management</category>
      <category>automation</category>
      <category>Windows Terminal</category>
    </item>
    <item>
      <title>Turning OPNsense IDS from Passive to Active in 30 Minutes with AI — Stop Ignoring Your Firewall</title>
      <link>https://treeru.com/en/blog/opnsense-ai-ids-security-automation</link>
      <description>Three reasons your OPNsense IDS alerts on everything but blocks nothing — and how we used AI and the REST API to apply 2,310 DROP rules and block 22 real attacks in 30 minutes.</description>
      <pubDate>Mon, 06 Apr 2026 00:00:00 GMT</pubDate>
      <guid isPermaLink="true">https://treeru.com/en/blog/opnsense-ai-ids-security-automation</guid>
      <author>admin@treeru.com (Treeru)</author>
      <dc:creator>Treeru</dc:creator>
      <category>네트워크</category>
      <category>OPNsense</category>
      <category>IDS</category>
      <category>Firewall</category>
      <category>Security Automation</category>
      <category>AI</category>
      <category>Network Security</category>
      <category>Suricata</category>
    </item>
    <item>
      <title>Gemma 4 Benchmark on RTX PRO 6000 — 8 Attempts, 3 Models, and a Qwen3 Showdown</title>
      <link>https://treeru.com/en/blog/gemma4-rtx-pro-6000-benchmark</link>
      <description>Two days after Gemma 4 launch, we benchmarked it on RTX PRO 6000 Blackwell (96GB). After 8 failed attempts with SGLang and vLLM, AWQ quantization delivered 25/25 success. Gemma 4 26B MoE beat Qwen3-32B by 37% in speed.</description>
      <pubDate>Sun, 05 Apr 2026 00:00:00 GMT</pubDate>
      <guid isPermaLink="true">https://treeru.com/en/blog/gemma4-rtx-pro-6000-benchmark</guid>
      <author>admin@treeru.com (Treeru)</author>
      <dc:creator>Treeru</dc:creator>
      <category>AI</category>
      <category>Gemma4</category>
      <category>RTX PRO 6000</category>
      <category>vLLM</category>
      <category>SGLang</category>
      <category>AWQ</category>
      <category>LLM Benchmark</category>
      <category>Qwen3</category>
      <category>Local AI</category>
    </item>
    <item>
      <title>Introduction to Binary Logging — Comparing 6 Formats and Selection Criteria</title>
      <link>https://treeru.com/en/blog/avro-binary-logging-why-not-json</link>
      <description>Comparing 6 binary logging formats (MessagePack, Protobuf, CBOR, FlatBuffers, Avro, Cap&apos;n Proto) by speed, size, schema support, and AI training fitness.</description>
      <pubDate>Thu, 02 Apr 2026 00:00:00 GMT</pubDate>
      <guid isPermaLink="true">https://treeru.com/en/blog/avro-binary-logging-why-not-json</guid>
      <author>admin@treeru.com (Treeru)</author>
      <dc:creator>Treeru</dc:creator>
      <category>로그</category>
      <category>Avro</category>
      <category>Binary Logging</category>
      <category>Logging</category>
      <category>MessagePack</category>
      <category>Protobuf</category>
      <category>AI Training</category>
      <category>Schema Evolution</category>
      <category>Async Queue</category>
    </item>
    <item>
      <title>Embedding Infrastructure Across 15 Servers — CPU Selection to Architecture Tiers</title>
      <link>https://treeru.com/en/blog/embedding-infrastructure-15-servers-cpu-selection</link>
      <description>Real-world embedding benchmark across 15 servers. 7 CPU architectures compared, RAM/NVMe proven irrelevant, pipeline share analyzed, server tiers defined.</description>
      <pubDate>Sun, 29 Mar 2026 00:00:00 GMT</pubDate>
      <guid isPermaLink="true">https://treeru.com/en/blog/embedding-infrastructure-15-servers-cpu-selection</guid>
      <author>admin@treeru.com (Treeru)</author>
      <dc:creator>Treeru</dc:creator>
      <category>TOOL</category>
      <category>embedding</category>
      <category>infrastructure</category>
      <category>CPU</category>
      <category>RAG</category>
      <category>AI server</category>
      <category>server architecture</category>
      <category>performance optimization</category>
    </item>
    <item>
      <title>Tmux + Claude Code — Keep AI Working Even When SSH Drops</title>
      <link>https://treeru.com/en/blog/tmux-claude-code-ssh-persistent-session</link>
      <description>Practical guide to preventing Claude Code session loss on SSH drops using tmux. Full .tmux.conf, automation script, Windows bat files, and troubleshooting.</description>
      <pubDate>Sun, 29 Mar 2026 00:00:00 GMT</pubDate>
      <guid isPermaLink="true">https://treeru.com/en/blog/tmux-claude-code-ssh-persistent-session</guid>
      <author>admin@treeru.com (Treeru)</author>
      <dc:creator>Treeru</dc:creator>
      <category>CLAUDE-CODE</category>
      <category>tmux</category>
      <category>Claude Code</category>
      <category>SSH</category>
      <category>terminal</category>
      <category>session management</category>
      <category>Linux</category>
      <category>server management</category>
      <category>automation</category>
    </item>
    <item>
      <title>Restore winget + Windows Terminal without Microsoft Store and Install Claude Code</title>
      <link>https://treeru.com/en/blog/windows-terminal-winget-claude-code-install</link>
      <description>A hands-on guide to recovering winget on a broken Windows 11 system by manually installing VCLibs, UI.Xaml, and App Installer, then setting up Windows Terminal and Claude Code.</description>
      <pubDate>Sun, 29 Mar 2026 00:00:00 GMT</pubDate>
      <guid isPermaLink="true">https://treeru.com/en/blog/windows-terminal-winget-claude-code-install</guid>
      <author>admin@treeru.com (Treeru)</author>
      <dc:creator>Treeru</dc:creator>
      <category>CLAUDE-CODE</category>
      <category>winget</category>
      <category>Windows Terminal</category>
      <category>Claude Code</category>
      <category>Windows 11</category>
      <category>PowerShell</category>
      <category>App Installer</category>
      <category>manual install</category>
      <category>Microsoft Store</category>
    </item>
    <item>
      <title>AI Image Generation API Price Comparison 2026 — Automating Blog Thumbnails with GPT Image, Gemini, FLUX &amp; DALL-E</title>
      <link>https://treeru.com/en/blog/ai-image-generation-api-price-comparison-2026</link>
      <description>Real-world price comparison of 6 AI image generation APIs for blog thumbnail automation. GPT Image 1.5 Low at $0.009/image to Gemini 3.1 Flash at $0.045. Includes OCR text detection safeguard.</description>
      <pubDate>Wed, 25 Mar 2026 00:00:00 GMT</pubDate>
      <guid isPermaLink="true">https://treeru.com/en/blog/ai-image-generation-api-price-comparison-2026</guid>
      <author>admin@treeru.com (Treeru)</author>
      <dc:creator>Treeru</dc:creator>
      <category>개발</category>

    </item>
    <item>
      <title>One Year of Automated Blogging with Claude API — Real Costs and Natural Writing Tricks</title>
      <link>https://treeru.com/en/blog/claude-api-blog-automation-cost-1year</link>
      <description>After running a Claude Sonnet-powered blog pipeline for a full year, here are the real numbers: ~$0.04 per post, word repetition detection, and prompt tricks to make AI writing sound human.</description>
      <pubDate>Wed, 25 Mar 2026 00:00:00 GMT</pubDate>
      <guid isPermaLink="true">https://treeru.com/en/blog/claude-api-blog-automation-cost-1year</guid>
      <author>admin@treeru.com (Treeru)</author>
      <dc:creator>Treeru</dc:creator>
      <category>개발</category>

    </item>
    <item>
      <title>NVIDIA Driver 595 Upgrade Benchmark — RTX PRO 6000 Blackwell AI Performance Changes</title>
      <link>https://treeru.com/en/blog/nvidia-driver-595-blackwell-upgrade</link>
      <description>Real-world benchmarks after upgrading from NVIDIA driver 590 to 595 on RTX PRO 6000 Blackwell (96GB). Covers SGLang token speed, CUDA compute, memory bandwidth comparisons, and the critical Power Limit reset issue.</description>
      <pubDate>Wed, 25 Mar 2026 00:00:00 GMT</pubDate>
      <guid isPermaLink="true">https://treeru.com/en/blog/nvidia-driver-595-blackwell-upgrade</guid>
      <author>admin@treeru.com (Treeru)</author>
      <dc:creator>Treeru</dc:creator>
      <category>AI</category>
      <category>NVIDIA</category>
      <category>driver upgrade</category>
      <category>RTX PRO 6000</category>
      <category>Blackwell</category>
      <category>CUDA</category>
      <category>SGLang</category>
      <category>benchmark</category>
      <category>local AI</category>
    </item>
    <item>
      <title>Python Startup From 78s to 20s — Finding Bottlenecks With Log Timelines</title>
      <link>https://treeru.com/en/blog/python-startup-speed-optimization-78s-to-20s</link>
      <description>Cut Python program startup from 78 seconds to 20 seconds on an N100 mini PC. Found bottlenecks using log timeline analysis, reduced GitHub API calls from 44 to 5, and eliminated unnecessary waits.</description>
      <pubDate>Wed, 25 Mar 2026 00:00:00 GMT</pubDate>
      <guid isPermaLink="true">https://treeru.com/en/blog/python-startup-speed-optimization-78s-to-20s</guid>
      <author>admin@treeru.com (Treeru)</author>
      <dc:creator>Treeru</dc:creator>
      <category>개발</category>
      <category>Python</category>
      <category>performance optimization</category>
      <category>startup speed</category>
      <category>log analysis</category>
      <category>GitHub API</category>
      <category>N100</category>
    </item>
    <item>
      <title>Selenium Naver CAPTCHA Fix 2026 — The $cdc_ Variable After 7 Failed Attempts</title>
      <link>https://treeru.com/en/blog/selenium-naver-captcha-undetected-chromedriver</link>
      <description>Selenium Naver auto-login suddenly started triggering CAPTCHAs after 1 year. UA changes, stealth scripts, persistent profiles, random delays — 7 failed attempts before finding the real cause: the $cdc_ variable.</description>
      <pubDate>Wed, 25 Mar 2026 00:00:00 GMT</pubDate>
      <guid isPermaLink="true">https://treeru.com/en/blog/selenium-naver-captcha-undetected-chromedriver</guid>
      <author>admin@treeru.com (Treeru)</author>
      <dc:creator>Treeru</dc:creator>
      <category>개발</category>
      <category>Selenium</category>
      <category>Naver</category>
      <category>CAPTCHA</category>
      <category>undetected-chromedriver</category>
      <category>Bot Detection</category>
      <category>Python</category>
      <category>Automation</category>
    </item>
    <item>
      <title>Why Hardcoding Selenium User-Agent Gets You Caught — Chrome Version Mismatch Bot Detection</title>
      <link>https://treeru.com/en/blog/selenium-user-agent-hardcoding-bot-detection</link>
      <description>Hardcoded Chrome/114 User-Agent worked for a year until Chrome auto-updated to 146. Learn how UA and TLS fingerprint version mismatch triggers bot detection and the one-line fix.</description>
      <pubDate>Wed, 25 Mar 2026 00:00:00 GMT</pubDate>
      <guid isPermaLink="true">https://treeru.com/en/blog/selenium-user-agent-hardcoding-bot-detection</guid>
      <author>admin@treeru.com (Treeru)</author>
      <dc:creator>Treeru</dc:creator>
      <category>개발</category>

    </item>
    <item>
      <title>How I Wasted a Day Trying to Hand Off undetected-chromedriver via Subprocess</title>
      <link>https://treeru.com/en/blog/undetected-chromedriver-subprocess-issue</link>
      <description>Tried spawning Chrome with UC in a subprocess and handing it to the main process via debug port. Hit 4 deal-breaking issues: random port override, no detach, stdout pipe inheritance, and invisible tabs.</description>
      <pubDate>Wed, 25 Mar 2026 00:00:00 GMT</pubDate>
      <guid isPermaLink="true">https://treeru.com/en/blog/undetected-chromedriver-subprocess-issue</guid>
      <author>admin@treeru.com (Treeru)</author>
      <dc:creator>Treeru</dc:creator>
      <category>개발</category>
      <category>undetected-chromedriver</category>
      <category>subprocess</category>
      <category>Selenium</category>
      <category>Chrome</category>
      <category>Python</category>
      <category>bot detection</category>
    </item>
    <item>
      <title>TreeRU — Terminal File Explorer with Clipboard Screenshot Saving</title>
      <link>https://treeru.com/en/blog/treeru-terminal-file-explorer</link>
      <description>A terminal file explorer built for AI CLI workflows. Auto-saves screenshots to the current folder, copies file paths with one shortcut, and browses remote servers over SSH. Open-source, installs in 30 seconds.</description>
      <pubDate>Fri, 20 Mar 2026 00:00:00 GMT</pubDate>
      <guid isPermaLink="true">https://treeru.com/en/blog/treeru-terminal-file-explorer</guid>
      <author>admin@treeru.com (Treeru)</author>
      <dc:creator>Treeru</dc:creator>
      <category>TOOL</category>
      <category>TreeRU</category>
      <category>Terminal</category>
      <category>File Explorer</category>
      <category>CLI</category>
      <category>Screenshot</category>
      <category>SSH</category>
      <category>Open Source</category>
    </item>
    <item>
      <title>8 Time-Series Database Engines Compared — 22 Million Row Benchmark</title>
      <link>https://treeru.com/en/blog/timeseries-database-comparison-8-engines</link>
      <description>We benchmarked 8 time-series database engines with 22 million rows. QuestDB 25ms, ClickHouse 547ms, TimescaleDB 1,021ms — real measured results with 6 evaluation criteria.</description>
      <pubDate>Tue, 10 Mar 2026 00:00:00 GMT</pubDate>
      <guid isPermaLink="true">https://treeru.com/en/blog/timeseries-database-comparison-8-engines</guid>
      <author>admin@treeru.com (Treeru)</author>
      <dc:creator>Treeru</dc:creator>
      <category>스토리지</category>

    </item>
    <item>
      <title>DB + RAG Hybrid Search — How We Improved LLM Fact Accuracy by 5x</title>
      <link>https://treeru.com/en/blog/hybrid-search-db-rag-fact-accuracy-5x</link>
      <description>Parallel DB + RAG search architecture that boosted LLM fact accuracy from 17.5% to 92.5%. 4-case result assembly, 18ms search overhead, schema matching at 95% success rate. Real benchmarks included.</description>
      <pubDate>Wed, 04 Mar 2026 00:00:00 GMT</pubDate>
      <guid isPermaLink="true">https://treeru.com/en/blog/hybrid-search-db-rag-fact-accuracy-5x</guid>
      <author>admin@treeru.com (Treeru)</author>
      <dc:creator>Treeru</dc:creator>
      <category>AI</category>

    </item>
    <item>
      <title>Linux Server Setup Checklist — 8 Steps from BIOS to SMART Alerts</title>
      <link>https://treeru.com/en/blog/linux-server-setup-checklist-8-steps</link>
      <description>A battle-tested 8-step checklist for new Linux server installations. Covers BIOS auto-boot, SSH key authentication, kernel.panic, journald persistence, and smartmontools — verified across 16 production servers.</description>
      <pubDate>Wed, 04 Mar 2026 00:00:00 GMT</pubDate>
      <guid isPermaLink="true">https://treeru.com/en/blog/linux-server-setup-checklist-8-steps</guid>
      <author>admin@treeru.com (Treeru)</author>
      <dc:creator>Treeru</dc:creator>
      <category>TOOL</category>
      <category>linux server</category>
      <category>server setup</category>
      <category>SSH key authentication</category>
      <category>smartmontools</category>
      <category>kernel.panic</category>
      <category>journald</category>
      <category>checklist</category>
      <category>Ubuntu</category>
    </item>
    <item>
      <title>LLM Temperature 0.1–0.9: 300-Run Experiment Reveals Optimal Settings per Use Case</title>
      <link>https://treeru.com/en/blog/llm-decoding-parameter-temperature-comparison</link>
      <description>60 questions × 5 temperature levels = 300 experiments on Qwen3-14B. Hallucination rate jumps from 22% at T=0.3 to 72% at T=0.9. Includes per-scenario optimal temperature guide and GPU comparison.</description>
      <pubDate>Wed, 04 Mar 2026 00:00:00 GMT</pubDate>
      <guid isPermaLink="true">https://treeru.com/en/blog/llm-decoding-parameter-temperature-comparison</guid>
      <author>admin@treeru.com (Treeru)</author>
      <dc:creator>Treeru</dc:creator>
      <category>AI</category>

    </item>
    <item>
      <title>Local LLM Benchmark: 6 Models Tested Across 60 Questions and 7 Business Scenarios</title>
      <link>https://treeru.com/en/blog/local-llm-6-model-korean-benchmark</link>
      <description>Head-to-head benchmark of Qwen3-14B, Gemma-12B, KORMo-10B, Qwen3-8B, Phi-4, and Llama-3.1-8B across 60 questions in 7 business scenarios. Hallucination trap results, critical defects, and speed vs quality tradeoffs.</description>
      <pubDate>Wed, 04 Mar 2026 00:00:00 GMT</pubDate>
      <guid isPermaLink="true">https://treeru.com/en/blog/local-llm-6-model-korean-benchmark</guid>
      <author>admin@treeru.com (Treeru)</author>
      <dc:creator>Treeru</dc:creator>
      <category>AI</category>

    </item>
    <item>
      <title>Building a Local RAG Pipeline — From Embedding Model Selection to Hallucination Elimination</title>
      <link>https://treeru.com/en/blog/local-rag-pipeline-embedding-to-hallucination-fix</link>
      <description>Step-by-step RAG pipeline build with BGE-M3 vs mE5-Large comparison (100% vs 86.7% recall), Qdrant Top-3 accuracy at 100%, and real hallucination correction examples. Only 2.9% overhead.</description>
      <pubDate>Wed, 04 Mar 2026 00:00:00 GMT</pubDate>
      <guid isPermaLink="true">https://treeru.com/en/blog/local-rag-pipeline-embedding-to-hallucination-fix</guid>
      <author>admin@treeru.com (Treeru)</author>
      <dc:creator>Treeru</dc:creator>
      <category>AI</category>

    </item>
    <item>
      <title>MoE vs Dense: Why Qwen3-30B-A3B Is Slower Than 14B — And Why Hybrid Inference Failed</title>
      <link>https://treeru.com/en/blog/moe-vs-dense-qwen3-30b-14b-comparison</link>
      <description>Real-world MoE vs Dense comparison: Qwen3-30B-A3B uses 2.1x more VRAM and runs 1.9x slower than 14B Dense. KTransformers hybrid inference produces garbage output. We traced the root cause through 18,432 weight inspections.</description>
      <pubDate>Wed, 04 Mar 2026 00:00:00 GMT</pubDate>
      <guid isPermaLink="true">https://treeru.com/en/blog/moe-vs-dense-qwen3-30b-14b-comparison</guid>
      <author>admin@treeru.com (Treeru)</author>
      <dc:creator>Treeru</dc:creator>
      <category>AI</category>

    </item>
    <item>
      <title>Multi-Server Backup &amp; Recovery Strategy — rsync Pull-Based Automated Backup and Disaster Recovery</title>
      <link>https://treeru.com/en/blog/multi-server-backup-rsync-pull-disaster-recovery</link>
      <description>A practical guide to multi-server backup using rsync Pull method with flock concurrency control, SMART disk health monitoring, and manual disaster recovery procedures for rebuilding any failed server.</description>
      <pubDate>Wed, 04 Mar 2026 00:00:00 GMT</pubDate>
      <guid isPermaLink="true">https://treeru.com/en/blog/multi-server-backup-rsync-pull-disaster-recovery</guid>
      <author>admin@treeru.com (Treeru)</author>
      <dc:creator>Treeru</dc:creator>
      <category>스토리지</category>
      <category>server backup</category>
      <category>rsync</category>
      <category>disaster recovery</category>
      <category>SMART monitoring</category>
      <category>cron</category>
      <category>WD Red Pro</category>
      <category>cold backup</category>
      <category>NFS</category>
    </item>
    <item>
      <title>NVMe Storage Benchmark — 14 SSDs and 6 HDDs Tested with fio</title>
      <link>https://treeru.com/en/blog/nvme-storage-benchmark-20-devices</link>
      <description>14 NVMe SSDs + 6 HDDs benchmarked with fio. Sequential Read from 6,193 to 526 MB/s, Optane 905P QD1 IOPS 3.8x faster than best NAND at 11μs latency. Full ranking tables included.</description>
      <pubDate>Wed, 04 Mar 2026 00:00:00 GMT</pubDate>
      <guid isPermaLink="true">https://treeru.com/en/blog/nvme-storage-benchmark-20-devices</guid>
      <author>admin@treeru.com (Treeru)</author>
      <dc:creator>Treeru</dc:creator>
      <category>하드웨어</category>

    </item>
    <item>
      <title>Qwen3-32B vs 14B — Is 2x Slower Speed Worth the Quality Gain?</title>
      <link>https://treeru.com/en/blog/qwen3-32b-awq-korean-test-vs-14b</link>
      <description>We compared Qwen3-32B-AWQ and 14B-AWQ with identical 60 questions across 7 business scenarios. At 69 vs 135 tok/s, the 32B model is 2x slower — but quality improves only 5.4% on average, with legal reasoning being the sole meaningful advantage.</description>
      <pubDate>Wed, 04 Mar 2026 00:00:00 GMT</pubDate>
      <guid isPermaLink="true">https://treeru.com/en/blog/qwen3-32b-awq-korean-test-vs-14b</guid>
      <author>admin@treeru.com (Treeru)</author>
      <dc:creator>Treeru</dc:creator>
      <category>AI</category>
      <category>Qwen3-32B</category>
      <category>Qwen3-14B</category>
      <category>model size comparison</category>
      <category>Korean benchmark</category>
      <category>SGLang</category>
      <category>AWQ</category>
      <category>local LLM</category>
      <category>benchmark</category>
    </item>
    <item>
      <title>Text2SQL Real-World Test — When LLM Writes SQL Directly</title>
      <link>https://treeru.com/en/blog/text2sql-llm-router-real-world-test</link>
      <description>An LLM router classifies natural language queries, and Text2SQL generates SQL against 13 PostgreSQL tables. Tested on 40 questions across 4 difficulty tiers: LLM-only 10.8% → synonym dictionary 41.2% → Text2SQL 70.1% accuracy.</description>
      <pubDate>Wed, 04 Mar 2026 00:00:00 GMT</pubDate>
      <guid isPermaLink="true">https://treeru.com/en/blog/text2sql-llm-router-real-world-test</guid>
      <author>admin@treeru.com (Treeru)</author>
      <dc:creator>Treeru</dc:creator>
      <category>AI</category>
      <category>Text2SQL</category>
      <category>LLM Router</category>
      <category>Natural Language Query</category>
      <category>Database</category>
      <category>AI Chatbot</category>
      <category>PostgreSQL</category>
      <category>Qwen3</category>
      <category>SQL Generation</category>
    </item>
    <item>
      <title>AWQ Quantization Speed Benchmark: 16 Models, INT4 vs BF16, and the MoE Reversal</title>
      <link>https://treeru.com/en/blog/awq-quantization-speed-comparison</link>
      <description>Benchmarked 16 models on RTX PRO 6000 with SGLang. AWQ INT4: 1.88x speedup at 4B, 2.94x at 32B. MoE Qwen3-30B-A3B at 168.9 tok/s outpaces 14B Dense AWQ. Full benchmark data included.</description>
      <pubDate>Thu, 26 Feb 2026 00:00:00 GMT</pubDate>
      <guid isPermaLink="true">https://treeru.com/en/blog/awq-quantization-speed-comparison</guid>
      <author>admin@treeru.com (Treeru)</author>
      <dc:creator>Treeru</dc:creator>
      <category>AI</category>
      <category>AWQ</category>
      <category>quantization</category>
      <category>INT4</category>
      <category>BF16</category>
      <category>MoE</category>
      <category>benchmark</category>
      <category>SGLang</category>
      <category>token speed</category>
      <category>LLM optimization</category>
    </item>
    <item>
      <title>Grafana + Prometheus for 16 Servers — Installation to Alerting</title>
      <link>https://treeru.com/en/blog/grafana-prometheus-16-server-monitoring</link>
      <description>How we built unified monitoring for 16 servers with Grafana + Prometheus. Covers node_exporter installation, GPU monitoring, alert rules, dashboard design, and operational tips.</description>
      <pubDate>Thu, 26 Feb 2026 00:00:00 GMT</pubDate>
      <guid isPermaLink="true">https://treeru.com/en/blog/grafana-prometheus-16-server-monitoring</guid>
      <author>admin@treeru.com (Treeru)</author>
      <dc:creator>Treeru</dc:creator>
      <category>TOOL</category>

    </item>
    <item>
      <title>SGLang 23-Model Serving Guide — Optimal Configuration for Every Model</title>
      <link>https://treeru.com/en/blog/sglang-multi-model-serving-guide</link>
      <description>Complete SGLang serving configurations for 23 LLMs tested in production. Covers AWQ/BF16/FP8 quantization selection, MoE flashinfer requirements, VRAM management, OOM troubleshooting, and automation scripts.</description>
      <pubDate>Thu, 26 Feb 2026 00:00:00 GMT</pubDate>
      <guid isPermaLink="true">https://treeru.com/en/blog/sglang-multi-model-serving-guide</guid>
      <author>admin@treeru.com (Treeru)</author>
      <dc:creator>Treeru</dc:creator>
      <category>AI</category>

    </item>
    <item>
      <title>Intel Optane 905P + NVMe 3-Tier Storage — AI Server Disk Strategy</title>
      <link>https://treeru.com/en/blog/ai-server-storage-strategy-optane</link>
      <description>How we built a 3-tier storage architecture for AI servers using Intel Optane 905P (10μs random reads) + Samsung 980 PRO + NVMe. Covers RAG vector DB placement, model archival, cold backup via NFS pull, and data flow design.</description>
      <pubDate>Tue, 24 Feb 2026 00:00:00 GMT</pubDate>
      <guid isPermaLink="true">https://treeru.com/en/blog/ai-server-storage-strategy-optane</guid>
      <author>admin@treeru.com (Treeru)</author>
      <dc:creator>Treeru</dc:creator>
      <category>스토리지</category>

    </item>
    <item>
      <title>RTX 5060 Ti vs RTX PRO 6000 — 11x Price Gap, How Much Performance?</title>
      <link>https://treeru.com/en/blog/rtx-5060-ti-vs-rtx-pro-6000-cost-performance</link>
      <description>We compared RTX 5060 Ti ($450) and RTX PRO 6000 ($5,000) inference speed, concurrent throughput, and stability. At 9% of the price, the 5060 Ti delivers 35% of the performance — and a dual-GPU strategy adds 70% throughput.</description>
      <pubDate>Mon, 23 Feb 2026 00:00:00 GMT</pubDate>
      <guid isPermaLink="true">https://treeru.com/en/blog/rtx-5060-ti-vs-rtx-pro-6000-cost-performance</guid>
      <author>admin@treeru.com (Treeru)</author>
      <dc:creator>Treeru</dc:creator>
      <category>하드웨어</category>
      <category>RTX 5060 Ti</category>
      <category>RTX PRO 6000</category>
      <category>GPU comparison</category>
      <category>cost-performance</category>
      <category>local AI</category>
      <category>multi-GPU</category>
    </item>
    <item>
      <title>RTX 5060 Ti Local AI Benchmark — What Can a $450 GPU Actually Do?</title>
      <link>https://treeru.com/en/blog/rtx-5060-ti-local-llm-benchmark</link>
      <description>We benchmarked the RTX 5060 Ti (16GB) running 8B and 14B models: single-user speed, 30-user concurrent load, multi-turn chat patterns, and cross-server inference overhead. Full measured data at $450.</description>
      <pubDate>Sat, 21 Feb 2026 00:00:00 GMT</pubDate>
      <guid isPermaLink="true">https://treeru.com/en/blog/rtx-5060-ti-local-llm-benchmark</guid>
      <author>admin@treeru.com (Treeru)</author>
      <dc:creator>Treeru</dc:creator>
      <category>하드웨어</category>
      <category>RTX 5060 Ti</category>
      <category>GPU benchmark</category>
      <category>local AI</category>
      <category>SGLang</category>
      <category>concurrent users</category>
      <category>budget GPU</category>
    </item>
    <item>
      <title>Qwen3-14B Deep Review — Why It Is Our Top-Ranked Local LLM</title>
      <link>https://treeru.com/en/blog/qwen3-14b-deep-review</link>
      <description>We tested Qwen3-14B-AWQ with 60 questions across 7 business scenarios on two GPUs. Scoring 3.86 overall (1st place among 6 models), it excels at automation but fabricates legal citations and hallucinates non-existent features.</description>
      <pubDate>Thu, 19 Feb 2026 00:00:00 GMT</pubDate>
      <guid isPermaLink="true">https://treeru.com/en/blog/qwen3-14b-deep-review</guid>
      <author>admin@treeru.com (Treeru)</author>
      <dc:creator>Treeru</dc:creator>
      <category>AI</category>
      <category>Qwen3-14B</category>
      <category>model review</category>
      <category>Korean AI</category>
      <category>hallucination test</category>
      <category>local AI</category>
      <category>LLM evaluation</category>
    </item>
    <item>
      <title>Blog SEO in Practice — Internal Links, relatedPosts, and Series Navigation</title>
      <link>https://treeru.com/en/blog/blog-seo-internal-linking-strategy</link>
      <description>Practical internal linking strategy for blogs. Bidirectional relatedPosts, series navigation, category pages, and RSS feed automation to boost dwell time and SEO.</description>
      <pubDate>Wed, 18 Feb 2026 00:00:00 GMT</pubDate>
      <guid isPermaLink="true">https://treeru.com/en/blog/blog-seo-internal-linking-strategy</guid>
      <author>admin@treeru.com (Treeru)</author>
      <dc:creator>Treeru</dc:creator>
      <category>SEO</category>
      <category>Internal Links</category>
      <category>relatedPosts</category>
      <category>Series Navigation</category>
      <category>Blog SEO</category>
      <category>RSS</category>
    </item>
    <item>
      <title>Cross-Server AI Inference — Boosting Throughput 70% with a $450 Secondary GPU</title>
      <link>https://treeru.com/en/blog/cross-server-ai-inference-multi-gpu</link>
      <description>Real-world benchmarks of cross-server AI inference using RTX PRO 6000 + RTX 5060 Ti over 1GbE. 5-18% network overhead, 70% throughput gain, and routing strategies for production workloads.</description>
      <pubDate>Wed, 18 Feb 2026 00:00:00 GMT</pubDate>
      <guid isPermaLink="true">https://treeru.com/en/blog/cross-server-ai-inference-multi-gpu</guid>
      <author>admin@treeru.com (Treeru)</author>
      <dc:creator>Treeru</dc:creator>
      <category>AI</category>

    </item>
    <item>
      <title>RTX 5090 vs RTX PRO 6000 — AI Inference Benchmark Comparison</title>
      <link>https://treeru.com/en/blog/rtx-5090-vs-rtx-pro-6000-ai-benchmark</link>
      <description>Head-to-head RTX 5090 vs RTX PRO 6000 AI inference benchmark. At 32B models they match at ~69 tok/s. For 70B+ models, the PRO 6000&apos;s 96GB VRAM wins. GPU stress test included.</description>
      <pubDate>Mon, 16 Feb 2026 00:00:00 GMT</pubDate>
      <guid isPermaLink="true">https://treeru.com/en/blog/rtx-5090-vs-rtx-pro-6000-ai-benchmark</guid>
      <author>admin@treeru.com (Treeru)</author>
      <dc:creator>Treeru</dc:creator>
      <category>Hardware</category>
      <category>RTX 5090</category>
      <category>RTX PRO 6000</category>
      <category>AI benchmark</category>
      <category>LLM inference</category>
      <category>GPU comparison</category>
      <category>Blackwell</category>
      <category>llama-bench</category>
      <category>gpu_burn</category>
    </item>
    <item>
      <title>PageSpeed Insights API Automation — Site-Wide Performance at a Glance</title>
      <link>https://treeru.com/en/blog/pagespeed-insights-api-automation</link>
      <description>Stop running PageSpeed manually. Automate full-site performance measurement with PageSpeed API v5, sequential scanning, Markdown report generation, and one-click admin UI integration.</description>
      <pubDate>Sun, 15 Feb 2026 00:00:00 GMT</pubDate>
      <guid isPermaLink="true">https://treeru.com/en/blog/pagespeed-insights-api-automation</guid>
      <author>admin@treeru.com (Treeru)</author>
      <dc:creator>Treeru</dc:creator>
      <category>Development</category>
      <category>PageSpeed API</category>
      <category>automation</category>
      <category>performance monitoring</category>
      <category>Lighthouse</category>
      <category>DevOps</category>
    </item>
    <item>
      <title>Serving 7 Companies on 1 GPU — Multi-Tenant Isolation Testing in Practice</title>
      <link>https://treeru.com/en/blog/multi-tenant-ai-isolation-test</link>
      <description>We served 7 companies simultaneously from a single GPU using 5 LoRA adapters and ran 15 isolation tests covering KV cache leaks, adapter isolation, and concurrent personas. Every test passed with zero data leakage.</description>
      <pubDate>Sat, 14 Feb 2026 00:00:00 GMT</pubDate>
      <guid isPermaLink="true">https://treeru.com/en/blog/multi-tenant-ai-isolation-test</guid>
      <author>admin@treeru.com (Treeru)</author>
      <dc:creator>Treeru</dc:creator>
      <category>AI</category>
      <category>multi-tenant</category>
      <category>isolation testing</category>
      <category>LoRA</category>
      <category>KV cache</category>
      <category>security</category>
      <category>local AI</category>
      <category>SGLang</category>
    </item>
    <item>
      <title>Next.js SEO Score 100 — Metadata, Sitemap, and robots.txt</title>
      <link>https://treeru.com/en/blog/nextjs-seo-100-metadata-sitemap-robots</link>
      <description>How to achieve a perfect SEO score in Next.js. Covers generateMetadata patterns, Open Graph image auto-generation, dynamic sitemap.xml, robots.txt, JSON-LD structured data, and IndexNow.</description>
      <pubDate>Fri, 13 Feb 2026 00:00:00 GMT</pubDate>
      <guid isPermaLink="true">https://treeru.com/en/blog/nextjs-seo-100-metadata-sitemap-robots</guid>
      <author>admin@treeru.com (Treeru)</author>
      <dc:creator>Treeru</dc:creator>
      <category>SEO</category>

    </item>
    <item>
      <title>GPU 24/7 Long-Term Monitoring — 13 Days of Real Data</title>
      <link>https://treeru.com/en/blog/gpu-long-term-monitoring-data</link>
      <description>13-day continuous monitoring of RTX PRO 6000 with 3,667 data points. 98.7% of time under 20°C, 600W vs 350W power limit comparison, 87.6% VRAM utilization patterns.</description>
      <pubDate>Thu, 12 Feb 2026 00:00:00 GMT</pubDate>
      <guid isPermaLink="true">https://treeru.com/en/blog/gpu-long-term-monitoring-data</guid>
      <author>admin@treeru.com (Treeru)</author>
      <dc:creator>Treeru</dc:creator>
      <category>Hardware</category>
      <category>GPU monitoring</category>
      <category>long-term operation</category>
      <category>RTX PRO 6000</category>
      <category>thermal management</category>
      <category>power limit</category>
      <category>server stability</category>
    </item>
    <item>
      <title>NFS Cold Backup Server Build — IronWolf 12TB Data Protection</title>
      <link>https://treeru.com/en/blog/nfs-cold-backup-server-ironwolf</link>
      <description>Build an NFS cold backup server with a low-power CPU and IronWolf 12TB drives. RAID 1 mirroring, rsync+cron automated backups, dual NIC traffic separation, and SMART monitoring.</description>
      <pubDate>Wed, 11 Feb 2026 00:00:00 GMT</pubDate>
      <guid isPermaLink="true">https://treeru.com/en/blog/nfs-cold-backup-server-ironwolf</guid>
      <author>admin@treeru.com (Treeru)</author>
      <dc:creator>Treeru</dc:creator>
      <category>Storage</category>
      <category>NFS</category>
      <category>Backup</category>
      <category>IronWolf</category>
      <category>Data Protection</category>
      <category>Server Operations</category>
    </item>
    <item>
      <title>CPU Embedding Benchmark Tool: Automated Performance Comparison Across 8 CPUs</title>
      <link>https://treeru.com/en/blog/cpu-embedding-benchmark-tool</link>
      <description>Benchmarked embedding speed across 8 CPUs with 3 models using Python. 9950X3D at 847.3 sent/s vs N100 at 123.7 — a 6.85x gap. Includes SSH remote execution, JSON output, and per-watt efficiency analysis.</description>
      <pubDate>Tue, 10 Feb 2026 00:00:00 GMT</pubDate>
      <guid isPermaLink="true">https://treeru.com/en/blog/cpu-embedding-benchmark-tool</guid>
      <author>admin@treeru.com (Treeru)</author>
      <dc:creator>Treeru</dc:creator>
      <category>TOOL</category>
      <category>CPU benchmark</category>
      <category>embedding</category>
      <category>Python</category>
      <category>performance comparison</category>
      <category>AI</category>
      <category>automation</category>
      <category>sentence-transformers</category>
    </item>
    <item>
      <title>GPU Power Limit vs AI Performance — Undervolting and Watt Limit Real Data</title>
      <link>https://treeru.com/en/blog/gpu-power-limit-ai-performance</link>
      <description>RTX 5090 undervolting (-3.8% token gen) and RTX PRO 6000 power limit 600W→350W (-22°C temperature, -1.3% token speed). Concurrent load test data for optimal power settings.</description>
      <pubDate>Tue, 10 Feb 2026 00:00:00 GMT</pubDate>
      <guid isPermaLink="true">https://treeru.com/en/blog/gpu-power-limit-ai-performance</guid>
      <author>admin@treeru.com (Treeru)</author>
      <dc:creator>Treeru</dc:creator>
      <category>Hardware</category>
      <category>GPU power limit</category>
      <category>undervolting</category>
      <category>RTX PRO 6000</category>
      <category>RTX 5090</category>
      <category>AI inference</category>
      <category>power efficiency</category>
    </item>
    <item>
      <title>Core Web Vitals Debugging — Finding CLS and LCP Causes with Chrome DevTools</title>
      <link>https://treeru.com/en/blog/core-web-vitals-debugging-chrome-devtools</link>
      <description>Stop guessing from PageSpeed scores — trace the root causes. Use Chrome DevTools Performance tab, Layout Shift highlights, LCP element identification, and curl SSR analysis to fix Core Web Vitals.</description>
      <pubDate>Sun, 08 Feb 2026 00:00:00 GMT</pubDate>
      <guid isPermaLink="true">https://treeru.com/en/blog/core-web-vitals-debugging-chrome-devtools</guid>
      <author>admin@treeru.com (Treeru)</author>
      <dc:creator>Treeru</dc:creator>
      <category>Development</category>

    </item>
    <item>
      <title>LoRA Fine-Tuning for Custom AI Chatbots — From 10 Training Pairs to Multi-Tenant Serving</title>
      <link>https://treeru.com/en/blog/lora-finetuning-custom-ai-chatbot</link>
      <description>Build company-specific AI chatbots with just 10 training pairs and 6 seconds of LoRA fine-tuning. 73MB adapters, hot-swap serving via SGLang, and multi-tenant architecture for 5+ businesses on one GPU.</description>
      <pubDate>Sat, 07 Feb 2026 00:00:00 GMT</pubDate>
      <guid isPermaLink="true">https://treeru.com/en/blog/lora-finetuning-custom-ai-chatbot</guid>
      <author>admin@treeru.com (Treeru)</author>
      <dc:creator>Treeru</dc:creator>
      <category>AI</category>

    </item>
    <item>
      <title>Building an Admin Panel With Next.js — A Production-Ready Guide</title>
      <link>https://treeru.com/en/blog/nextjs-admin-panel-development</link>
      <description>Build a production-ready admin panel with Next.js. Covers architecture decisions, sidebar layout, auth middleware, RESTful API routes, and third-party service integrations.</description>
      <pubDate>Fri, 06 Feb 2026 00:00:00 GMT</pubDate>
      <guid isPermaLink="true">https://treeru.com/en/blog/nextjs-admin-panel-development</guid>
      <author>admin@treeru.com (Treeru)</author>
      <dc:creator>Treeru</dc:creator>
      <category>Development</category>
      <category>Next.js</category>
      <category>admin panel</category>
      <category>API routes</category>
      <category>middleware</category>
      <category>web development</category>
    </item>
    <item>
      <title>SGLang vs vLLM — The Secret Behind the 3x Throughput Gap</title>
      <link>https://treeru.com/en/blog/sglang-vs-vllm-serving-engine-comparison</link>
      <description>SGLang and vLLM tested with Qwen3-32B + 5 LoRA adapters under identical conditions up to 200 concurrent users. SGLang delivers 3x throughput, 6.5x better P95, and zero errors. Full benchmark data included.</description>
      <pubDate>Thu, 05 Feb 2026 00:00:00 GMT</pubDate>
      <guid isPermaLink="true">https://treeru.com/en/blog/sglang-vs-vllm-serving-engine-comparison</guid>
      <author>admin@treeru.com (Treeru)</author>
      <dc:creator>Treeru</dc:creator>
      <category>AI</category>
      <category>SGLang</category>
      <category>vLLM</category>
      <category>serving engine</category>
      <category>throughput</category>
      <category>P95 latency</category>
      <category>local AI</category>
      <category>benchmark</category>
    </item>
    <item>
      <title>The Truth About PageSpeed Scores — What You Need to Know Before Measuring</title>
      <link>https://treeru.com/en/blog/pagespeed-score-measurement-guide</link>
      <description>PageSpeed scores vary even for the same site. Understand ±3-5 point fluctuation causes, Lighthouse scoring weights, Mobile vs Desktop differences, and why diminishing returns hit hard after 90.</description>
      <pubDate>Wed, 04 Feb 2026 00:00:00 GMT</pubDate>
      <guid isPermaLink="true">https://treeru.com/en/blog/pagespeed-score-measurement-guide</guid>
      <author>admin@treeru.com (Treeru)</author>
      <dc:creator>Treeru</dc:creator>
      <category>SEO</category>
      <category>PageSpeed</category>
      <category>Lighthouse</category>
      <category>Core Web Vitals</category>
      <category>Web Performance</category>
      <category>LCP</category>
    </item>
    <item>
      <title>Disabling CPU Turbo Boost for Server Stability</title>
      <link>https://treeru.com/en/blog/cpu-turbo-boost-disable-server-stability</link>
      <description>How to disable CPU turbo boost on Intel N100, AMD 5825U, and 7840HS for server stability. Measured results: -37°C temperature, -50% power, 42% p99 latency improvement.</description>
      <pubDate>Tue, 03 Feb 2026 00:00:00 GMT</pubDate>
      <guid isPermaLink="true">https://treeru.com/en/blog/cpu-turbo-boost-disable-server-stability</guid>
      <author>admin@treeru.com (Treeru)</author>
      <dc:creator>Treeru</dc:creator>
      <category>Hardware</category>
      <category>CPU turbo boost</category>
      <category>server stability</category>
      <category>Linux power management</category>
      <category>Intel N100</category>
      <category>AMD 5825U</category>
      <category>thermal management</category>
      <category>systemd</category>
    </item>
    <item>
      <title>Local LLM Concurrent User Load Test — How Many Users Can an RTX PRO 6000 Handle?</title>
      <link>https://treeru.com/en/blog/local-llm-concurrent-users-load-test</link>
      <description>Real load test data on RTX PRO 6000 with 8B and 32B models serving up to 200 concurrent users. Response times, GPU thermals, throughput, and production architecture recommendations.</description>
      <pubDate>Tue, 03 Feb 2026 00:00:00 GMT</pubDate>
      <guid isPermaLink="true">https://treeru.com/en/blog/local-llm-concurrent-users-load-test</guid>
      <author>admin@treeru.com (Treeru)</author>
      <dc:creator>Treeru</dc:creator>
      <category>AI</category>

    </item>
    <item>
      <title>The Tailwind CSS Animation Library Trap — Cutting tw-animate-css from 15KB to 0.5KB</title>
      <link>https://treeru.com/en/blog/tailwind-animate-css-bundle-size-optimization</link>
      <description>Importing tw-animate-css adds 15KB of CSS to your bundle. Learn how to extract only the utilities you actually use, write @keyframes manually, and slash your render-blocking CSS by 97%.</description>
      <pubDate>Sat, 31 Jan 2026 00:00:00 GMT</pubDate>
      <guid isPermaLink="true">https://treeru.com/en/blog/tailwind-animate-css-bundle-size-optimization</guid>
      <author>admin@treeru.com (Treeru)</author>
      <dc:creator>Treeru</dc:creator>
      <category>Development</category>
      <category>Tailwind CSS</category>
      <category>tw-animate-css</category>
      <category>CSS Bundle</category>
      <category>PageSpeed</category>
      <category>Web Performance</category>
      <category>CSS Optimization</category>
    </item>
    <item>
      <title>Local LLM Business Test (Part 2) — Shopping, Legal &amp; Automation</title>
      <link>https://treeru.com/en/blog/local-llm-business-test-2</link>
      <description>Testing 6 local LLMs on e-commerce customer service, legal consultation, and business automation scenarios. Uncovers critical issues like legal citation fabrication and ethical refusal gaps across models.</description>
      <pubDate>Fri, 30 Jan 2026 00:00:00 GMT</pubDate>
      <guid isPermaLink="true">https://treeru.com/en/blog/local-llm-business-test-2</guid>
      <author>admin@treeru.com (Treeru)</author>
      <dc:creator>Treeru</dc:creator>
      <category>AI</category>
      <category>LLM</category>
      <category>business test</category>
      <category>legal AI</category>
      <category>e-commerce AI</category>
      <category>business automation</category>
      <category>local AI</category>
    </item>
    <item>
      <title>IndexNow for Instant Search Engine Notification — Stop Waiting for Crawlers</title>
      <link>https://treeru.com/en/blog/indexnow-instant-search-engine-notification</link>
      <description>No more waiting days for search indexing after publishing. IndexNow protocol explained: how it works, API implementation, key management, and submission history tracking.</description>
      <pubDate>Wed, 28 Jan 2026 00:00:00 GMT</pubDate>
      <guid isPermaLink="true">https://treeru.com/en/blog/indexnow-instant-search-engine-notification</guid>
      <author>admin@treeru.com (Treeru)</author>
      <dc:creator>Treeru</dc:creator>
      <category>SEO</category>
      <category>IndexNow</category>
      <category>Search Engine</category>
      <category>Bing</category>
      <category>Indexing</category>
      <category>Crawling</category>
    </item>
    <item>
      <title>Local LLM Business Test (Part 1) — Manufacturing, SaaS, Healthcare</title>
      <link>https://treeru.com/en/blog/local-llm-business-test-1</link>
      <description>6 local LLMs tested on 30 real-world business questions across manufacturing parts distribution, SaaS customer support, and healthcare consultation. KORMo leads at 3.82/5, Phi-4 prescribes antibiotics.</description>
      <pubDate>Tue, 27 Jan 2026 00:00:00 GMT</pubDate>
      <guid isPermaLink="true">https://treeru.com/en/blog/local-llm-business-test-1</guid>
      <author>admin@treeru.com (Treeru)</author>
      <dc:creator>Treeru</dc:creator>
      <category>AI</category>
      <category>LLM</category>
      <category>Business Test</category>
      <category>Manufacturing AI</category>
      <category>SaaS</category>
      <category>Healthcare AI</category>
      <category>Local AI</category>
    </item>
    <item>
      <title>Next.js &lt;img&gt; vs &lt;Image&gt; — When to Use Which for PageSpeed</title>
      <link>https://treeru.com/en/blog/nextjs-img-vs-image-pagespeed</link>
      <description>Compare Next.js Image component auto-optimization vs manual img tag with AVIF. Learn when manually optimized img tags outperform the Image component for PageSpeed scores.</description>
      <pubDate>Tue, 27 Jan 2026 00:00:00 GMT</pubDate>
      <guid isPermaLink="true">https://treeru.com/en/blog/nextjs-img-vs-image-pagespeed</guid>
      <author>admin@treeru.com (Treeru)</author>
      <dc:creator>Treeru</dc:creator>
      <category>Development</category>
      <category>Next.js</category>
      <category>Image component</category>
      <category>img tag</category>
      <category>PageSpeed</category>
      <category>image optimization</category>
      <category>AVIF</category>
    </item>
    <item>
      <title>GPU Monitoring Automation — 24/7 Surveillance with a Single Shell Script</title>
      <link>https://treeru.com/en/blog/gpu-monitoring-shell-script-automation</link>
      <description>Automate GPU server monitoring with nvidia-smi and cron. 5-minute interval logging in CSV format, threshold-based alerts for temperature and utilization, and log rotation — all in ~50 lines of shell script.</description>
      <pubDate>Mon, 26 Jan 2026 00:00:00 GMT</pubDate>
      <guid isPermaLink="true">https://treeru.com/en/blog/gpu-monitoring-shell-script-automation</guid>
      <author>admin@treeru.com (Treeru)</author>
      <dc:creator>Treeru</dc:creator>
      <category>TOOL</category>
      <category>GPU Monitoring</category>
      <category>nvidia-smi</category>
      <category>Shell Script</category>
      <category>cron</category>
      <category>Server Ops</category>
      <category>Automation</category>
    </item>
    <item>
      <title>Reverse Proxy on an N100 Mini PC — Why It&apos;s More Than Enough</title>
      <link>https://treeru.com/en/blog/reverse-proxy-n100-mini-pc</link>
      <description>Build a reverse proxy on an N100 mini PC with Caddy for automatic HTTPS, multi-service routing, and rock-solid uptime. Real-world resource usage data and Nginx comparison included.</description>
      <pubDate>Sat, 24 Jan 2026 00:00:00 GMT</pubDate>
      <guid isPermaLink="true">https://treeru.com/en/blog/reverse-proxy-n100-mini-pc</guid>
      <author>admin@treeru.com (Treeru)</author>
      <dc:creator>Treeru</dc:creator>
      <category>Network</category>
      <category>reverse proxy</category>
      <category>Caddy</category>
      <category>N100</category>
      <category>mini PC</category>
      <category>HTTPS</category>
    </item>
    <item>
      <title>LCP from 110s to 3.4s — The motion/react opacity:0 Trap</title>
      <link>https://treeru.com/en/blog/lcp-optimization-motion-opacity-trap</link>
      <description>How motion/react&apos;s initial={{ opacity: 0 }} breaks LCP in Next.js SSR by hiding images from Lighthouse. Fix with scale transform, lazy loading, and AVIF for 110s→3.4s LCP.</description>
      <pubDate>Fri, 23 Jan 2026 00:00:00 GMT</pubDate>
      <guid isPermaLink="true">https://treeru.com/en/blog/lcp-optimization-motion-opacity-trap</guid>
      <author>admin@treeru.com (Treeru)</author>
      <dc:creator>Treeru</dc:creator>
      <category>Development</category>
      <category>LCP</category>
      <category>motion/react</category>
      <category>Framer Motion</category>
      <category>PageSpeed</category>
      <category>Core Web Vitals</category>
      <category>SSR</category>
      <category>Next.js</category>
    </item>
    <item>
      <title>LLM Hallucination Test — Which Local Models Fabricate Information?</title>
      <link>https://treeru.com/en/blog/local-llm-hallucination-test</link>
      <description>We tested 6 local LLMs with trap questions about nonexistent products, fake legal citations, and medical diagnosis prompts. Qwen3-14B passed 4/6 while Phi-4 passed only 1/6 and prescribed antibiotics.</description>
      <pubDate>Fri, 23 Jan 2026 00:00:00 GMT</pubDate>
      <guid isPermaLink="true">https://treeru.com/en/blog/local-llm-hallucination-test</guid>
      <author>admin@treeru.com (Treeru)</author>
      <dc:creator>Treeru</dc:creator>
      <category>AI</category>

    </item>
    <item>
      <title>Building a Lightweight Backend with Drizzle ORM + SQLite</title>
      <link>https://treeru.com/en/blog/drizzle-orm-sqlite-lightweight-backend</link>
      <description>When PostgreSQL is overkill, SQLite is the answer. Drizzle ORM vs Prisma comparison, schema definition, migration workflow, and WAL mode for concurrent read performance.</description>
      <pubDate>Tue, 20 Jan 2026 00:00:00 GMT</pubDate>
      <guid isPermaLink="true">https://treeru.com/en/blog/drizzle-orm-sqlite-lightweight-backend</guid>
      <author>admin@treeru.com (Treeru)</author>
      <dc:creator>Treeru</dc:creator>
      <category>Development</category>

    </item>
    <item>
      <title>Local LLM Korean Language Comparison — 6 Models Tested with 10 Real Questions</title>
      <link>https://treeru.com/en/blog/local-llm-korean-test</link>
      <description>We tested 6 local LLMs on Korean language ability with 10 real-world questions. Honorifics, business tone, language contamination, and natural expression compared across Gemma, Qwen3, KORMo, Llama, and Phi-4.</description>
      <pubDate>Tue, 20 Jan 2026 00:00:00 GMT</pubDate>
      <guid isPermaLink="true">https://treeru.com/en/blog/local-llm-korean-test</guid>
      <author>admin@treeru.com (Treeru)</author>
      <dc:creator>Treeru</dc:creator>
      <category>AI</category>
      <category>LLM</category>
      <category>Korean</category>
      <category>local AI</category>
      <category>Qwen3</category>
      <category>KORMo</category>
      <category>Gemma</category>
      <category>benchmark</category>
    </item>
    <item>
      <title>Image Optimization Alone Boosted PageSpeed by 20 Points — 36MB to 0.8MB</title>
      <link>https://treeru.com/en/blog/image-optimization-pagespeed-36mb-to-800kb</link>
      <description>Compressed 6 Hero images from 36MB to 0.8MB using sharp. WebP vs AVIF comparison, fetchPriority, lazy loading strategy — complete image optimization walkthrough.</description>
      <pubDate>Mon, 19 Jan 2026 00:00:00 GMT</pubDate>
      <guid isPermaLink="true">https://treeru.com/en/blog/image-optimization-pagespeed-36mb-to-800kb</guid>
      <author>admin@treeru.com (Treeru)</author>
      <dc:creator>Treeru</dc:creator>
      <category>Development</category>
      <category>Image Optimization</category>
      <category>sharp</category>
      <category>WebP</category>
      <category>AVIF</category>
      <category>PageSpeed</category>
      <category>LCP</category>
    </item>
    <item>
      <title>8B vs 14B vs 32B LLM: Concurrent User Benchmark on a Single GPU</title>
      <link>https://treeru.com/en/blog/local-llm-model-size-comparison</link>
      <description>Head-to-head benchmark of Qwen3 8B, 14B, and 32B on RTX PRO 6000 with up to 200 concurrent users. 3x speed difference, throughput scaling data, GPU thermals, and quality vs speed trade-off analysis.</description>
      <pubDate>Fri, 16 Jan 2026 00:00:00 GMT</pubDate>
      <guid isPermaLink="true">https://treeru.com/en/blog/local-llm-model-size-comparison</guid>
      <author>admin@treeru.com (Treeru)</author>
      <dc:creator>Treeru</dc:creator>
      <category>AI</category>

    </item>
    <item>
      <title>SSH Key Authentication for Multi-Server Management — Passwordless, Secure Access</title>
      <link>https://treeru.com/en/blog/ssh-key-auth-multi-server-management</link>
      <description>From Ed25519 key generation to deployment, disabling password auth, SSH config aliases, ProxyJump, and fail2ban — a complete guide to managing multiple servers securely.</description>
      <pubDate>Fri, 16 Jan 2026 00:00:00 GMT</pubDate>
      <guid isPermaLink="true">https://treeru.com/en/blog/ssh-key-auth-multi-server-management</guid>
      <author>admin@treeru.com (Treeru)</author>
      <dc:creator>Treeru</dc:creator>
      <category>Network</category>
      <category>SSH</category>
      <category>Key Authentication</category>
      <category>Server Management</category>
      <category>Security</category>
      <category>Linux</category>
    </item>
    <item>
      <title>sharp Image Optimization — Automate Web Image Compression With One Command</title>
      <link>https://treeru.com/en/blog/sharp-image-optimization-automation</link>
      <description>Automate image optimization with the sharp library. Learn WebP and AVIF conversion, batch processing 200 images in 30 seconds, quality tuning, and build pipeline integration.</description>
      <pubDate>Thu, 15 Jan 2026 00:00:00 GMT</pubDate>
      <guid isPermaLink="true">https://treeru.com/en/blog/sharp-image-optimization-automation</guid>
      <author>admin@treeru.com (Treeru)</author>
      <dc:creator>Treeru</dc:creator>
      <category>TOOL</category>

    </item>
    <item>
      <title>Next.js PageSpeed Optimization — Mobile 38 to 88</title>
      <link>https://treeru.com/en/blog/nextjs-pagespeed-optimization-38-to-88</link>
      <description>A real-world record of improving a Next.js site&apos;s PageSpeed Mobile score from 38 to 88. Covers image compression, CLS elimination, font optimization, and LCP improvement across 4 phases with measured results.</description>
      <pubDate>Wed, 14 Jan 2026 00:00:00 GMT</pubDate>
      <guid isPermaLink="true">https://treeru.com/en/blog/nextjs-pagespeed-optimization-38-to-88</guid>
      <author>admin@treeru.com (Treeru)</author>
      <dc:creator>Treeru</dc:creator>
      <category>Development</category>
      <category>PageSpeed</category>
      <category>Next.js</category>
      <category>web performance</category>
      <category>Core Web Vitals</category>
      <category>LCP</category>
      <category>CLS</category>
    </item>
    <item>
      <title>RTX PRO 6000 LLM Token Generation Speed — 6 Models Measured at 350W</title>
      <link>https://treeru.com/en/blog/rtx-pro-6000-llm-token-speed</link>
      <description>RTX PRO 6000 (96GB) token generation speed measured across 6 LLMs at 350W. From Llama-3.1-8B at 218 tok/s to KORMo at 60 tok/s — speed vs quality trade-off analysis with full data.</description>
      <pubDate>Tue, 13 Jan 2026 00:00:00 GMT</pubDate>
      <guid isPermaLink="true">https://treeru.com/en/blog/rtx-pro-6000-llm-token-speed</guid>
      <author>admin@treeru.com (Treeru)</author>
      <dc:creator>Treeru</dc:creator>
      <category>Hardware</category>

    </item>
    <item>
      <title>Implementing Next.js Authentication with Better Auth — Sessions, Admin Roles, and SQLite</title>
      <link>https://treeru.com/en/blog/better-auth-nextjs-authentication</link>
      <description>Complete guide to Better Auth for Next.js: email/password auth, cookie-based sessions, admin role separation, Drizzle ORM + SQLite, and security hardening.</description>
      <pubDate>Mon, 12 Jan 2026 00:00:00 GMT</pubDate>
      <guid isPermaLink="true">https://treeru.com/en/blog/better-auth-nextjs-authentication</guid>
      <author>admin@treeru.com (Treeru)</author>
      <dc:creator>Treeru</dc:creator>
      <category>Development</category>
      <category>Better Auth</category>
      <category>Next.js</category>
      <category>Authentication</category>
      <category>Session Management</category>
      <category>Admin Roles</category>
      <category>SQLite</category>
    </item>
    <item>
      <title>Why CJK Web Fonts Kill Your PageSpeed — Render-Blocking CSS from 26KB to 3KB</title>
      <link>https://treeru.com/en/blog/korean-webfont-pagespeed-render-blocking-css</link>
      <description>Loading CJK fonts via next/font/google generates 124 @font-face rules and 26KB of render-blocking CSS. Learn how custom subsets, font-display:optional, and self-hosting cut it to 3KB with zero CLS.</description>
      <pubDate>Sat, 10 Jan 2026 00:00:00 GMT</pubDate>
      <guid isPermaLink="true">https://treeru.com/en/blog/korean-webfont-pagespeed-render-blocking-css</guid>
      <author>admin@treeru.com (Treeru)</author>
      <dc:creator>Treeru</dc:creator>
      <category>Development</category>
      <category>Web Fonts</category>
      <category>CJK Fonts</category>
      <category>PageSpeed</category>
      <category>Render-Blocking CSS</category>
      <category>font-display</category>
      <category>Next.js</category>
    </item>
    <item>
      <title>RTX Pro 6000 Local LLM Benchmark — 6 Models, 360 Questions, Complete Ranking</title>
      <link>https://treeru.com/en/blog/rtx-pro-6000-local-llm-benchmark</link>
      <description>Comprehensive benchmark of 6 local LLMs on RTX Pro 6000 (96GB) across 7 scenarios and 360 questions. Qwen3-14B leads at 3.86/5 with 135 tok/s. Full speed, quality, and hallucination test results.</description>
      <pubDate>Fri, 09 Jan 2026 00:00:00 GMT</pubDate>
      <guid isPermaLink="true">https://treeru.com/en/blog/rtx-pro-6000-local-llm-benchmark</guid>
      <author>admin@treeru.com (Treeru)</author>
      <dc:creator>Treeru</dc:creator>
      <category>Hardware</category>
    </item>
    <item>
      <title>Diagnosing Office Network Speed with iperf3 — 1G vs 2.5G Real Measurements</title>
      <link>https://treeru.com/en/blog/iperf3-office-network-speed-test</link>
      <description>Measure real office network speed with iperf3. Compare 1Gbps vs 2.5Gbps NIC performance, diagnose bottlenecks with ethtool, and troubleshoot slow servers with cable and port testing.</description>
      <pubDate>Thu, 08 Jan 2026 00:00:00 GMT</pubDate>
      <guid isPermaLink="true">https://treeru.com/en/blog/iperf3-office-network-speed-test</guid>
      <author>admin@treeru.com (Treeru)</author>
      <dc:creator>Treeru</dc:creator>
      <category>Network</category>
    </item>
    <item>
      <title>CLS 0.5 to Zero — The TypeWriter Animation Trap and How to Fix It</title>
      <link>https://treeru.com/en/blog/cls-zero-typewriter-animation-fix</link>
      <description>Typing animation caused CLS 0.481. The Invisible Placeholder pattern eliminates layout shift with zero visual change. Mobile PageSpeed jumped 25 points.</description>
      <pubDate>Tue, 06 Jan 2026 00:00:00 GMT</pubDate>
      <guid isPermaLink="true">https://treeru.com/en/blog/cls-zero-typewriter-animation-fix</guid>
      <author>admin@treeru.com (Treeru)</author>
      <dc:creator>Treeru</dc:creator>
      <category>Development</category>
      <category>CLS</category>
      <category>Core Web Vitals</category>
      <category>PageSpeed</category>
      <category>Animation</category>
      <category>Layout Shift</category>
      <category>Next.js</category>
    </item>
    <item>
      <title>Building AI Server Infrastructure in the Office — A 16-Server Setup Guide</title>
      <link>https://treeru.com/en/blog/office-ai-server-infrastructure</link>
      <description>How we built an on-premise AI infrastructure with 16 servers — no cloud required. Covers server role separation, SSH mesh security, 2.5Gbps network tiers, CPU boost control, and external server isolation.</description>
      <pubDate>Tue, 06 Jan 2026 00:00:00 GMT</pubDate>
      <guid isPermaLink="true">https://treeru.com/en/blog/office-ai-server-infrastructure</guid>
      <author>admin@treeru.com (Treeru)</author>
      <dc:creator>Treeru</dc:creator>
      <category>Hardware</category>
    </item>
    <item>
      <title>Building Websites with AI Coding Tools — How Far Can Prompts Go?</title>
      <link>https://treeru.com/en/blog/ai-coding-tool-website-development</link>
      <description>Hands-on experience building websites with AI coding tools. What works, what fails, prompt writing tips, and productivity-vs-cost analysis from 4 real projects.</description>
      <pubDate>Mon, 05 Jan 2026 00:00:00 GMT</pubDate>
      <guid isPermaLink="true">https://treeru.com/en/blog/ai-coding-tool-website-development</guid>
      <author>admin@treeru.com (Treeru)</author>
      <dc:creator>Treeru</dc:creator>
      <category>TOOL</category>
      <category>AI Coding</category>
      <category>Web Development</category>
      <category>Prompt Engineering</category>
      <category>Productivity</category>
      <category>AI Tools</category>
      <category>Developer</category>
    </item>
    <item>
      <title>Web Deployment Automation — From Git Push to Production in One Command</title>
      <link>https://treeru.com/en/blog/web-deployment-automation-git-to-production</link>
      <description>Build a reliable deployment pipeline without CI/CD tools. Git push → build → restart pipeline, rollback strategy, environment variable management, and multi-server synchronization.</description>
      <pubDate>Sat, 03 Jan 2026 00:00:00 GMT</pubDate>
      <guid isPermaLink="true">https://treeru.com/en/blog/web-deployment-automation-git-to-production</guid>
      <author>admin@treeru.com (Treeru)</author>
      <dc:creator>Treeru</dc:creator>
      <category>Development</category>
      <category>Deployment Automation</category>
      <category>Git</category>
      <category>CI/CD</category>
      <category>Shell Script</category>
      <category>DevOps</category>
      <category>Rollback</category>
    </item>
    <item>
      <title>WireGuard VPN for Remote Office Server Access</title>
      <link>https://treeru.com/en/blog/wireguard-vpn-office-remote-access</link>
      <description>Step-by-step guide to building a WireGuard VPN server on OPNsense for secure remote access to office servers from laptops and mobile devices — with key management and troubleshooting.</description>
      <pubDate>Sat, 27 Dec 2025 00:00:00 GMT</pubDate>
      <guid isPermaLink="true">https://treeru.com/en/blog/wireguard-vpn-office-remote-access</guid>
      <author>admin@treeru.com (Treeru)</author>
      <dc:creator>Treeru</dc:creator>
      <category>Network</category>
      <category>WireGuard</category>
      <category>VPN</category>
      <category>Remote Access</category>
      <category>OPNsense</category>
      <category>Network Security</category>
    </item>
    <item>
      <title>Caddy HTTPS Automation — Let&apos;s Encrypt Certificates That Renew Forever</title>
      <link>https://treeru.com/en/blog/caddy-https-auto-ssl-setup</link>
      <description>Replace Nginx complexity with Caddy. Learn Caddyfile syntax, one-line reverse proxy, multi-domain hosting, security headers, and automatic Let&apos;s Encrypt certificate renewal.</description>
      <pubDate>Tue, 23 Dec 2025 00:00:00 GMT</pubDate>
      <guid isPermaLink="true">https://treeru.com/en/blog/caddy-https-auto-ssl-setup</guid>
      <author>admin@treeru.com (Treeru)</author>
      <dc:creator>Treeru</dc:creator>
      <category>Development</category>
    </item>
    <item>
      <title>Top 10 Web Design Trends for 2024 — AI, Dark Mode, Glassmorphism &amp; More</title>
      <link>https://treeru.com/en/blog/website-design-trends-2024</link>
      <description>Explore the top 10 web design trends for 2024. From AI-powered design tools and dark mode to glassmorphism and micro-interactions, learn what shapes modern web design.</description>
      <pubDate>Mon, 22 Dec 2025 00:00:00 GMT</pubDate>
      <guid isPermaLink="true">https://treeru.com/en/blog/website-design-trends-2024</guid>
      <author>admin@treeru.com (Treeru)</author>
      <dc:creator>Treeru</dc:creator>
      <category>Design</category>
    </item>
    <item>
      <title>Building an Office Network with OPNsense — A Firewall Router Instead of Consumer Wi-Fi</title>
      <link>https://treeru.com/en/blog/opnsense-office-network-setup</link>
      <description>How we installed OPNsense on an N100 mini PC to build a small office network with VLAN segmentation, granular firewall rules, and WireGuard VPN — all for under $300.</description>
      <pubDate>Sat, 20 Dec 2025 00:00:00 GMT</pubDate>
      <guid isPermaLink="true">https://treeru.com/en/blog/opnsense-office-network-setup</guid>
      <author>admin@treeru.com (Treeru)</author>
      <dc:creator>Treeru</dc:creator>
      <category>Network</category>
      <category>OPNsense</category>
      <category>Firewall</category>
      <category>VLAN</category>
      <category>Network Infrastructure</category>
      <category>Mini PC</category>
      <category>Office Network</category>
    </item>
    <item>
      <title>Claude Code Hands-On Review: AI That Writes Your Code From the Terminal</title>
      <link>https://treeru.com/en/blog/claude-code-ai-coding-review</link>
      <description>3-month hands-on review of Claude Code CLI. Wrote 23 blog posts, pushed PageSpeed from 38 to 88, automated server management. Includes Cursor comparison and cost analysis.</description>
      <pubDate>Fri, 19 Dec 2025 00:00:00 GMT</pubDate>
      <guid isPermaLink="true">https://treeru.com/en/blog/claude-code-ai-coding-review</guid>
      <author>admin@treeru.com (Treeru)</author>
      <dc:creator>Treeru</dc:creator>
      <category>TOOL</category>
      <category>Claude Code</category>
      <category>AI coding</category>
      <category>CLI</category>
      <category>coding tools</category>
      <category>Cursor</category>
      <category>AI development</category>
      <category>developer tools</category>
    </item>
    <item>
      <title>The Complete Website SEO Optimization Guide</title>
      <link>https://treeru.com/en/blog/seo-best-practices</link>
      <description>A practical guide to search engine optimization. Covers meta tags, content strategy, Core Web Vitals, link building, and continuous monitoring for Google rankings.</description>
      <pubDate>Thu, 18 Dec 2025 00:00:00 GMT</pubDate>
      <guid isPermaLink="true">https://treeru.com/en/blog/seo-best-practices</guid>
      <author>admin@treeru.com (Treeru)</author>
      <dc:creator>Treeru</dc:creator>
      <category>SEO</category>
      <category>search optimization</category>
      <category>marketing</category>
      <category>Google</category>
    </item>
    <item>
      <title>Running Next.js in Production With PM2 — Zero-Downtime Deploys and Monitoring</title>
      <link>https://treeru.com/en/blog/pm2-nextjs-production-deployment</link>
      <description>Run Next.js on your own server without Vercel. Learn PM2 ecosystem config, cluster vs fork mode, zero-downtime reload, pm2-logrotate, and systemd auto-restart.</description>
      <pubDate>Wed, 17 Dec 2025 00:00:00 GMT</pubDate>
      <guid isPermaLink="true">https://treeru.com/en/blog/pm2-nextjs-production-deployment</guid>
      <author>admin@treeru.com (Treeru)</author>
      <dc:creator>Treeru</dc:creator>
      <category>Development</category>
    </item>
    <item>
      <title>Why Responsive Web Design Matters — Mobile-First CSS Techniques</title>
      <link>https://treeru.com/en/blog/responsive-web-design</link>
      <description>Learn why responsive web design is essential for SEO and UX. Covers fluid grids, media queries, flexible images, and mobile-first approach with practical CSS examples.</description>
      <pubDate>Mon, 15 Dec 2025 00:00:00 GMT</pubDate>
      <guid isPermaLink="true">https://treeru.com/en/blog/responsive-web-design</guid>
      <author>admin@treeru.com (Treeru)</author>
      <dc:creator>Treeru</dc:creator>
      <category>Development</category>
    </item>
  </channel>
</rss>