<?xml version="1.0" encoding="UTF-8"?>
<rss version="2.0" xmlns:atom="http://www.w3.org/2005/Atom" xmlns:dc="http://purl.org/dc/elements/1.1/">
  <channel>
    <title>Forem: Durgesh</title>
    <description>The latest articles on Forem by Durgesh (@ai-ops).</description>
    <link>https://forem.com/ai-ops</link>
    <image>
      <url>https://media2.dev.to/dynamic/image/width=90,height=90,fit=cover,gravity=auto,format=auto/https:%2F%2Fdev-to-uploads.s3.amazonaws.com%2Fuploads%2Fuser%2Fprofile_image%2F1242959%2F27e40993-1a2b-4f08-b7c3-56a7eef92425.png</url>
      <title>Forem: Durgesh</title>
      <link>https://forem.com/ai-ops</link>
    </image>
    <atom:link rel="self" type="application/rss+xml" href="https://forem.com/feed/ai-ops"/>
    <language>en</language>
    <item>
      <title>Software architecture</title>
      <dc:creator>Durgesh</dc:creator>
      <pubDate>Sat, 23 Aug 2025 05:07:09 +0000</pubDate>
      <link>https://forem.com/ai-ops/software-architecture-1ej9</link>
      <guid>https://forem.com/ai-ops/software-architecture-1ej9</guid>
      <description>&lt;p&gt;&lt;a href="https://media2.dev.to/dynamic/image/width=800%2Cheight=%2Cfit=scale-down%2Cgravity=auto%2Cformat=auto/https%3A%2F%2Fdev-to-uploads.s3.amazonaws.com%2Fuploads%2Farticles%2Fnmpotqzytejj02haqjsx.png" class="article-body-image-wrapper"&gt;&lt;img src="https://media2.dev.to/dynamic/image/width=800%2Cheight=%2Cfit=scale-down%2Cgravity=auto%2Cformat=auto/https%3A%2F%2Fdev-to-uploads.s3.amazonaws.com%2Fuploads%2Farticles%2Fnmpotqzytejj02haqjsx.png" alt=" " width="766" height="885"&gt;&lt;/a&gt;&lt;/p&gt;

</description>
    </item>
    <item>
      <title>Prompt engineering concepts</title>
      <dc:creator>Durgesh</dc:creator>
      <pubDate>Sat, 23 Aug 2025 05:04:06 +0000</pubDate>
      <link>https://forem.com/ai-ops/prompt-engineering-concepts-1jgc</link>
      <guid>https://forem.com/ai-ops/prompt-engineering-concepts-1jgc</guid>
      <description>&lt;p&gt;&lt;a href="https://media2.dev.to/dynamic/image/width=800%2Cheight=%2Cfit=scale-down%2Cgravity=auto%2Cformat=auto/https%3A%2F%2Fdev-to-uploads.s3.amazonaws.com%2Fuploads%2Farticles%2F3ey7fdqwt49u84e21hhv.gif" class="article-body-image-wrapper"&gt;&lt;img src="https://media2.dev.to/dynamic/image/width=800%2Cheight=%2Cfit=scale-down%2Cgravity=auto%2Cformat=auto/https%3A%2F%2Fdev-to-uploads.s3.amazonaws.com%2Fuploads%2Farticles%2F3ey7fdqwt49u84e21hhv.gif" alt=" " width="800" height="1120"&gt;&lt;/a&gt;&lt;/p&gt;

</description>
    </item>
    <item>
      <title>OSI Model</title>
      <dc:creator>Durgesh</dc:creator>
      <pubDate>Sat, 23 Aug 2025 04:57:00 +0000</pubDate>
      <link>https://forem.com/ai-ops/devsecops-3p99</link>
      <guid>https://forem.com/ai-ops/devsecops-3p99</guid>
      <description>&lt;p&gt;Application Layer &lt;br&gt;
Presentation Layer &lt;br&gt;
Session Layer&lt;br&gt;
Transport Layer&lt;br&gt;
Network Layer&lt;br&gt;
Data Link Layer&lt;br&gt;
Physical Layer&lt;/p&gt;

</description>
    </item>
    <item>
      <title>Kubernetes vs conventional</title>
      <dc:creator>Durgesh</dc:creator>
      <pubDate>Sat, 23 Aug 2025 04:54:32 +0000</pubDate>
      <link>https://forem.com/ai-ops/kubernetes-vs-conventional-4p42</link>
      <guid>https://forem.com/ai-ops/kubernetes-vs-conventional-4p42</guid>
      <description>&lt;p&gt;&lt;a href="https://media2.dev.to/dynamic/image/width=800%2Cheight=%2Cfit=scale-down%2Cgravity=auto%2Cformat=auto/https%3A%2F%2Fdev-to-uploads.s3.amazonaws.com%2Fuploads%2Farticles%2F0d7ojqvrt0l9f8od26mf.jpg" class="article-body-image-wrapper"&gt;&lt;img src="https://media2.dev.to/dynamic/image/width=800%2Cheight=%2Cfit=scale-down%2Cgravity=auto%2Cformat=auto/https%3A%2F%2Fdev-to-uploads.s3.amazonaws.com%2Fuploads%2Farticles%2F0d7ojqvrt0l9f8od26mf.jpg" alt=" " width="800" height="1036"&gt;&lt;/a&gt;&lt;/p&gt;

</description>
    </item>
    <item>
      <title>Use IAM roles to connect GitHub Actions to actions in AWS</title>
      <dc:creator>Durgesh</dc:creator>
      <pubDate>Sat, 23 Aug 2025 00:48:17 +0000</pubDate>
      <link>https://forem.com/ai-ops/use-iam-roles-to-connect-github-actions-to-actions-in-aws-c6a</link>
      <guid>https://forem.com/ai-ops/use-iam-roles-to-connect-github-actions-to-actions-in-aws-c6a</guid>
      <description>&lt;p&gt;&lt;a href="https://aws.amazon.com/blogs/security/use-iam-roles-to-connect-github-actions-to-actions-in-aws/" rel="noopener noreferrer"&gt;https://aws.amazon.com/blogs/security/use-iam-roles-to-connect-github-actions-to-actions-in-aws/&lt;/a&gt;&lt;/p&gt;

</description>
      <category>aws</category>
      <category>githubactions</category>
      <category>security</category>
      <category>devops</category>
    </item>
    <item>
      <title>AWS = Azure: Same difference</title>
      <dc:creator>Durgesh</dc:creator>
      <pubDate>Fri, 22 Aug 2025 20:30:23 +0000</pubDate>
      <link>https://forem.com/ai-ops/aws-azure-same-difference-95h</link>
      <guid>https://forem.com/ai-ops/aws-azure-same-difference-95h</guid>
      <description>&lt;p&gt;&lt;a href="https://media2.dev.to/dynamic/image/width=800%2Cheight=%2Cfit=scale-down%2Cgravity=auto%2Cformat=auto/https%3A%2F%2Fdev-to-uploads.s3.amazonaws.com%2Fuploads%2Farticles%2Fkjch6tja7s4my2hkm2nw.jpeg" class="article-body-image-wrapper"&gt;&lt;img src="https://media2.dev.to/dynamic/image/width=800%2Cheight=%2Cfit=scale-down%2Cgravity=auto%2Cformat=auto/https%3A%2F%2Fdev-to-uploads.s3.amazonaws.com%2Fuploads%2Farticles%2Fkjch6tja7s4my2hkm2nw.jpeg" alt=" " width="800" height="1200"&gt;&lt;/a&gt;&lt;/p&gt;

</description>
    </item>
    <item>
      <title>Top Cloud Careers</title>
      <dc:creator>Durgesh</dc:creator>
      <pubDate>Fri, 22 Aug 2025 20:29:19 +0000</pubDate>
      <link>https://forem.com/ai-ops/top-cloud-careers-1fmg</link>
      <guid>https://forem.com/ai-ops/top-cloud-careers-1fmg</guid>
      <description>&lt;p&gt;&lt;a href="https://media2.dev.to/dynamic/image/width=800%2Cheight=%2Cfit=scale-down%2Cgravity=auto%2Cformat=auto/https%3A%2F%2Fdev-to-uploads.s3.amazonaws.com%2Fuploads%2Farticles%2Fqsu6yse6a1g5t6bk7frn.jpeg" class="article-body-image-wrapper"&gt;&lt;img src="https://media2.dev.to/dynamic/image/width=800%2Cheight=%2Cfit=scale-down%2Cgravity=auto%2Cformat=auto/https%3A%2F%2Fdev-to-uploads.s3.amazonaws.com%2Fuploads%2Farticles%2Fqsu6yse6a1g5t6bk7frn.jpeg" alt=" " width="800" height="1111"&gt;&lt;/a&gt;&lt;/p&gt;

</description>
    </item>
    <item>
      <title>Devops Burger</title>
      <dc:creator>Durgesh</dc:creator>
      <pubDate>Fri, 22 Aug 2025 20:28:46 +0000</pubDate>
      <link>https://forem.com/ai-ops/devops-burger-26h5</link>
      <guid>https://forem.com/ai-ops/devops-burger-26h5</guid>
      <description>&lt;p&gt;&lt;a href="https://media2.dev.to/dynamic/image/width=800%2Cheight=%2Cfit=scale-down%2Cgravity=auto%2Cformat=auto/https%3A%2F%2Fdev-to-uploads.s3.amazonaws.com%2Fuploads%2Farticles%2Fzvsbwqjmc16vmay6g9su.jpeg" class="article-body-image-wrapper"&gt;&lt;img src="https://media2.dev.to/dynamic/image/width=800%2Cheight=%2Cfit=scale-down%2Cgravity=auto%2Cformat=auto/https%3A%2F%2Fdev-to-uploads.s3.amazonaws.com%2Fuploads%2Farticles%2Fzvsbwqjmc16vmay6g9su.jpeg" alt=" " width="800" height="1111"&gt;&lt;/a&gt;&lt;/p&gt;

</description>
    </item>
    <item>
      <title>List of all LLM models</title>
      <dc:creator>Durgesh</dc:creator>
      <pubDate>Fri, 09 May 2025 21:43:56 +0000</pubDate>
      <link>https://forem.com/ai-ops/list-of-all-llm-models-2iae</link>
      <guid>https://forem.com/ai-ops/list-of-all-llm-models-2iae</guid>
      <description>&lt;p&gt;&lt;strong&gt;Major LLMs&lt;/strong&gt;&lt;/p&gt;

&lt;ul&gt;
&lt;li&gt;GPT-4.5 / GPT-4o / GPT-o3 / GPT-o4-mini (OpenAI)&lt;/li&gt;
&lt;li&gt;Gemini 2.5 Pro / Gemini 2.0 / Gemini 1.5 (Google DeepMind)&lt;/li&gt;
&lt;li&gt;Claude 3.7 Sonnet / Claude 3.5 Sonnet (Anthropic)&lt;/li&gt;
&lt;li&gt;Grok-3 / Grok-2 / Grok-1 (xAI)&lt;/li&gt;
&lt;li&gt;Llama 3.1 / Llama 3 / Llama 2 (Meta AI)&lt;/li&gt;
&lt;li&gt;Mistral Large 2 / Mistral 7B / Mixtral 8x22B (Mistral AI)&lt;/li&gt;
&lt;li&gt;Qwen 3 / Qwen 2.5-Max (Alibaba)&lt;/li&gt;
&lt;li&gt;DeepSeek R1 / DeepSeek-V3 / DeepSeek-V2.5 (DeepSeek)&lt;/li&gt;
&lt;li&gt;Falcon 180B / Falcon 40B (Technology Innovation Institute)&lt;/li&gt;
&lt;li&gt;PaLM 2 (Google)&lt;/li&gt;
&lt;li&gt;Nova (Amazon)&lt;/li&gt;
&lt;li&gt;DBRX (Databricks' Mosaic ML)&lt;/li&gt;
&lt;li&gt;Command R (Cohere)&lt;/li&gt;
&lt;li&gt;Inflection-2.5 (Inflection AI)&lt;/li&gt;
&lt;li&gt;Gemma (Google DeepMind)&lt;/li&gt;
&lt;li&gt;Stable LM 2 (Stability AI)&lt;/li&gt;
&lt;li&gt;Nemotron-4 340B (NVIDIA)&lt;/li&gt;
&lt;li&gt;XGen-7B (Salesforce)&lt;/li&gt;
&lt;li&gt;Alpaca 7B (Stanford CRFM)&lt;/li&gt;
&lt;li&gt;Pythia (EleutherAI)&lt;/li&gt;
&lt;li&gt;Phi-3 (Microsoft)&lt;/li&gt;
&lt;li&gt;Jamba (AI21 Labs)&lt;/li&gt;
&lt;li&gt;Ernie (Baidu)&lt;/li&gt;
&lt;li&gt;Granite (IBM)&lt;/li&gt;
&lt;li&gt;BERT (Google, foundational transformer model)&lt;/li&gt;
&lt;/ul&gt;

</description>
      <category>ai</category>
      <category>aiops</category>
      <category>openai</category>
    </item>
    <item>
      <title>Introducing AWS CloudFormation Stack Refactoring</title>
      <dc:creator>Durgesh</dc:creator>
      <pubDate>Tue, 11 Feb 2025 00:46:39 +0000</pubDate>
      <link>https://forem.com/ai-ops/introducing-aws-cloudformation-stack-refactoring-4c23</link>
      <guid>https://forem.com/ai-ops/introducing-aws-cloudformation-stack-refactoring-4c23</guid>
      <description>&lt;p&gt;As your cloud infrastructure grows and evolves, you may find the need to reorganize your AWS CloudFormation stacks for better management, for improved modularity, or to align with changing business requirements. CloudFormation now offers a powerful feature that allows you to move resources between stacks. In this post, we’ll explore the process of stack refactoring and how it can help you maintain a well-organized and efficient cloud infrastructure.&lt;/p&gt;

&lt;p&gt;Read the whole article &lt;a href="https://aws.amazon.com/blogs/devops/introducing-aws-cloudformation-stack-refactoring/" rel="noopener noreferrer"&gt;here&lt;/a&gt; &lt;/p&gt;

</description>
      <category>aws</category>
      <category>terraform</category>
      <category>infrastructureascode</category>
    </item>
    <item>
      <title>Opensource AI vs Closed Source AI</title>
      <dc:creator>Durgesh</dc:creator>
      <pubDate>Mon, 03 Feb 2025 02:32:37 +0000</pubDate>
      <link>https://forem.com/ai-ops/opensource-ai-vs-closed-source-ai-1e2</link>
      <guid>https://forem.com/ai-ops/opensource-ai-vs-closed-source-ai-1e2</guid>
      <description>&lt;p&gt;Key Differences&lt;/p&gt;

&lt;p&gt;&lt;strong&gt;Accessibility &amp;amp; Collaboration&lt;/strong&gt;&lt;/p&gt;

&lt;p&gt;Open-source AI (e.g., Meta's LLaMA, Hugging Face models) provides publicly accessible code for modification and redistribution, fostering community collaboration and rapid innovation.&lt;br&gt;
Closed-source AI (e.g., GPT-4, Google Gemini) restricts code access to protect intellectual property, limiting customization but ensuring controlled updates and support.&lt;/p&gt;

&lt;p&gt;&lt;strong&gt;Transparency &amp;amp; Security&lt;/strong&gt;&lt;br&gt;
Open-source models allow full scrutiny of algorithms and training data, enabling bias detection and ethical audits. However, public code access increases vulnerability to exploitation.&lt;br&gt;
Closed-source systems prioritize security through restricted access and centralized oversight, though limited transparency raises accountability concerns.&lt;/p&gt;

&lt;p&gt;&lt;strong&gt;Cost &amp;amp; Customization&lt;/strong&gt;&lt;br&gt;
Open-source AI reduces initial costs (often free) but may incur expenses for deployment, maintenance, and specialized support.&lt;br&gt;
Closed-source AI involves licensing fees and vendor dependency, but offers streamlined implementation and reliability.&lt;/p&gt;

&lt;p&gt;&lt;strong&gt;Advantages and Challenges&lt;/strong&gt;&lt;br&gt;
Open-Source AI&lt;br&gt;
Pros:&lt;br&gt;
Innovation: Community contributions accelerate development (e.g., LLaMA 3's rapid improvements via public input).&lt;br&gt;
Customization: Adaptable for niche use cases, such as academic research or tailored enterprise solutions.&lt;br&gt;
Transparency: Auditable code builds trust in data handling and decision-making processes.&lt;/p&gt;

&lt;p&gt;Cons:&lt;br&gt;
Security risks: Public code exposes vulnerabilities.&lt;br&gt;
Fragmented support: Reliance on community troubleshooting.&lt;br&gt;
Closed-Source AI&lt;br&gt;
Pros:&lt;br&gt;
Quality control: Consistent performance via managed updates (e.g., GPT-4's iterative enhancements).&lt;br&gt;
Commercial viability: Monetization through APIs funds cutting-edge R&amp;amp;D.&lt;br&gt;
Regulatory compliance: Built-in safeguards against misuse (e.g., OpenAI's content moderation).&lt;br&gt;
Cons:&lt;br&gt;
Vendor lock-in: Migration barriers and limited adaptability.&lt;br&gt;
Opaque ethics: Hidden training data obscures biases.&lt;/p&gt;

&lt;p&gt;&lt;strong&gt;True Open-Source vs. Open Weights&lt;/strong&gt;&lt;/p&gt;

&lt;p&gt;Open-Source AI requires full transparency across four components under OSI-approved licenses:&lt;br&gt;
Training data (sources and processing methods)&lt;br&gt;
Model architecture code&lt;br&gt;
Training methodology (hyperparameters, optimization strategies)&lt;br&gt;
Model weights&lt;br&gt;
Only 18% of models claiming to be open source meet all criteria, per Hugging Face's 2024 audit. Most fall into open weights territory, providing only model parameters with restricted licenses.&lt;/p&gt;

&lt;p&gt;OpenAI = not open&lt;/p&gt;

&lt;p&gt;Open source AI = not actually open source (no data)&lt;/p&gt;

&lt;p&gt;Scaling laws = not actually laws&lt;/p&gt;

</description>
      <category>deepseek</category>
      <category>opensource</category>
      <category>openai</category>
    </item>
    <item>
      <title>Deepseek AI paper</title>
      <dc:creator>Durgesh</dc:creator>
      <pubDate>Mon, 03 Feb 2025 00:31:49 +0000</pubDate>
      <link>https://forem.com/ai-ops/deepseek-ai-paper-4b16</link>
      <guid>https://forem.com/ai-ops/deepseek-ai-paper-4b16</guid>
      <description>&lt;p&gt;3 things from the DeepSeek AI R1 paper&lt;/p&gt;

&lt;p&gt;1️⃣ Skipping Supervised Fine-Tuning: Better Reasoning, Poorer Readability&lt;/p&gt;

&lt;p&gt;⛳ Most LLMs follow three stages: pretraining for language understanding, supervised fine-tuning for task-specific learning, and reinforcement learning to align with human preferences using reward systems.&lt;/p&gt;

&lt;p&gt;⛳ DeepSeek-R1-Zero broke this mold by skipping supervised fine-tuning entirely and relying solely on reinforcement learning.&lt;/p&gt;

&lt;p&gt;⛳ According to the authors, this allowed the model to independently develop reasoning skills, including the ability to allocate extended “thinking time” and generate thousands of reasoning tokens for solving complex tasks. This unconventional approach significantly boosted performance, even surpassing OpenAI-o1 on benchmarks.&lt;/p&gt;

&lt;p&gt;However, the text generated is notably less readable, an acknowledged limitation of DeepSeek-R1-Zero.&lt;/p&gt;

&lt;p&gt;2️⃣ High-Quality Data Remains the Moat&lt;br&gt;
⛳ While DeepSeek-R1-Zero achieved remarkable results with pure reinforcement learning, introducing a small set of carefully curated cold-start data for Fine-Tuning (thousands of high-quality examples) in DeepSeek-R1 led to notable improvements in readability, language consistency, and reasoning. &lt;br&gt;
⛳ This highlights how even minimal amounts of high-quality data can dramatically enhance the effectiveness of RL-trained models.&lt;/p&gt;

&lt;p&gt;3️⃣ Distillation Over Training for Smaller Models&lt;br&gt;
⛳ DeepSeek-R1’s reasoning capabilities were distilled into smaller models ranging from 1.5B to 70B parameters. These distilled models consistently outperformed much larger models like GPT-4o and Claude-3.5-Sonnet on multiple benchmarks. &lt;br&gt;
⛳ This demonstrates that distillation allows smaller models to inherit remarkable reasoning abilities from larger, more powerful models, often outperforming models trained from scratch.&lt;/p&gt;

</description>
      <category>deepseek</category>
      <category>deeplearning</category>
    </item>
  </channel>
</rss>
