[{"data":1,"prerenderedAt":3207},["ShallowReactive",2],{"/en-us/blog/tags/ai-ml/":3,"navigation-en-us":20,"banner-en-us":438,"footer-en-us":453,"AI/ML-tag-page-en-us":664},{"_path":4,"_dir":5,"_draft":6,"_partial":6,"_locale":7,"content":8,"config":11,"_id":13,"_type":14,"title":15,"_source":16,"_file":17,"_stem":18,"_extension":19},"/en-us/blog/tags/ai-ml","tags",false,"",{"tag":9,"tagSlug":10},"AI/ML","ai-ml",{"template":12},"BlogTag","content:en-us:blog:tags:ai-ml.yml","yaml","Ai Ml","content","en-us/blog/tags/ai-ml.yml","en-us/blog/tags/ai-ml","yml",{"_path":21,"_dir":22,"_draft":6,"_partial":6,"_locale":7,"data":23,"_id":434,"_type":14,"title":435,"_source":16,"_file":436,"_stem":437,"_extension":19},"/shared/en-us/main-navigation","en-us",{"logo":24,"freeTrial":29,"sales":34,"login":39,"items":44,"search":375,"minimal":406,"duo":425},{"config":25},{"href":26,"dataGaName":27,"dataGaLocation":28},"/","gitlab logo","header",{"text":30,"config":31},"Get free trial",{"href":32,"dataGaName":33,"dataGaLocation":28},"https://gitlab.com/-/trial_registrations/new?glm_source=about.gitlab.com&glm_content=default-saas-trial/","free trial",{"text":35,"config":36},"Talk to sales",{"href":37,"dataGaName":38,"dataGaLocation":28},"/sales/","sales",{"text":40,"config":41},"Sign in",{"href":42,"dataGaName":43,"dataGaLocation":28},"https://gitlab.com/users/sign_in/","sign in",[45,89,185,190,296,356],{"text":46,"config":47,"cards":49,"footer":72},"Platform",{"dataNavLevelOne":48},"platform",[50,56,64],{"title":46,"description":51,"link":52},"The most comprehensive AI-powered DevSecOps Platform",{"text":53,"config":54},"Explore our Platform",{"href":55,"dataGaName":48,"dataGaLocation":28},"/platform/",{"title":57,"description":58,"link":59},"GitLab Duo (AI)","Build software faster with AI at every stage of development",{"text":60,"config":61},"Meet GitLab Duo",{"href":62,"dataGaName":63,"dataGaLocation":28},"/gitlab-duo/","gitlab duo ai",{"title":65,"description":66,"link":67},"Why 
GitLab","10 reasons why Enterprises choose GitLab",{"text":68,"config":69},"Learn more",{"href":70,"dataGaName":71,"dataGaLocation":28},"/why-gitlab/","why gitlab",{"title":73,"items":74},"Get started with",[75,80,85],{"text":76,"config":77},"Platform Engineering",{"href":78,"dataGaName":79,"dataGaLocation":28},"/solutions/platform-engineering/","platform engineering",{"text":81,"config":82},"Developer Experience",{"href":83,"dataGaName":84,"dataGaLocation":28},"/developer-experience/","Developer experience",{"text":86,"config":87},"MLOps",{"href":88,"dataGaName":86,"dataGaLocation":28},"/topics/devops/the-role-of-ai-in-devops/",{"text":90,"left":91,"config":92,"link":94,"lists":98,"footer":167},"Product",true,{"dataNavLevelOne":93},"solutions",{"text":95,"config":96},"View all Solutions",{"href":97,"dataGaName":93,"dataGaLocation":28},"/solutions/",[99,124,146],{"title":100,"description":101,"link":102,"items":107},"Automation","CI/CD and automation to accelerate deployment",{"config":103},{"icon":104,"href":105,"dataGaName":106,"dataGaLocation":28},"AutomatedCodeAlt","/solutions/delivery-automation/","automated software delivery",[108,112,116,120],{"text":109,"config":110},"CI/CD",{"href":111,"dataGaLocation":28,"dataGaName":109},"/solutions/continuous-integration/",{"text":113,"config":114},"AI-Assisted Development",{"href":62,"dataGaLocation":28,"dataGaName":115},"AI assisted development",{"text":117,"config":118},"Source Code Management",{"href":119,"dataGaLocation":28,"dataGaName":117},"/solutions/source-code-management/",{"text":121,"config":122},"Automated Software Delivery",{"href":105,"dataGaLocation":28,"dataGaName":123},"Automated software delivery",{"title":125,"description":126,"link":127,"items":132},"Security","Deliver code faster without compromising security",{"config":128},{"href":129,"dataGaName":130,"dataGaLocation":28,"icon":131},"/solutions/security-compliance/","security and 
compliance","ShieldCheckLight",[133,136,141],{"text":134,"config":135},"Security & Compliance",{"href":129,"dataGaLocation":28,"dataGaName":134},{"text":137,"config":138},"Software Supply Chain Security",{"href":139,"dataGaLocation":28,"dataGaName":140},"/solutions/supply-chain/","Software supply chain security",{"text":142,"config":143},"Compliance & Governance",{"href":144,"dataGaLocation":28,"dataGaName":145},"/solutions/continuous-software-compliance/","Compliance and governance",{"title":147,"link":148,"items":153},"Measurement",{"config":149},{"icon":150,"href":151,"dataGaName":152,"dataGaLocation":28},"DigitalTransformation","/solutions/visibility-measurement/","visibility and measurement",[154,158,162],{"text":155,"config":156},"Visibility & Measurement",{"href":151,"dataGaLocation":28,"dataGaName":157},"Visibility and Measurement",{"text":159,"config":160},"Value Stream Management",{"href":161,"dataGaLocation":28,"dataGaName":159},"/solutions/value-stream-management/",{"text":163,"config":164},"Analytics & Insights",{"href":165,"dataGaLocation":28,"dataGaName":166},"/solutions/analytics-and-insights/","Analytics and insights",{"title":168,"items":169},"GitLab for",[170,175,180],{"text":171,"config":172},"Enterprise",{"href":173,"dataGaLocation":28,"dataGaName":174},"/enterprise/","enterprise",{"text":176,"config":177},"Small Business",{"href":178,"dataGaLocation":28,"dataGaName":179},"/small-business/","small business",{"text":181,"config":182},"Public Sector",{"href":183,"dataGaLocation":28,"dataGaName":184},"/solutions/public-sector/","public sector",{"text":186,"config":187},"Pricing",{"href":188,"dataGaName":189,"dataGaLocation":28,"dataNavLevelOne":189},"/pricing/","pricing",{"text":191,"config":192,"link":194,"lists":198,"feature":283},"Resources",{"dataNavLevelOne":193},"resources",{"text":195,"config":196},"View all resources",{"href":197,"dataGaName":193,"dataGaLocation":28},"/resources/",[199,232,255],{"title":200,"items":201},"Getting 
started",[202,207,212,217,222,227],{"text":203,"config":204},"Install",{"href":205,"dataGaName":206,"dataGaLocation":28},"/install/","install",{"text":208,"config":209},"Quick start guides",{"href":210,"dataGaName":211,"dataGaLocation":28},"/get-started/","quick setup checklists",{"text":213,"config":214},"Learn",{"href":215,"dataGaLocation":28,"dataGaName":216},"https://university.gitlab.com/","learn",{"text":218,"config":219},"Product documentation",{"href":220,"dataGaName":221,"dataGaLocation":28},"https://docs.gitlab.com/","product documentation",{"text":223,"config":224},"Best practice videos",{"href":225,"dataGaName":226,"dataGaLocation":28},"/getting-started-videos/","best practice videos",{"text":228,"config":229},"Integrations",{"href":230,"dataGaName":231,"dataGaLocation":28},"/integrations/","integrations",{"title":233,"items":234},"Discover",[235,240,245,250],{"text":236,"config":237},"Customer success stories",{"href":238,"dataGaName":239,"dataGaLocation":28},"/customers/","customer success stories",{"text":241,"config":242},"Blog",{"href":243,"dataGaName":244,"dataGaLocation":28},"/blog/","blog",{"text":246,"config":247},"Remote",{"href":248,"dataGaName":249,"dataGaLocation":28},"https://handbook.gitlab.com/handbook/company/culture/all-remote/","remote",{"text":251,"config":252},"TeamOps",{"href":253,"dataGaName":254,"dataGaLocation":28},"/teamops/","teamops",{"title":256,"items":257},"Connect",[258,263,268,273,278],{"text":259,"config":260},"GitLab 
Services",{"href":261,"dataGaName":262,"dataGaLocation":28},"/services/","services",{"text":264,"config":265},"Community",{"href":266,"dataGaName":267,"dataGaLocation":28},"/community/","community",{"text":269,"config":270},"Forum",{"href":271,"dataGaName":272,"dataGaLocation":28},"https://forum.gitlab.com/","forum",{"text":274,"config":275},"Events",{"href":276,"dataGaName":277,"dataGaLocation":28},"/events/","events",{"text":279,"config":280},"Partners",{"href":281,"dataGaName":282,"dataGaLocation":28},"/partners/","partners",{"backgroundColor":284,"textColor":285,"text":286,"image":287,"link":291},"#2f2a6b","#fff","Insights for the future of software development",{"altText":288,"config":289},"the source promo card",{"src":290},"/images/navigation/the-source-promo-card.svg",{"text":292,"config":293},"Read the latest",{"href":294,"dataGaName":295,"dataGaLocation":28},"/the-source/","the source",{"text":297,"config":298,"lists":300},"Company",{"dataNavLevelOne":299},"company",[301],{"items":302},[303,308,314,316,321,326,331,336,341,346,351],{"text":304,"config":305},"About",{"href":306,"dataGaName":307,"dataGaLocation":28},"/company/","about",{"text":309,"config":310,"footerGa":313},"Jobs",{"href":311,"dataGaName":312,"dataGaLocation":28},"/jobs/","jobs",{"dataGaName":312},{"text":274,"config":315},{"href":276,"dataGaName":277,"dataGaLocation":28},{"text":317,"config":318},"Leadership",{"href":319,"dataGaName":320,"dataGaLocation":28},"/company/team/e-group/","leadership",{"text":322,"config":323},"Team",{"href":324,"dataGaName":325,"dataGaLocation":28},"/company/team/","team",{"text":327,"config":328},"Handbook",{"href":329,"dataGaName":330,"dataGaLocation":28},"https://handbook.gitlab.com/","handbook",{"text":332,"config":333},"Investor relations",{"href":334,"dataGaName":335,"dataGaLocation":28},"https://ir.gitlab.com/","investor relations",{"text":337,"config":338},"Trust Center",{"href":339,"dataGaName":340,"dataGaLocation":28},"/security/","trust 
center",{"text":342,"config":343},"AI Transparency Center",{"href":344,"dataGaName":345,"dataGaLocation":28},"/ai-transparency-center/","ai transparency center",{"text":347,"config":348},"Newsletter",{"href":349,"dataGaName":350,"dataGaLocation":28},"/company/contact/","newsletter",{"text":352,"config":353},"Press",{"href":354,"dataGaName":355,"dataGaLocation":28},"/press/","press",{"text":357,"config":358,"lists":359},"Contact us",{"dataNavLevelOne":299},[360],{"items":361},[362,365,370],{"text":35,"config":363},{"href":37,"dataGaName":364,"dataGaLocation":28},"talk to sales",{"text":366,"config":367},"Get help",{"href":368,"dataGaName":369,"dataGaLocation":28},"/support/","get help",{"text":371,"config":372},"Customer portal",{"href":373,"dataGaName":374,"dataGaLocation":28},"https://customers.gitlab.com/customers/sign_in/","customer portal",{"close":376,"login":377,"suggestions":384},"Close",{"text":378,"link":379},"To search repositories and projects, login to",{"text":380,"config":381},"gitlab.com",{"href":42,"dataGaName":382,"dataGaLocation":383},"search login","search",{"text":385,"default":386},"Suggestions",[387,389,393,395,399,403],{"text":57,"config":388},{"href":62,"dataGaName":57,"dataGaLocation":383},{"text":390,"config":391},"Code Suggestions (AI)",{"href":392,"dataGaName":390,"dataGaLocation":383},"/solutions/code-suggestions/",{"text":109,"config":394},{"href":111,"dataGaName":109,"dataGaLocation":383},{"text":396,"config":397},"GitLab on AWS",{"href":398,"dataGaName":396,"dataGaLocation":383},"/partners/technology-partners/aws/",{"text":400,"config":401},"GitLab on Google Cloud",{"href":402,"dataGaName":400,"dataGaLocation":383},"/partners/technology-partners/google-cloud-platform/",{"text":404,"config":405},"Why GitLab?",{"href":70,"dataGaName":404,"dataGaLocation":383},{"freeTrial":407,"mobileIcon":412,"desktopIcon":417,"secondaryButton":420},{"text":408,"config":409},"Start free 
trial",{"href":410,"dataGaName":33,"dataGaLocation":411},"https://gitlab.com/-/trials/new/","nav",{"altText":413,"config":414},"Gitlab Icon",{"src":415,"dataGaName":416,"dataGaLocation":411},"/images/brand/gitlab-logo-tanuki.svg","gitlab icon",{"altText":413,"config":418},{"src":419,"dataGaName":416,"dataGaLocation":411},"/images/brand/gitlab-logo-type.svg",{"text":421,"config":422},"Get Started",{"href":423,"dataGaName":424,"dataGaLocation":411},"https://gitlab.com/-/trial_registrations/new?glm_source=about.gitlab.com/compare/gitlab-vs-github/","get started",{"freeTrial":426,"mobileIcon":430,"desktopIcon":432},{"text":427,"config":428},"Learn more about GitLab Duo",{"href":62,"dataGaName":429,"dataGaLocation":411},"gitlab duo",{"altText":413,"config":431},{"src":415,"dataGaName":416,"dataGaLocation":411},{"altText":413,"config":433},{"src":419,"dataGaName":416,"dataGaLocation":411},"content:shared:en-us:main-navigation.yml","Main Navigation","shared/en-us/main-navigation.yml","shared/en-us/main-navigation",{"_path":439,"_dir":22,"_draft":6,"_partial":6,"_locale":7,"title":440,"button":441,"image":445,"config":448,"_id":450,"_type":14,"_source":16,"_file":451,"_stem":452,"_extension":19},"/shared/en-us/banner","is now in public beta!",{"text":68,"config":442},{"href":443,"dataGaName":444,"dataGaLocation":28},"/gitlab-duo/agent-platform/","duo banner",{"config":446},{"src":447},"https://res.cloudinary.com/about-gitlab-com/image/upload/v1753720689/somrf9zaunk0xlt7ne4x.svg",{"layout":449},"release","content:shared:en-us:banner.yml","shared/en-us/banner.yml","shared/en-us/banner",{"_path":454,"_dir":22,"_draft":6,"_partial":6,"_locale":7,"data":455,"_id":660,"_type":14,"title":661,"_source":16,"_file":662,"_stem":663,"_extension":19},"/shared/en-us/main-footer",{"text":456,"source":457,"edit":463,"contribute":468,"config":473,"items":478,"minimal":652},"Git is a trademark of Software Freedom Conservancy and our use of 'GitLab' is under 
license",{"text":458,"config":459},"View page source",{"href":460,"dataGaName":461,"dataGaLocation":462},"https://gitlab.com/gitlab-com/marketing/digital-experience/about-gitlab-com/","page source","footer",{"text":464,"config":465},"Edit this page",{"href":466,"dataGaName":467,"dataGaLocation":462},"https://gitlab.com/gitlab-com/marketing/digital-experience/about-gitlab-com/-/blob/main/content/","web ide",{"text":469,"config":470},"Please contribute",{"href":471,"dataGaName":472,"dataGaLocation":462},"https://gitlab.com/gitlab-com/marketing/digital-experience/about-gitlab-com/-/blob/main/CONTRIBUTING.md/","please contribute",{"twitter":474,"facebook":475,"youtube":476,"linkedin":477},"https://twitter.com/gitlab","https://www.facebook.com/gitlab","https://www.youtube.com/channel/UCnMGQ8QHMAnVIsI3xJrihhg","https://www.linkedin.com/company/gitlab-com",[479,502,559,588,622],{"title":46,"links":480,"subMenu":485},[481],{"text":482,"config":483},"DevSecOps platform",{"href":55,"dataGaName":484,"dataGaLocation":462},"devsecops platform",[486],{"title":186,"links":487},[488,492,497],{"text":489,"config":490},"View plans",{"href":188,"dataGaName":491,"dataGaLocation":462},"view plans",{"text":493,"config":494},"Why Premium?",{"href":495,"dataGaName":496,"dataGaLocation":462},"/pricing/premium/","why premium",{"text":498,"config":499},"Why Ultimate?",{"href":500,"dataGaName":501,"dataGaLocation":462},"/pricing/ultimate/","why ultimate",{"title":503,"links":504},"Solutions",[505,510,513,515,520,525,529,532,536,541,543,546,549,554],{"text":506,"config":507},"Digital transformation",{"href":508,"dataGaName":509,"dataGaLocation":462},"/topics/digital-transformation/","digital transformation",{"text":134,"config":511},{"href":129,"dataGaName":512,"dataGaLocation":462},"security & compliance",{"text":123,"config":514},{"href":105,"dataGaName":106,"dataGaLocation":462},{"text":516,"config":517},"Agile 
development",{"href":518,"dataGaName":519,"dataGaLocation":462},"/solutions/agile-delivery/","agile delivery",{"text":521,"config":522},"Cloud transformation",{"href":523,"dataGaName":524,"dataGaLocation":462},"/topics/cloud-native/","cloud transformation",{"text":526,"config":527},"SCM",{"href":119,"dataGaName":528,"dataGaLocation":462},"source code management",{"text":109,"config":530},{"href":111,"dataGaName":531,"dataGaLocation":462},"continuous integration & delivery",{"text":533,"config":534},"Value stream management",{"href":161,"dataGaName":535,"dataGaLocation":462},"value stream management",{"text":537,"config":538},"GitOps",{"href":539,"dataGaName":540,"dataGaLocation":462},"/solutions/gitops/","gitops",{"text":171,"config":542},{"href":173,"dataGaName":174,"dataGaLocation":462},{"text":544,"config":545},"Small business",{"href":178,"dataGaName":179,"dataGaLocation":462},{"text":547,"config":548},"Public sector",{"href":183,"dataGaName":184,"dataGaLocation":462},{"text":550,"config":551},"Education",{"href":552,"dataGaName":553,"dataGaLocation":462},"/solutions/education/","education",{"text":555,"config":556},"Financial services",{"href":557,"dataGaName":558,"dataGaLocation":462},"/solutions/finance/","financial 
services",{"title":191,"links":560},[561,563,565,567,570,572,574,576,578,580,582,584,586],{"text":203,"config":562},{"href":205,"dataGaName":206,"dataGaLocation":462},{"text":208,"config":564},{"href":210,"dataGaName":211,"dataGaLocation":462},{"text":213,"config":566},{"href":215,"dataGaName":216,"dataGaLocation":462},{"text":218,"config":568},{"href":220,"dataGaName":569,"dataGaLocation":462},"docs",{"text":241,"config":571},{"href":243,"dataGaName":244,"dataGaLocation":462},{"text":236,"config":573},{"href":238,"dataGaName":239,"dataGaLocation":462},{"text":246,"config":575},{"href":248,"dataGaName":249,"dataGaLocation":462},{"text":259,"config":577},{"href":261,"dataGaName":262,"dataGaLocation":462},{"text":251,"config":579},{"href":253,"dataGaName":254,"dataGaLocation":462},{"text":264,"config":581},{"href":266,"dataGaName":267,"dataGaLocation":462},{"text":269,"config":583},{"href":271,"dataGaName":272,"dataGaLocation":462},{"text":274,"config":585},{"href":276,"dataGaName":277,"dataGaLocation":462},{"text":279,"config":587},{"href":281,"dataGaName":282,"dataGaLocation":462},{"title":297,"links":589},[590,592,594,596,598,600,602,606,611,613,615,617],{"text":304,"config":591},{"href":306,"dataGaName":299,"dataGaLocation":462},{"text":309,"config":593},{"href":311,"dataGaName":312,"dataGaLocation":462},{"text":317,"config":595},{"href":319,"dataGaName":320,"dataGaLocation":462},{"text":322,"config":597},{"href":324,"dataGaName":325,"dataGaLocation":462},{"text":327,"config":599},{"href":329,"dataGaName":330,"dataGaLocation":462},{"text":332,"config":601},{"href":334,"dataGaName":335,"dataGaLocation":462},{"text":603,"config":604},"Sustainability",{"href":605,"dataGaName":603,"dataGaLocation":462},"/sustainability/",{"text":607,"config":608},"Diversity, inclusion and belonging (DIB)",{"href":609,"dataGaName":610,"dataGaLocation":462},"/diversity-inclusion-belonging/","Diversity, inclusion and 
belonging",{"text":337,"config":612},{"href":339,"dataGaName":340,"dataGaLocation":462},{"text":347,"config":614},{"href":349,"dataGaName":350,"dataGaLocation":462},{"text":352,"config":616},{"href":354,"dataGaName":355,"dataGaLocation":462},{"text":618,"config":619},"Modern Slavery Transparency Statement",{"href":620,"dataGaName":621,"dataGaLocation":462},"https://handbook.gitlab.com/handbook/legal/modern-slavery-act-transparency-statement/","modern slavery transparency statement",{"title":623,"links":624},"Contact Us",[625,628,630,632,637,642,647],{"text":626,"config":627},"Contact an expert",{"href":37,"dataGaName":38,"dataGaLocation":462},{"text":366,"config":629},{"href":368,"dataGaName":369,"dataGaLocation":462},{"text":371,"config":631},{"href":373,"dataGaName":374,"dataGaLocation":462},{"text":633,"config":634},"Status",{"href":635,"dataGaName":636,"dataGaLocation":462},"https://status.gitlab.com/","status",{"text":638,"config":639},"Terms of use",{"href":640,"dataGaName":641,"dataGaLocation":462},"/terms/","terms of use",{"text":643,"config":644},"Privacy statement",{"href":645,"dataGaName":646,"dataGaLocation":462},"/privacy/","privacy statement",{"text":648,"config":649},"Cookie preferences",{"dataGaName":650,"dataGaLocation":462,"id":651,"isOneTrustButton":91},"cookie preferences","ot-sdk-btn",{"items":653},[654,656,658],{"text":638,"config":655},{"href":640,"dataGaName":641,"dataGaLocation":462},{"text":643,"config":657},{"href":645,"dataGaName":646,"dataGaLocation":462},{"text":648,"config":659},{"dataGaName":650,"dataGaLocation":462,"id":651,"isOneTrustButton":91},"content:shared:en-us:main-footer.yml","Main 
Footer","shared/en-us/main-footer.yml","shared/en-us/main-footer",{"allPosts":665,"featuredPost":3186,"totalPagesCount":3205,"initialPosts":3206},[666,686,711,732,753,773,794,815,835,856,876,898,918,939,958,977,998,1018,1046,1066,1086,1104,1122,1144,1164,1185,1205,1227,1246,1265,1284,1304,1324,1344,1365,1381,1401,1420,1439,1458,1476,1497,1517,1538,1557,1576,1593,1612,1632,1650,1669,1688,1707,1725,1745,1764,1784,1804,1822,1840,1860,1879,1901,1919,1939,1958,1978,1996,2015,2033,2052,2072,2093,2115,2134,2153,2172,2191,2211,2231,2251,2270,2290,2307,2324,2344,2365,2383,2402,2420,2438,2455,2473,2491,2509,2528,2548,2567,2588,2607,2625,2643,2661,2680,2699,2717,2738,2758,2778,2794,2811,2830,2848,2867,2887,2906,2925,2944,2962,2980,2999,3017,3037,3057,3076,3095,3113,3131,3150,3168],{"_path":667,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":668,"content":671,"config":679,"_id":682,"_type":14,"title":683,"_source":16,"_file":684,"_stem":685,"_extension":19},"/en-us/blog/3-best-practices-for-building-software-in-the-era-of-llms",{"noIndex":6,"title":669,"description":670},"3 best practices for building software in the era of LLMs","With AI transforming coding speed, developers need new security habits. Learn what they are and how to deploy them throughout the DevSecOps workflow.",{"title":669,"description":670,"authors":672,"heroImage":674,"body":675,"date":676,"category":10,"tags":677},[673],"Salman Ladha","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749662523/Blog/Hero%20Images/Gartner_DevOps_Blog_Post_Cover_Image_1800x945__2_.png","AI has rapidly become a core part of modern software development. Not only is it helping developers code faster than ever, but it’s also automating low-level tasks like writing test cases or summarizing documentation. 
According to our [2024 Global DevSecOps Survey](https://about.gitlab.com/developer-survey/), 81% of developers are already using AI in their workflows or plan to in the next two years.\n\nAs code is written with less manual effort, we’re seeing a subtle but important behavioral change: Developers are beginning to trust AI-generated code with less scrutiny. That confidence — understandable as it may be — can quietly introduce security risks, especially as the overall volume of code increases. Developers can’t be expected to stay on top of every vulnerability or exploit, which is why we need systems and safeguards that scale with them. AI tools are here to stay. So, as security professionals, it’s incumbent on you to empower developers to adopt them in a way that improves both speed and security.\n\nHere are three practical ways to do that. \n\n## Never trust, always verify\n\nAs mentioned above, developers are beginning to trust AI-generated code more readily, especially when it looks clean and compiles without error. To combat this, adopt a zero-trust mindset. While we often talk about [zero trust](https://about.gitlab.com/blog/why-devops-and-zero-trust-go-together/) in the context of identity and access management, the same principle can be applied here with a slightly different framing. Treat AI-generated code like input from a junior developer: helpful, but not production-ready without a proper review. \n\nA developer should be able to explain what the code is doing and why it’s safe before it gets merged. Reviewing AI-generated code might even shape up to be an emerging skillset required in the world of software development. The developers who excel at this will be indispensable because they’ll marry the speed of LLMs with the risk reduction mindset to produce secure code, faster. \n\nThis is where tools like [GitLab Duo Code Review](https://docs.gitlab.com/user/project/merge_requests/duo_in_merge_requests/) can help. 
As a feature of our AI companion across the software development lifecycle, it brings AI into the code review process, not to replace human judgment, but to enhance it. By surfacing questions, inconsistencies, and overlooked issues in the merge requests, AI can help developers keep up with the very AI that’s accelerating development cycles. \n\n## Prompt for secure patterns\n\nLarge language models ([LLMs](https://about.gitlab.com/blog/what-is-a-large-language-model-llm/))  are powerful, but only as precise as the prompts they’re given. That’s why prompt engineering is becoming a core part of working with AI tools. In the world of LLMs, your input *is* the interface. Developers who learn to write clear, security-aware prompts will play a key role in building safer software from the start.\n\nFor example, vague requests like “build a login form” often produce insecure or overly simplistic results. However, by including more context, such as “build a login form **with** input validation, rate limiting, and hashing, **and** support phishing-resistant authentication methods like passkeys,” you’re more likely to produce an output that meets the security standards of your organization. \n\nRecent [research](https://www.backslash.security/press-releases/backslash-security-reveals-in-new-research-that-gpt-4-1-other-popular-llms-generate-insecure-code-unless-explicitly-prompted) from Backslash Security backs this up. They found that secure prompting improved results across popular LLMs. When developers simply asked models to “write secure code,” success rates remained low. However, when prompts referenced [OWASP best practices](https://cheatsheetseries.owasp.org/cheatsheets/LLM_Prompt_Injection_Prevention_Cheat_Sheet.html), the rate of secure code generation increased. \n\nPrompt engineering should be part of how we train and empower security champions within development teams. 
Just like we teach secure coding patterns and threat modeling, we should also be teaching developers how to guide AI tools with the same security mindset. \n\n> Learn more with these helpful [prompt engineering tips](https://docs.gitlab.com/development/ai_features/prompt_engineering/).\n\n## Scan everything, no exceptions\n\nThe rise of AI means we’re writing more code, quicker, with the same number of humans. That shift should change how we think about security, not just as a final check, but as an always-on safeguard woven into every aspect of the development process.\n\nMore code means a wider attack surface. And when that code is partially or fully generated, we can’t solely rely on secure coding practices or individual intuition to spot risks. That’s where automated scanning comes in. [Static Application Security Testing (SAST)](https://docs.gitlab.com/user/application_security/sast/), [Software Composition Analysis (SCA)](https://docs.gitlab.com/user/application_security/dependency_scanning/), and [Secret Detection](https://docs.gitlab.com/user/application_security/secret_detection/) become critical controls to mitigate the risk of secret leaks, supply chain attacks, and weaknesses like SQL injections. With platforms like GitLab, [application security](https://about.gitlab.com/solutions/security-compliance/) is natively built into the developer's workflow, making it a natural part of the development lifecycle. Scanners can also trace through the entire program to make sure new AI-generated code is secure *in the context of all the other code* — that can be hard to spot if you’re just looking at some new code in your IDE or in an AI-generated patch.\n\nBut it’s not just about scanning, it’s about keeping pace. If development teams are going to match the speed of AI-assisted development, they need scans that are fast, accurate, and built to scale. Accuracy especially matters. 
If scanners overwhelm developers with false positives, there’s a risk of losing trust in the system altogether. \n\nThe only way to move fast *and* stay secure is to make scanning non-negotiable. \n\nEvery commit. Every branch. No exceptions.\n\n## Secure your AI-generated code with GitLab\n\nAI is changing the way we build software, but the fundamentals of secure software development still apply. Code still needs to be reviewed. Threats still need to be tested. And security still needs to be embedded in the way we work. At GitLab, that’s exactly what we’ve done. \n\nAs a developer platform, we’re not bolting security onto the workflow — we’re embedding it directly where developers already work: in the IDE, in merge requests, and in the pipeline. Scans run automatically and relevant security context is surfaced to facilitate faster remediation cycles. And, because it’s part of the same platform where developers build, test, and deploy software, there are fewer tools to juggle, less context switching, and a much smoother path to secure code.\n\nAI features like [Duo Vulnerability Explanation and Vulnerability Resolution](https://about.gitlab.com/the-source/ai/understand-and-resolve-vulnerabilities-with-ai-powered-gitlab-duo/) add another layer of speed and insight, helping developers understand risks and fix them faster, without breaking their flow.\n\nAI isn’t a shortcut to security. But with the right practices — and a platform that meets developers where they are — it can absolutely be part of building software that’s fast, secure, and scalable. \n\n> Start your [free 60-day trial of GitLab Ultimate with Duo Enterprise](https://about.gitlab.com/free-trial/) and experience what it’s like to build secure software, faster. 
With native security scanning, AI-powered insights, and a seamless developer experience, GitLab helps you shift security left without slowing down.","2025-07-10",[9,678],"security",{"featured":91,"template":680,"slug":681},"BlogPost","3-best-practices-for-building-software-in-the-era-of-llms","content:en-us:blog:3-best-practices-for-building-software-in-the-era-of-llms.yml","3 Best Practices For Building Software In The Era Of Llms","en-us/blog/3-best-practices-for-building-software-in-the-era-of-llms.yml","en-us/blog/3-best-practices-for-building-software-in-the-era-of-llms",{"_path":687,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":688,"content":696,"config":705,"_id":707,"_type":14,"title":708,"_source":16,"_file":709,"_stem":710,"_extension":19},"/en-us/blog/5-things-to-know-from-our-linkedin-live-security-deep-dive",{"title":689,"description":690,"ogTitle":689,"ogDescription":690,"noIndex":6,"ogImage":691,"ogUrl":692,"ogSiteName":693,"ogType":694,"canonicalUrls":692,"schema":695},"5 things to know from our LinkedIn Live Security Deep Dive","Security experts and product leaders offered their take on new developments in application security and the latest from GitLab 17.5.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749659856/Blog/Hero%20Images/blog-hero-banner-1-0178-820x470-fy25.png","https://about.gitlab.com/blog/5-things-to-know-from-our-linkedin-live-security-deep-dive","https://about.gitlab.com","article","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"5 things to know from our LinkedIn Live Security Deep Dive\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Fatima Sarah Khalid\"}],\n        \"datePublished\": \"2024-10-28\",\n      }",{"title":689,"description":690,"authors":697,"heroImage":691,"date":699,"body":700,"category":678,"tags":701},[698],"Fatima Sarah Khalid","2024-10-28","[GitLab's October LinkedIn Live 
broadcast](https://www.linkedin.com/feed/update/urn:li:activity:7255246777077936128) brought together security experts and product leaders to discuss the latest developments in application security and highlight key features from the GitLab 17.5 release. In case you missed it, here's what you need to know.\n\n## 1. Software is moving faster and security is struggling to keep up\nDevelopment teams are shipping at record speeds, but their security counterparts are finding it difficult to meet that pace. Our [DevSecOps survey](https://about.gitlab.com/developer-survey/) revealed that 66% of companies are shipping code twice as fast as last year, while 55% of security teams are finding vulnerabilities after code is merged to test environments. With 80% of top data breaches coming from application layer attacks, this gap must be addressed.\n\n\u003Cdiv style=\"padding:56.25% 0 0 0;position:relative;\">\u003Ciframe src=\"https://player.vimeo.com/video/1023367700?badge=0&amp;autopause=0&amp;player_id=0&amp;app_id=58479\" frameborder=\"0\" allow=\"autoplay; fullscreen; picture-in-picture; clipboard-write\" style=\"position:absolute;top:0;left:0;width:100%;height:100%;\" title=\"Market Insights\">\u003C/iframe>\u003C/div>\u003Cscript src=\"https://player.vimeo.com/api/player.js\">\u003C/script>\n\n## 2. Advanced SAST is getting smarter\nGitLab's new [Advanced SAST](https://about.gitlab.com/blog/gitlab-advanced-sast-is-now-generally-available/) capabilities are a game-changer for security testing. Built on technology acquired from Oxeye, Advanced SAST offers cross-file and cross-function scanning with taint analysis. 
The star feature is a code flow view that lets developers trace vulnerabilities from source to sink, making it easier to understand and fix security issues.\n\n\u003Cdiv style=\"padding:56.25% 0 0 0;position:relative;\">\u003Ciframe src=\"https://player.vimeo.com/video/1023369304?badge=0&amp;autopause=0&amp;player_id=0&amp;app_id=58479\" frameborder=\"0\" allow=\"autoplay; fullscreen; picture-in-picture; clipboard-write\" style=\"position:absolute;top:0;left:0;width:100%;height:100%;\" title=\"Advanced SAST\">\u003C/iframe>\u003C/div>\u003Cscript src=\"https://player.vimeo.com/api/player.js\">\u003C/script>\n\n> Learn even more with our [Advanced SAST tutorial](https://about.gitlab.com/blog/quick-vulnerability-remediation-with-gitlab-advanced-sast-duo-ai/).\n\n## 3. Accidental secret commits are a thing of the past\nGitLab's new [secret push protection feature](https://about.gitlab.com/blog/prevent-secret-leaks-in-source-code-with-gitlab-secret-push-protection/) stops sensitive information from reaching your GitLab repository by checking the contents of each commit. Instead of dealing with the aftermath of exposed credentials, the system catches secrets before they're committed, saving security teams countless hours of remediation work.\n\n\u003Cdiv style=\"padding:56.25% 0 0 0;position:relative;\">\u003Ciframe src=\"https://player.vimeo.com/video/1023370222?badge=0&amp;autopause=0&amp;player_id=0&amp;app_id=58479\" frameborder=\"0\" allow=\"autoplay; fullscreen; picture-in-picture; clipboard-write\" style=\"position:absolute;top:0;left:0;width:100%;height:100%;\" title=\"Secret Push\">\u003C/iframe>\u003C/div>\u003Cscript src=\"https://player.vimeo.com/api/player.js\">\u003C/script>\n\n## 4. AI is a security catalyst\nAI isn't just for code completion anymore. GitLab Duo has evolved to understand merge requests and provide contextual security assistance. 
With the new Quick Chat feature (accessible via Alt+C), developers can get security insights without leaving their editor.\n\n\u003Cdiv style=\"padding:56.25% 0 0 0;position:relative;\">\u003Ciframe src=\"https://player.vimeo.com/video/1023385333?badge=0&amp;autopause=0&amp;player_id=0&amp;app_id=58479\" frameborder=\"0\" allow=\"autoplay; fullscreen; picture-in-picture; clipboard-write\" style=\"position:absolute;top:0;left:0;width:100%;height:100%;\" title=\"AI Security\">\u003C/iframe>\u003C/div>\u003Cscript src=\"https://player.vimeo.com/api/player.js\">\u003C/script>\n\n## 5. Static reachability reduces security noise\nThe new static reachability feature for Python and Java helps teams focus on vulnerabilities that matter. By identifying which dependencies are actually used in your code, it reduces false positives and helps teams prioritize real security threats.\n\n\u003Cdiv style=\"padding:56.25% 0 0 0;position:relative;\">\u003Ciframe src=\"https://player.vimeo.com/video/1023388137?badge=0&amp;autopause=0&amp;player_id=0&amp;app_id=58479\" frameborder=\"0\" allow=\"autoplay; fullscreen; picture-in-picture; clipboard-write\" style=\"position:absolute;top:0;left:0;width:100%;height:100%;\" title=\"Static Reachability\">\u003C/iframe>\u003C/div>\u003Cscript src=\"https://player.vimeo.com/api/player.js\">\u003C/script>\n\n## Watch on-demand now\n\n[Watch the full \"Security Deep Dive\" recording](https://www.linkedin.com/feed/update/urn:li:activity:7255246777077936128) to see these features in action and hear more insights from our security experts.\n\nBe sure to follow GitLab on LinkedIn to be notified of our monthly broadcasts and get more insights and the latest news about AI-powered DevSecOps.",[9,678,702,703,704],"webcast","DevSecOps","features",{"slug":706,"featured":91,"template":680},"5-things-to-know-from-our-linkedin-live-security-deep-dive","content:en-us:blog:5-things-to-know-from-our-linkedin-live-security-deep-dive.yml","5 Things To Know From Our 
Linkedin Live Security Deep Dive","en-us/blog/5-things-to-know-from-our-linkedin-live-security-deep-dive.yml","en-us/blog/5-things-to-know-from-our-linkedin-live-security-deep-dive",{"_path":712,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":713,"content":718,"config":726,"_id":728,"_type":14,"title":729,"_source":16,"_file":730,"_stem":731,"_extension":19},"/en-us/blog/5-videos-and-interactive-tours-to-learn-gitlab-duo-fast",{"title":714,"description":715,"ogTitle":714,"ogDescription":715,"noIndex":6,"ogImage":691,"ogUrl":716,"ogSiteName":693,"ogType":694,"canonicalUrls":716,"schema":717},"5 videos and interactive tours to learn GitLab Duo fast","Get to know GitLab Duo's capabilities and benefits, and use these visual learning tools to understand how to incorporate AI throughout your software development lifecycle.\n","https://about.gitlab.com/blog/5-videos-and-interactive-tours-to-learn-gitlab-duo-fast","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"5 videos and interactive tours to learn GitLab Duo fast\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Cesar Saavedra\"}],\n        \"datePublished\": \"2024-08-28\"\n      }",{"title":714,"description":715,"authors":719,"heroImage":691,"date":721,"body":722,"category":10,"tags":723},[720],"Cesar Saavedra","2024-08-28","GitLab Duo is a suite of AI-powered features designed to assist DevSecOps teams throughout the software development lifecycle. Integrated seamlessly into the GitLab platform, GitLab Duo leverages artificial intelligence to enhance productivity, improve code quality, and streamline various development and security processes. 
This article introduces you to GitLab Duo's capabilities and benefits, and lists five videos and interactive tours to help you learn how to incorporate this AI powerhouse into your own workflow.\n\nIn this article:\n- [GitLab Duo features](#gitlab-duo-features)\n- [Benefits of GitLab Duo](#benefits-of-gitlab-duo)\n- [5 videos and interactive tours](#5-videos-and-interactive-tours-to-learn-gitlab-duo)\n\n## GitLab Duo features\n\n[GitLab Duo](https://about.gitlab.com/gitlab-duo/) offers a wide range of AI-powered capabilities to help you ship more secure software faster and deliver better results for your customers.\n\n### Feature development\n\n- **Code Suggestions:** Helps developers write code more efficiently by generating code and showing suggestions as they type.\n\n- **Chat:** A conversational interface that answers questions and assists with various tasks throughout the development process.\n\n- **Code Explanation:** Helps understand selected code by providing clear explanations.\n\n- **GitLab Duo for the CLI:** Helps discover or recall Git commands when and where you need them.\n\n-  **Merge Commit Message Generation:** Helps merge more quickly by generating meaningful commit messages.\n\n- **Test Generation:** Helps catch bugs early by automatically generating tests for selected code.\n\n### Securing applications\n\n- **Vulnerability Explanation:** Shows information about security vulnerabilities in code and explains how to fix them.\n\n- **Vulnerability Resolution:** Helps resolve a vulnerability by generating a merge request that addresses it. (Beta)\n\n### Facilitating collaboration\n\n- **AI Impact Dashboard:** Measures the effectiveness and impact of AI on software development lifecycle metrics.\n\n- **Code Review Summary:** Makes merge request handover to reviewers easier by summarizing all the comments in a merge request review. 
(Experimental)\n\n- **Discussion Summary:** Helps everyone get up to speed by summarizing lengthy conversations in an issue. (Beta)\n\n- **Issue Description Generation:** Helps populate an issue quickly by generating a more in-depth description based on a short summary. (Experimental)\n\n- **Merge Request Summary:** Helps populate a merge request more quickly by generating a description based on the code changes. (Beta)\n\n- **Product Analytics:** Processes and responds to questions about your application's usage data.\n\n### Advanced troubleshooting\n\n- **Root Cause Analysis:** Helps determine the cause of CI/CD job failures by analyzing logs.\n\nThese components work together to provide comprehensive AI-assisted support throughout the software development lifecycle.\n\n## Benefits of GitLab Duo\n\nGitLab Duo offers numerous benefits to development teams and organizations. By integrating AI-powered assistance throughout the development lifecycle, it helps increase productivity, improve code quality, and enhance security. \nDevelopers can write code faster, understand complex codebases more easily, and catch potential issues earlier in the development process.\n\nGitLab Duo also helps streamline collaboration, speed up code reviews, and provide valuable insights into the impact of AI on ROI metrics. These benefits contribute to faster delivery of high-quality, secure software.\n\n## 5 videos and interactive tours to learn GitLab Duo\n\nTo help you get acquainted with GitLab Duo and its capabilities quickly, we've compiled a list of five videos and interactive tours. These visual learning tools provide an in-depth look at an array of GitLab Duo features and demonstrate how they can be integrated into your development workflow.\n\n__1. 
GitLab Duo Overview__\n\nThis comprehensive video introduces the core concepts of GitLab Duo and showcases its integration within the GitLab platform.\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/o2xmLTV1y0I?si=90yPCHS_x2zSBAqe\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\n__2. Code Suggestions in Action__\n\nAn interactive tour demonstrating how GitLab Duo Code Suggestions works in real-time, helping developers write code more efficiently.\n\n\u003Ca href=\"https://gitlab.navattic.com/code-suggestions\">\u003Cimg src=\"https://res.cloudinary.com/about-gitlab-com/image/upload/v1752175911/Blog/b5gdnls7jdyrpeyjby5j.png\" alt=\"GitLab Duo Code Suggestions cover image\">\u003C/a>\n\n__3. Vulnerability Resolution Walkthrough__\n\nThis video guide takes you through the process of using GitLab Duo to understand and resolve security vulnerabilities in your code.\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/VJmsw_C125E?si=cUmRiQNJbrv5Yd9D\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\n__4. Chat Demo__\n\nAn interactive session showing how developers can leverage GitLab Duo Chat to get answers, generate code, and solve problems throughout the development process.\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/RJezT5_V6dI?si=QomHCGUKstnAwplM\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\n__5. 
AI Impact Dashboard Tutorial__\n\nA detailed look at how to use and interpret the AI Impact Dashboard to measure the effectiveness of GitLab Duo in your development processes.\n\n\u003Ca href=\"https://gitlab.navattic.com/ai-impact\">\u003Cimg src=\"https://res.cloudinary.com/about-gitlab-com/image/upload/v1752175921/Blog/hn7gflmqswrjb33unuja.png\" alt=\"GitLab Duo AI Impact Dashboard cover image\">\u003C/a>\n\n## Get started with GitLab Duo today\n\nThese videos and interactive tours offer practical insights into how GitLab Duo can enhance your development workflow. By exploring these resources, you'll gain a better understanding of how to leverage AI-powered assistance to improve productivity, code quality, and security in your projects.\n\n> #### Become a GitLab Duo expert: [Start your free, 60-day trial today!](https://about.gitlab.com/solutions/gitlab-duo-pro/sales/?type=free-trial&toggle=gitlab-duo-pro)\n\n## Read more\n- [10 best practices for using AI-powered GitLab Duo Chat](https://about.gitlab.com/blog/10-best-practices-for-using-ai-powered-gitlab-duo-chat/)\n- [Refactor code into modern languages with AI-powered GitLab Duo](https://about.gitlab.com/blog/refactor-code-into-modern-languages-with-ai-powered-gitlab-duo/)\n- [Developing GitLab Duo blog series](https://about.gitlab.com/blog/developing-gitlab-duo-series/)\n- [Mastering GitLab admin tasks with GitLab Duo Chat](https://about.gitlab.com/blog/mastering-gitlab-admin-tasks-with-gitlab-duo-chat/)",[9,704,724,725],"tutorial","product",{"slug":727,"featured":6,"template":680},"5-videos-and-interactive-tours-to-learn-gitlab-duo-fast","content:en-us:blog:5-videos-and-interactive-tours-to-learn-gitlab-duo-fast.yml","5 Videos And Interactive Tours To Learn Gitlab Duo 
Fast","en-us/blog/5-videos-and-interactive-tours-to-learn-gitlab-duo-fast.yml","en-us/blog/5-videos-and-interactive-tours-to-learn-gitlab-duo-fast",{"_path":733,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":734,"content":740,"config":747,"_id":749,"_type":14,"title":750,"_source":16,"_file":751,"_stem":752,"_extension":19},"/en-us/blog/5-ways-gitlab-duo-chat-ai-can-supercharge-product-management",{"title":735,"description":736,"ogTitle":735,"ogDescription":736,"noIndex":6,"ogImage":737,"ogUrl":738,"ogSiteName":693,"ogType":694,"canonicalUrls":738,"schema":739},"5 ways GitLab Duo Chat AI can supercharge product management","Discover how to transform all aspects of product management, boosting efficiency and improving decision-making. Learn practical tips for leveraging AI throughout your PM workflow.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749666405/Blog/Hero%20Images/GitLab_Duo_Blog_Hero_1800x945_r2_B__1_.png","https://about.gitlab.com/blog/5-ways-gitlab-duo-chat-ai-can-supercharge-product-management","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"5 ways GitLab Duo Chat AI can supercharge product management\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Tim Rizzi\"}],\n        \"datePublished\": \"2024-09-25\"\n      }",{"title":735,"description":736,"authors":741,"heroImage":737,"date":743,"body":744,"category":10,"tags":745},[742],"Tim Rizzi","2024-09-25","As a product manager at GitLab, I constantly seek ways to enhance my productivity and decision-making. Recently, I discovered an unexpected ally in [GitLab Duo Chat](https://about.gitlab.com/gitlab-duo/). Let me share how this AI-powered assistant has transformed my approach to product management.\n\n## The daily PM challenges\n\nLike many PMs, I juggle many daily tasks — from summarizing issues and merge requests to crafting detailed product specs and investment cases. 
The sheer volume of writing and analysis required was overwhelming, and I worried about potential cognitive biases influencing my work.\n\nTo address these challenges, I needed to:\n\n1. Increase my efficiency in handling documentation tasks.\n2. Enhance the quality and objectivity of my product decisions.\n3. Improve my communication with various stakeholders.\n\n## Leveraging GitLab Duo Chat\n\nI decided to experiment with GitLab Duo Chat as a support tool for my daily PM tasks. Here's how I incorporated it into my workflow, with real examples:\n\n### 1. Issue creation and refinement\n\nI was tasked with creating an issue for a new feature that would enhance the Conan repository by adding [revision support](https://gitlab.com/gitlab-org/gitlab/-/issues/479437). To start, I prompted Chat: \"Can you create an issue to add support for downloading Conan revisions to the GitLab package registry? Think about the product's value from a C++ developer and a platform engineer perspective.\"\n\nGitLab Duo provided a comprehensive draft issue, including:\n\n* a clear description of the feature\n* value propositions for both C++ developers and platform engineers\n* implementation details\n* acceptance criteria\n* related links and labels\n\nThis gave me a solid starting point with all the necessary sections, which I refined and customized. Instead of spending an hour writing that issue, I spent more time thinking about how this feature fits within the broader GitLab strategy.\n\n### 2. Summarizing and reviewing\n\nI often ask GitLab Duo to summarize lengthy merge requests or complicated epics. 
For instance, when reviewing the epic for [protected container images](https://gitlab.com/groups/gitlab-org/-/epics/9825), I asked GitLab Duo to summarize the key changes and their value for GitLab customers and users from a PM perspective.\n\nGitLab Duo provided a detailed summary, highlighting the following:\n\n* enhanced security and compliance features\n* improved governance and control\n* streamlined operations\n* increased confidence in CI/CD pipelines\n* better alignment with DevOps practices\n* customizable security posture\n* improved auditability\n* cost-effective security solution\n\nThis summary helped me quickly grasp and share the key points with my team more effectively.\n\n### 3. Project status updates\n\nTo get a quick overview of a project's status, I asked GitLab Duo to provide an update \"like a hyper-focused project manager.\" The response included:\n\n* overall progress\n* completed items\n* in-progress tasks\n* next steps\n* timeline\n* risks and issues\n* stakeholder input\n* action items\n* key performance indicators\n* communication plans\n\nThis structured overview allowed me to quickly assess the project's status and identify areas needing attention.\n\n### 4. Value proposition and metric analysis\n\nWhen I needed to articulate the value of measuring monthly active users and storage costs for the Package stage, I asked GitLab Duo for help. The response provided a comprehensive explanation, covering:\n\n* user engagement and adoption insights\n* feature prioritization\n* capacity planning\n* business model optimization\n* customer success indicators\n* cost management\n* competitive positioning\n* product health assessment\n* ROI calculation\n* future planning considerations\n\nThis well-structured response gave me more than enough content and helped me better articulate the value of these essential metrics.\n\n### 5. 
Challenging cognitive biases\n\nTo reveal blind spots in my thinking, I often ask GitLab Duo to answer in specific personas, such as:\n\n* a hyper-focused project manager\n* a frustrated customer\n* a developer who doesn't have time to read issues\n* a product leader who demands excellence\n\nFor example, when I created an investment case for GitLab Package, I asked GitLab Duo to review it as a hypercritical CEO. This perspective helped me consider including financial projections and competitive analysis in my proposal, which I had initially overlooked.\n\n## A more efficient and effective PM\n\nThe impact of integrating GitLab Duo Chat into my workflow has been significant:\n\n1. **Increased productivity:** Tasks that used to take hours now often take minutes. Creating the initial draft of the Conan issue took about five minutes with GitLab Duo, compared to the usual 30-45 minutes I'd spend starting from scratch.\n2. **Enhanced quality:** The initial drafts produced with GitLab Duo's help are more comprehensive and structured. For the protected container images project, GitLab Duo's input helped me more effectively summarize the value of my go-to-market strategy and the project's current status.\n3. **Improved decision-making:** I've created more robust, well-rounded proposals using GitLab Duo to challenge my assumptions. The critique of my investment case led to a more thorough cost-benefit analysis.\n4. **Continuous improvement:** The feedback loop of writing, getting GitLab Duo's input, and refining has helped me improve my writing and analytical skills. My first drafts are becoming stronger, even without GitLab Duo's assistance.\n\n## A new era of AI-assisted product management\n\nWhile GitLab Duo Chat hasn't replaced my role as a PM, it has become an invaluable tool in my arsenal. It's helped me be more efficient, thorough, and objective. 
As AI assistants like GitLab Duo continue to evolve, I'm excited about the potential for further enhancing our product management practices.\n\nHowever, it's crucial to remember that GitLab Duo is a tool, not a replacement for human insight and creativity. The best results come from combining GitLab Duo's capabilities with our expertise and understanding of our unique business context.\n\n## Try GitLab Duo\n\nI encourage fellow PMs to explore how AI assistants like GitLab Duo Chat can augment their work. Here are some steps you can take:\n\n1. **Start small:** Use GitLab Duo for simple tasks like summarizing issues or drafting initial proposals.\n2. **Experiment with personas:** Ask GitLab Duo to review your work from different perspectives to uncover blind spots.\n3. **Refine your prompts:** Learn how to craft effective prompts to get the most valuable responses from GitLab Duo.\n4. **Share your experiences:** Discuss your use of AI tools with your team and contribute to best practices.\n\nWith the right approach, these tools can help us focus more on strategic thinking and less on routine tasks, ultimately leading to better products and happier customers.\n\n> [Try GitLab Duo free for 60 days today!](https://about.gitlab.com/solutions/gitlab-duo-pro/sales/?type=free-trial&toggle=gitlab-duo-pro)\n",[9,725,746],"workflow",{"slug":748,"featured":91,"template":680},"5-ways-gitlab-duo-chat-ai-can-supercharge-product-management","content:en-us:blog:5-ways-gitlab-duo-chat-ai-can-supercharge-product-management.yml","5 Ways Gitlab Duo Chat Ai Can Supercharge Product 
Management","en-us/blog/5-ways-gitlab-duo-chat-ai-can-supercharge-product-management.yml","en-us/blog/5-ways-gitlab-duo-chat-ai-can-supercharge-product-management",{"_path":754,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":755,"content":761,"config":767,"_id":769,"_type":14,"title":770,"_source":16,"_file":771,"_stem":772,"_extension":19},"/en-us/blog/a-developers-guide-to-building-an-ai-security-governance-framework",{"title":756,"description":757,"ogTitle":756,"ogDescription":757,"noIndex":6,"ogImage":758,"ogUrl":759,"ogSiteName":693,"ogType":694,"canonicalUrls":759,"schema":760},"A developer's guide to building an AI security governance framework","Learn the strategies and practices to adopt for secure and responsible development and use of AI.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749664638/Blog/Hero%20Images/applicationsecurity.png","https://about.gitlab.com/blog/a-developers-guide-to-building-an-ai-security-governance-framework","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"A developer's guide to building an AI security governance framework\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Ayoub Fandi\"}],\n        \"datePublished\": \"2024-04-23\"\n      }",{"title":756,"description":757,"authors":762,"heroImage":758,"date":764,"body":765,"category":10,"tags":766},[763],"Ayoub Fandi","2024-04-23","Artificial Intelligence (AI) has firmly established itself as a pillar of digital transformation, disrupting industries, increasing efficiency, and providing unmatched access to large data sets. AI also raises profound questions regarding security governance. How do I ensure I can leverage the best of what AI has to offer while mitigating its potential security risks? As [AI continues to advance](https://about.gitlab.com/topics/devops/the-role-of-ai-in-devops/), there is a growing need for strong oversight and accountability. 
This article delves into the complex landscape of AI security governance, exploring various frameworks, strategies, and practices that organizations like GitLab are adopting to ensure the responsible development of AI technologies and features.\n\n## Greater scrutiny on AI\n\n### AI: Single term, numerous realities\nAI isn't a monolithic entity - it encompasses a spectrum of technologies and applications. From machine learning algorithms that power recommendation systems to advanced natural language processing models like Anthropic’s Claude 3, each AI system brings its unique set of opportunities and challenges.\n\nAccording to [a 2023 MITRE report](https://www.mitre.org/sites/default/files/2023-06/PR-23-1943-A-Sensible-Regulatory-Framework-For-AI-Security_0.pdf), three main areas of AI currently exist:\n\n1. **AI as a subsystem**\n\n\u003Cp>\u003C/p>\u003Ci>\"AI is embedded in many software systems. Discrete AI models routinely perform machine perception and optimization functions, from face recognition in photos uploaded to the cloud, to dynamically allocating and optimizing network resources in 5G wireless networks.\n  \u003Cp>\u003C/p>\n\"There are a wide range of vulnerabilities and threats against these types of AI subsystems – from data poisoning attacks to adversarial input attacks – that can be used to manipulate subsystems.\"\u003C/i>\u003Cp>\u003C/p>\n\n2. **AI as human augmentation**\n\u003Cp>\u003C/p>\u003Ci>\"Another application of AI is in augmenting human performance, allowing a person to operate with much larger scope and scale. This has wide-ranging implications for workforce planning as AI has the potential to increase productivity and shift the composition of labor markets, similar to the role of automation in the manufacturing industry. 
\n  \u003Cp>\u003C/p>\n\"While sophisticated hackers and military information operations can already generate believable content today using techniques such as computer-generated imagery, LLMs will make that capability available to anyone, while increasing the scope and scale at which the professionals can operate.\"\u003C/i>\u003Cp>\u003C/p>\n\n3. **AI with agency**\n\u003Cp>\u003C/p>\u003Ci>\"A segment of the tech community is increasingly concerned about scenarios where sophisticated AI could operate as an independent, goal-seeking agent. While science fiction historically embodied this AI in anthropomorphic robots, the AI we have today is principally confined to digital and virtual domains.\n\u003Cp>\u003C/p>\n\"One scenario is an AI model given a specific adversarial agenda. Stuxnet is perhaps an early example of sophisticated, AI-fueled, goal-seeking malware with an arsenal of zero-day attacks that ended up escaping onto the internet.\"\u003C/i>\u003Cp>\u003C/p>\n\nYou can focus your efforts in terms of security governance based on which areas your company is looking to adopt and the expected business benefits.\u003Cp>\u003C/p>\n\n### Frameworks for AI security governance\nFor effective AI security governance, we must navigate the complex landscape of guidelines and principles developed by various organizations.\n\nGovernments, international organizations, and tech companies have all played their part in shaping AI security governance frameworks. 
You can review the frameworks below and choose those that are relevant and/or apply to your organization:\n\n- [NIST AI Risk Management Framework (AI RMF)](https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.100-1.pdf)\n- [Google’s Security Artificial Intelligence Framework](https://services.google.com/fh/files/blogs/google_secure_ai_framework_approach.pdf)\n- [OWASP Top 10 for LLMs](https://owasp.org/www-project-top-10-for-large-language-model-applications/assets/PDF/OWASP-Top-10-for-LLMs-2023-v1_0.pdf)\n- [The UK’s NCSC Principles for the Security of Machine Learning](https://www.ncsc.gov.uk/files/Principles-for-the-security-of-machine-learning.pdf)\n\nWhile these frameworks provide valuable guidance, they also introduce complexity. Organizations must determine which apply to their AI usage and how they align to their practices. Moreover, the dynamic nature of AI requires continuous adaptation to stay secure.\n\nSomething to note is that if you read through these frameworks, you’ll notice that numerous controls overlap with standard security best practices. This isn’t a coincidence. A strong overall security program is a prerequisite for proper AI security governance.\n\n## How-to: AI security governance\n### The why and the what\nAI security governance starts with understanding what AI technologies your organization is using or developing, why you are using them, and where these technologies fit into your operations. It's essential to define clear objectives and identify potential security risks associated with AI deployment. This introspection lays the foundation for effective AI security governance.\n\n#### The why\n\nUnderstanding the \"why\" behind each AI application is pivotal to build effective security governance. Each AI system deployed has to serve a specific purpose. Is AI being utilized to enhance customer experiences, automate manual tasks, or support the decision-making process? 
\n\nBy uncovering the motivations driving AI initiatives, organizations can align these projects with their broader business objectives. This alignment ensures that AI investments are strategically focused, delivering value in line with organizational goals. It also aids in prioritizing AI systems that have a more significant impact on the core mission of the company.\n\n#### The what\nIn the realm of AI security governance, the foundational step is conducting a comprehensive inventory of all AI systems, algorithms, and data sources within your organization. This includes meticulously cataloging all AI technologies in use, ranging from machine learning models and natural language processing algorithms to computer vision systems. This would also involve identifying the data sources feeding these AI systems, and their origins (internal databases, customer interactions, or third-party data providers). Such an inventory provides three main benefits: \n- to gain a holistic understanding of the AI ecosystem within the organization \n- to establish a strong basis for monitoring, auditing, and managing these assets effectively\n- to focus security efforts on the high-risk/critical areas\n\n### How to develop a security risk management program\nA robust security risk management program is at the core of responsible AI security governance. The critical building blocks for this program are the what and the why we discussed earlier. \n\nSpecificities of AI make security risk management more complex. In the NIST AI RMF mentioned earlier, numerous challenges are highlighted, including:\n\n- Difficult to measure AI-related security risks\n    - Potential security risks could emerge from the AI model, the software on which you are training the model, or the data ingested by the model. 
Different stages of the AI lifecycle might also trigger specific security risks depending on which actors (producers, developers, or consumers) are leveraging the AI solution.\n- Risk tolerance threshold might be complex to determine \n    - As the potential security risks aren’t easily identifiable, determining the risk tolerance your organization can withstand regarding AI can be a very empirical exercise.\n- Not considering AI in isolation \n    - Security governance of AI systems should be part of your security risk management strategy. Different users might have different parts of the overall picture. Ensuring you have complete information and full visibility into the AI lifecycle is critical to making the best decisions.\n\nSecurity risk management should be an ongoing process, adapting to the quickly evolving AI landscape. Reassessing the program, reviewing assumptions regarding the environment and involving additional business stakeholders are activities that should be happening on a regular basis.\n\n## AI security governance and the GitLab DevSecOps platform\n### Using AI to power DevSecOps \nLet’s take [GitLab Duo](https://about.gitlab.com/gitlab-duo/), our suite of AI capabilities to help power DevSecOps workflows, as an example. [GitLab Duo Code Suggestions](https://about.gitlab.com/solutions/code-suggestions/) helps developers write code more efficiently by using generative AI to assist in software engineering tasks. It works either through code completion or through code generation using natural language code comment blocks.\n\nTo ensure it can be fully leveraged, security needs of potential users and customers have to be considered. As an example, data used to produce Code Suggestions is immediately discarded by the AI models. \n\nAll of GitLab’s AI providers are subject to contractual terms with GitLab that prohibit the use of customer content for the provider’s own purposes, except to perform their independent legal obligations. 
[GitLab’s own privacy policy](https://about.gitlab.com/privacy/) prevents us from using customer data to train models without customer consent. \n\nOf course, to fully benefit from Code Suggestions, you should:\n- understand and review all suggestions to see if they align with your development guidelines\n- limit providing sensitive information or proprietary code in prompts \nensure the suggestion follows the same secure coding guidelines your company has\n- review the code using automated scanning for vulnerable dependencies, input validation and output sanitization, as well as license checks\n\n### Securing AI\nManaging the output of AI systems is equally important as managing the input. Security scanning tools can help identify vulnerabilities and potential threats in AI-generated code. \n\nManaging AI output requires a systematic approach to code review and validation. Organizations should [integrate security scanning tools into their CI/CD pipelines](https://docs.gitlab.com/ee/user/application_security/), ensuring that AI-generated code is checked for security vulnerabilities before deployment. Automated security checks can help detect vulnerabilities early in the development process, reducing the risk of potential vulnerable code stemming from suggested code blocks being merged.\n\nFor any GitLab Duo generated code, changes are managed via merge requests which trigger your CI pipeline (including any security and code quality scanning you have configured). This ensures any governance rules you have set up for your merge requests like required approvals are enforced.\n\nAI systems are systems. Existing security controls apply to AI systems the same way they would apply to the rest of your environment. 
Common security controls around application security still apply, including [security reviews](https://docs.gitlab.com/ee/user/project/merge_requests/reviews/data_usage.html), security scanning, [threat modeling](https://danielmiessler.com/p/athi-an-ai-threat-modeling-framework-for-policymakers), encryption, etc. The [Google Secure AI Framework](https://services.google.com/fh/files/blogs/google_secure_ai_framework_approach.pdf) highlights these six elements:\n- expand strong security foundations to the AI ecosystem\n- extend detection and response to bring AI into an organization’s threat universe\n- automate defenses to keep pace with existing and new threats\n- harmonize platform-level controls to ensure consistent security across the organization\n- adapt controls to adjust mitigations and create faster feedback loops for AI deployment\n- contextualize AI system risks in surrounding business processes\n\nIf you have a strong security program, managing AI will be an extension of your current program and account for specific risks and vulnerabilities.\n\n## How GitLab Duo is secured\nGitLab recognizes the significance of security in AI governance. Our very strong security program is focused on ensuring our customers can fully leverage [GitLab Duo](https://docs.gitlab.com/ee/user/ai_features.html) in a secure manner. 
This is how the security departments are collaborating to secure GitLab’s AI features:
\n\n>  Learn more about [GitLab Duo AI features](https://about.gitlab.com/gitlab-duo/).\n",[9,703,678,184],{"slug":768,"featured":91,"template":680},"a-developers-guide-to-building-an-ai-security-governance-framework","content:en-us:blog:a-developers-guide-to-building-an-ai-security-governance-framework.yml","A Developers Guide To Building An Ai Security Governance Framework","en-us/blog/a-developers-guide-to-building-an-ai-security-governance-framework.yml","en-us/blog/a-developers-guide-to-building-an-ai-security-governance-framework",{"_path":774,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":775,"content":781,"config":788,"_id":790,"_type":14,"title":791,"_source":16,"_file":792,"_stem":793,"_extension":19},"/en-us/blog/accelerate-code-reviews-with-gitlab-duo-and-amazon-q",{"title":776,"description":777,"ogTitle":776,"ogDescription":777,"noIndex":6,"ogImage":778,"ogUrl":779,"ogSiteName":693,"ogType":694,"canonicalUrls":779,"schema":780},"Accelerate code reviews with GitLab Duo and Amazon Q","Use AI-powered agents to optimize code reviews by automatically analyzing merge requests and providing comprehensive feedback on bugs, readability, and coding standards.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1750096976/Blog/Hero%20Images/Blog/Hero%20Images/Screenshot%202024-11-27%20at%204.55.28%E2%80%AFPM_4VVz6DgGBOvbGY8BUmd068_1750096975734.png","https://about.gitlab.com/blog/accelerate-code-reviews-with-gitlab-duo-and-amazon-q","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Accelerate code reviews with GitLab Duo and Amazon Q\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Cesar Saavedra\"}],\n        \"datePublished\": \"2025-06-02\",\n      }",{"title":776,"description":777,"authors":782,"heroImage":778,"date":783,"body":784,"category":10,"tags":785},[720],"2025-06-02","Code reviews are critical for catching bugs, improving code readability, and 
maintaining coding standards, but they can also be a major bottleneck in your workflow. When you're trying to ship features quickly, waiting for multiple team members to review your code can be frustrating. The back-and-forth discussions, the scheduling conflicts, and the time it takes to get everyone aligned can stretch what should be a simple review into days or even weeks.\n\nHere's where [GitLab Duo with Amazon Q](https://about.gitlab.com/blog/gitlab-duo-with-amazon-q-agentic-ai-optimized-for-aws/), our new offering that delivers agentic AI throughout the software development lifecycle for AWS customers, comes in to transform your review process. This intelligent, AI-powered solution can perform comprehensive code reviews for you in a fraction of the time it would take your human colleagues. By leveraging advanced agentic AI capabilities, GitLab Duo with Amazon Q streamlines your entire review workflow without sacrificing the quality and thoroughness you need. Think of it as having an always-available, highly skilled reviewer who can instantly analyze your code and provide actionable feedback.\n\n## How it works: Launching a code review\n\nSo how does GitLab Duo with Amazon Q actually work? Let's say you've just finished working on a feature and created a merge request with multiple code updates. Instead of pinging your teammates and waiting for their availability, you simply enter a quick command in the comment section: \"/q review\". That's it – just those two words trigger the AI to spring into action.\n\n![Triggering a code review using GitLab Duo with Amazon Q](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097002/Blog/Content%20Images/Blog/Content%20Images/image1_aHR0cHM6_1750097002096.png)\n\nOnce you've entered the command, Amazon Q Service immediately begins analyzing your code changes. 
You'll see a confirmation that the review is underway, and within moments, the AI is examining every line of your updates, checking for potential issues across multiple dimensions.\nWhen the review completes, you receive comprehensive feedback that covers all the bases: bug detection, readability improvements, syntax errors, and adherence to your team's coding standards. The AI doesn't just point out problems, it provides context and suggestions for fixing them, making it easy for you to understand what needs attention and why.\n\nThe beauty of this agentic AI approach is that it handles the heavy lifting of code review while you focus on what matters most: building great software. You get the benefits of thorough code reviews — better bug detection, consistent coding standards, and improved code quality — without the time sink. Your deployment times shrink dramatically because you're no longer waiting in review queues, and your entire team becomes more productive.\n\n## Why use GitLab Duo with Amazon Q?\n\nGitLab Duo with Amazon Q transforms your development workflow in the following ways:\n- Lightning-fast code reviews that don't compromise on quality\n- Consistent application of coding standards across your entire codebase\n- Immediate feedback that helps you fix issues before they reach production\n- Reduced deployment times that let you ship features faster\n- More time for your team to focus on creative problem-solving instead of repetitive reviews\n\nReady to see this game-changing feature in action? 
Watch how GitLab Duo with Amazon Q can revolutionize your code review process:\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/4gFIgyFc02Q?si=GXVz--AIrWiwzf-I\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\n> To learn more about GitLab Duo with Amazon Q visit us at an upcoming [AWS Summit in a city near you](https://about.gitlab.com/events/aws-summits/) or [reach out to your GitLab representative](https://about.gitlab.com/partners/technology-partners/aws/#form).\n> \n> And make sure to join the GitLab 18 virtual launch event to learn about our agentic AI plans and more. [Register today!](https://about.gitlab.com/eighteen/)",[9,482,786,725,704,282,787,724],"code review","AWS",{"slug":789,"featured":91,"template":680},"accelerate-code-reviews-with-gitlab-duo-and-amazon-q","content:en-us:blog:accelerate-code-reviews-with-gitlab-duo-and-amazon-q.yml","Accelerate Code Reviews With Gitlab Duo And Amazon Q","en-us/blog/accelerate-code-reviews-with-gitlab-duo-and-amazon-q.yml","en-us/blog/accelerate-code-reviews-with-gitlab-duo-and-amazon-q",{"_path":795,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":796,"content":802,"config":809,"_id":811,"_type":14,"title":812,"_source":16,"_file":813,"_stem":814,"_extension":19},"/en-us/blog/agentic-ai-guides-and-resources",{"title":797,"description":798,"ogTitle":797,"ogDescription":798,"noIndex":6,"ogImage":799,"ogUrl":800,"ogSiteName":693,"ogType":694,"canonicalUrls":800,"schema":801},"Agentic AI guides and resources","Learn everything you need to know about agentic AI, including what it is, how it works, why it levels up your DevSecOps environment, and best practices for implementation.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749658912/Blog/Hero%20Images/blog-image-template-1800x945__20_.png","https://about.gitlab.com/blog/agentic-ai-guides-and-resources","\n                   
     {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Agentic AI guides and resources\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"GitLab\"}],\n        \"datePublished\": \"2025-05-07\",\n      }",{"title":797,"description":798,"authors":803,"heroImage":799,"date":805,"body":806,"category":10,"tags":807,"updatedDate":808},[804],"GitLab","2025-05-07","## Defining agentic AI\n\nAgentic AI is a type of artificial intelligence that leverages advanced language models and natural language processing to take independent action. Unlike traditional generative AI tools that require constant human direction, these systems can understand requests, make decisions, and execute multi-step plans to achieve goals. They tackle complex tasks by breaking them into manageable steps and employ adaptive learning to modify their approach when facing challenges.\n\n[Learn more about agentic AI](https://about.gitlab.com/topics/agentic-ai/)\n\n## Agentic AI insights\n- [GitLab Duo Agent Platform Public Beta: Next-gen AI orchestration and more](https://about.gitlab.com/blog/gitlab-duo-agent-platform-public-beta/) — Introducing the DevSecOps orchestration platform designed to unlock asynchronous collaboration between developers and AI agents.\n- [GitLab Duo Agent Platform: What's next for intelligent DevSecOps](https://about.gitlab.com/blog/gitlab-duo-agent-platform-what-is-next-for-intelligent-devsecops/) — GitLab Duo Agent Platform, a DevSecOps orchestration platform for humans and AI agents, leverages agentic AI for collaboration across the software development lifecycle.\n- [From vibe coding to agentic AI: A roadmap for technical leaders](https://about.gitlab.com/the-source/ai/from-vibe-coding-to-agentic-ai-a-roadmap-for-technical-leaders/) — Discover how to implement vibe coding and agentic AI in your development process to increase productivity while maintaining code quality and security.\n- [Emerging agentic AI 
trends reshaping software development](https://about.gitlab.com/the-source/ai/emerging-agentic-ai-trends-reshaping-software-development/) — Discover how agentic AI transforms development from isolated coding to intelligent workflows that enhance productivity while maintaining security.\n- [Agentic AI: Unlocking developer potential at scale](https://about.gitlab.com/the-source/ai/agentic-ai-unlocking-developer-potential-at-scale/) — Explore how agentic AI is transforming software development, moving beyond code completion to create AI partners that proactively tackle complex tasks.\n- [Agentic AI, self-hosted models, and more: AI trends for 2025](https://about.gitlab.com/the-source/ai/ai-trends-for-2025-agentic-ai-self-hosted-models-and-more/) — Discover key trends in AI for software development, from on-premises model deployments to intelligent, adaptive AI agents.\n- [How agentic AI unlocks platform engineering potential](https://about.gitlab.com/the-source/ai/how-agentic-ai-unlocks-platform-engineering-potential/) — Explore how agentic AI elevates platform engineering by automating complex workflows and scaling standardization.\n\n## The agentic AI ecosystem\n- [AI-driven code analysis: The new frontier in code security](https://about.gitlab.com/topics/agentic-ai/ai-code-analysis/) \n- [DevOps automation & AI agents](https://about.gitlab.com/topics/agentic-ai/devops-automation-ai-agents/)\n- [AI-augmented software development: Agentic AI for DevOps](https://about.gitlab.com/topics/agentic-ai/ai-augmented-software-development/)\n\n## Best practices for implementing agentic AI\n\n- [Implementing effective guardrails for AI agents](https://about.gitlab.com/the-source/ai/implementing-effective-guardrails-for-ai-agents/) — Discover essential security guardrails for AI agents in DevSecOps, from compliance controls and infrastructure protection to user access management.\n\n## GitLab's agentic AI offerings\n\n### GitLab Duo with Amazon Q\n\n- [GitLab Duo with Amazon Q: 
Agentic AI optimized for AWS generally available](https://about.gitlab.com/blog/gitlab-duo-with-amazon-q-agentic-ai-optimized-for-aws/) — The comprehensive AI-powered DevSecOps platform combined with the deepest set of cloud computing capabilities speeds dev cycles, increases automation, and improves code quality.\n- [DevSecOps + Agentic AI: Now on GitLab Self-Managed Ultimate on AWS](https://about.gitlab.com/blog/devsecops-agentic-ai-now-on-gitlab-self-managed-ultimate-on-aws/) — Start using AI-powered, DevSecOps-enhanced agents in your AWS GitLab Self-Managed Ultimate instance. Enjoy the benefits of GitLab Duo and Amazon Q in your organization.\n- [GitLab Duo with Amazon Q partner page](https://about.gitlab.com/partners/technology-partners/aws/)\n\nWatch GitLab Duo with Amazon Q in action:\n\n\u003Cdiv style=\"padding:56.25% 0 0 0;position:relative;\">\u003Ciframe src=\"https://player.vimeo.com/video/1075753390?badge=0&amp;autopause=0&amp;player_id=0&amp;app_id=58479\" frameborder=\"0\" allow=\"autoplay; fullscreen; picture-in-picture; clipboard-write; encrypted-media\" style=\"position:absolute;top:0;left:0;width:100%;height:100%;\" title=\"Technical Demo: GitLab Duo with Amazon Q\">\u003C/iframe>\u003C/div>\u003Cscript src=\"https://player.vimeo.com/api/player.js\">\u003C/script>\n\n#### Guided tour\nClick on the image to start a tour of GitLab Duo with Amazon Q:\n\n[![GitLab Duo with Amazon Q interactive tour](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749673568/Blog/Content%20Images/Screenshot_2025-05-07_at_7.24.45_AM.png)](https://gitlab.navattic.com/duo-with-q)\n\n#### GitLab Duo with Amazon Q tutorials\n- [Enhance application quality with AI-powered test generation](https://about.gitlab.com/blog/enhance-application-quality-with-ai-powered-test-generation/) — Learn how GitLab Duo with Amazon Q improves the QA process by automatically generating comprehensive unit tests.\n- [GitLab Duo + Amazon Q: Transform ideas into code in 
minutes](https://about.gitlab.com/blog/gitlab-duo-amazon-q-transform-ideas-into-code-in-minutes/) — The new GitLab Duo with Amazon Q integration analyzes your issue descriptions and automatically generates complete working code solutions, accelerating development workflows.\n- [Accelerate code reviews with GitLab Duo and Amazon Q](https://about.gitlab.com/blog/accelerate-code-reviews-with-gitlab-duo-and-amazon-q/) — Use AI-powered agents to optimize code reviews by automatically analyzing merge requests and providing comprehensive feedback on bugs, readability, and coding standards.\n- [Speed up code reviews: Let AI handle the feedback implementation](https://about.gitlab.com/blog/speed-up-code-reviews-let-ai-handle-the-feedback-implementation/) — Discover how GitLab Duo with Amazon Q automates the implementation of code review feedback through AI, transforming a time-consuming manual process into a streamlined workflow.\n\n### GitLab Duo Agentic Platform\n- [GitLab Duo Chat gets agentic AI makeover](https://about.gitlab.com/blog/gitlab-duo-chat-gets-agentic-ai-makeover/) — Our new Duo Chat experience, currently an experimental release, helps developers onboard to projects, understand assignments, implement changes, and more.\nWatch GitLab Duo Agent Platform in action:\n\u003Cdiv style=\"padding:56.25% 0 0 0;position:relative;\">\u003Ciframe src=\"https://player.vimeo.com/video/1095679084?badge=0&amp;autopause=0&amp;player_id=0&amp;app_id=58479\" frameborder=\"0\" allow=\"autoplay; fullscreen; picture-in-picture; clipboard-write; encrypted-media; web-share\" style=\"position:absolute;top:0;left:0;width:100%;height:100%;\" title=\"Agent Platform Demo Clip\">\u003C/iframe>\u003C/div>\u003Cscript src=\"https://player.vimeo.com/api/player.js\">\u003C/script>\n\n#### GitLab Agent Platform (and Duo Workflow) tutorials and use cases\n- [Accelerate learning with GitLab Duo Agent Platform](https://about.gitlab.com/blog/accelerate-learning-with-gitlab-duo-agent-platform/) — 
Learn how agentic AI helped generate comprehensive gRPC documentation in minutes, not hours.\n- [Fast and secure AI agent deployment to Google Cloud with GitLab](https://about.gitlab.com/blog/fast-and-secure-ai-agent-deployment-to-google-cloud-with-gitlab/)\n\n- [Refactoring JavaScript to TypeScript with GitLab Duo Workflow](https://about.gitlab.com/blog/refactoring-javascript-to-typescript-with-gitlab-duo-workflow/)\n\n- [Automating tedious coding tasks with GitLab Duo Workflow](https://about.gitlab.com/blog/automate-tedious-coding-tasks-with-gitlab-duo-workflow/) — See how agentic AI can reduce time spent on repetitive tasks, freeing you up to focus on developing innovative solutions and shipping the next big thing.\n\n- [Use GitLab Duo Workflow to improve application quality assurance](https://about.gitlab.com/blog/use-gitlab-duo-workflow-to-improve-application-quality-assurance/) — Learn step-by-step how to add unit tests to a Java application using agentic AI (includes a video tutorial).\n\n- [Solving complex challenges with GitLab Duo Workflow](https://about.gitlab.com/blog/solving-complex-challenges-with-gitlab-duo-workflow/) — Learn how a member of the GitLab Customer Success Management team uses agentic AI for real-world problem-solving, including addressing Helm chart limits in the package registry.\n\n## Learn more with GitLab University\n\n- [Get Started with GitLab Duo coursework](https://university.gitlab.com/pages/ai)\n- [GitLab Duo Enterprise Learning Path](https://university.gitlab.com/learning-paths/gitlab-duo-enterprise-learning-path)\n\n## More AI resources\n\n- [2024 Global DevSecOps Survey: Navigating AI maturity in DevSecOps](https://about.gitlab.com/developer-survey/2024/ai/)\n- [The Role of AI in DevOps](https://about.gitlab.com/topics/devops/the-role-of-ai-in-devops/)\n- [The latest AI/ML articles from GitLab](https://about.gitlab.com/blog/categories/ai-ml/)\n- [GitLab 
Duo](https://about.gitlab.com/gitlab-duo/)",[9,482,724],"2025-06-10",{"slug":810,"featured":91,"template":680},"agentic-ai-guides-and-resources","content:en-us:blog:agentic-ai-guides-and-resources.yml","Agentic Ai Guides And Resources","en-us/blog/agentic-ai-guides-and-resources.yml","en-us/blog/agentic-ai-guides-and-resources",{"_path":816,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":817,"content":823,"config":829,"_id":831,"_type":14,"title":832,"_source":16,"_file":833,"_stem":834,"_extension":19},"/en-us/blog/ai-assisted-code-suggestions",{"title":818,"description":819,"ogTitle":818,"ogDescription":819,"noIndex":6,"ogImage":820,"ogUrl":821,"ogSiteName":693,"ogType":694,"canonicalUrls":821,"schema":822},"How AI-assisted code suggestions will advance DevSecOps","In this second blog in our ‘Future of AI/ML in DevSecOps’ series, find out the impact of AI Assisted code suggestions on the software development lifecycle.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749662840/Blog/Hero%20Images/ai-experiment-stars.png","https://about.gitlab.com/blog/ai-assisted-code-suggestions","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How AI-assisted code suggestions will advance DevSecOps\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Neha Khalwadekar\"}],\n        \"datePublished\": \"2023-03-23\",\n      }",{"title":818,"description":819,"authors":824,"heroImage":820,"date":826,"body":827,"category":10,"tags":828},[825],"Neha Khalwadekar","2023-03-23","\n\n\u003Ci>This blog post is part of an ongoing series about GitLab's journey to [build and integrate AI/ML into our DevSecOps platform](/blog/ai-ml-in-devsecops-series/). The series starts here: [What the ML is up with DevSecOps and AI?](/blog/what-the-ml-ai/). 
Throughout the series, we'll feature blogs from our product, engineering, and UX teams to showcase how we're infusing AI/ML into GitLab.\u003C/i>\n\nArtificial intelligence (AI) and machine learning (ML) have made incredible technological strides and are now poised to [impact the software development process](/topics/devops/the-role-of-ai-in-devops/). As we can see, AI code suggestion proposals have already had a tremendous influence in helping programmers reduce repetitive tasks. AI-assisted code suggestions will enable developers to speed up coding, debugging, refactoring, documentation, and many more tasks, greatly enhancing the software development lifecycle (SDLC).\n\n![Trends adopting AI/ML from GitLab's DevSecOps Survey](https://about.gitlab.com/images/blogimages/ai-ml-adoption-03-2023.png){: .shadow}\n\n## What are suggestions for AI-assisted code?\n\nML techniques are used in AI-assisted code suggestions to assess code and recommend improvements. These recommendations involve modifying the syntax, streamlining the organization of the code, or suggesting more effective methods. By lowering errors, increasing effectiveness, and providing optimization advice, the aim is to assist developers in writing better code faster.\n\n![Animated gif image of code suggestions](https://about.gitlab.com/images/15_9/DemoFastApi.gif){: .shadow}\n\n## How can AI-assisted code suggestions help?\n\nAI-assisted code suggestions can substantially improve the programming experience by reducing errors and helping programmers write code faster, which will help reproduce the much higher production code quality. \n\nHere are some of those SDLC improvements:\n\n- **Decreased errors, increased accuracy.** The capacity of AI-assisted code suggestions to decrease errors and increase accuracy is a critical advantage over manually written code. Developers can prevent common syntax errors, organize their code better, and boost algorithm performance with code suggestions. 
This leads to more dependable and effective code, which produces fewer defects and higher-quality software.\n- **A rise in productivity.** AI-assisted code suggestions can increase developers' efficiency by producing better code faster and more efficiently, saving time and money. Additionally, code suggestions can automate repetitive activities like formatting code, freeing engineers to concentrate on more complex jobs.\n- **Improved collaboration.** AI-assisted code recommendations can improve developer collaboration. Code suggestions can ensure all developers are on the same page by offering consistent coding standards and ideas for improvement. This will lessen the possibility of mistakes and facilitates efficient teamwork.\n- **Faster rollout and iteration.** AI-assisted code recommendations can hasten the deployment and iteration processes. With fewer errors and more effective code, developers can iterate and release updates faster. Code reviews also are faster and more efficient. As a result, enterprises can quickly bring new features to market, providing them with a substantial competitive edge.\n\n### GitLab’s competitive advantages\n\nGitLab’s unified DevSecOps platform enables businesses to deliver software more quickly and efficiently while enhancing security and compliance and maximizing the total return of investment on software development. We anticipate GitLab AI Assisted Code Suggestions will extend and amplify these benefits to improve developer productivity, focus, and innovation without context switching and within a single DevSecOps platform using the GitLab Workflow VS Code extension to get code suggestions as they type. Depending on the user prompts, the extension provides entire code snippets like generating functions or completing the current line. 
Simply pressing the tab key enables you to accept the suggestions.\n\nAs AI technologies advance in sophistication, they will provide more individualized and nuanced ideas, increasing their value to programmers.\n\nThe low-code/no-code development sectors are where AI-assisted code suggestions are anticipated to have substantial impact. As these development platforms spread, we envision bringing AI-powered tools that can offer recommendations and optimizations to simplify the software creation and deployment process for non-technical users on GitLab.com. \n\nThe following are some of the critical jobs we intend to address for our customers with AI Assisted Code Suggestions in the DevSecOps Platform:\n\n- **Code optimization.** How can we drastically reduce the time and effort required for developers to examine and test their code by identifying redundant or inefficient lines of code and suggesting streamlined alternatives?\n- **Automatic bug detection and patching.** How can we analyze sizable codebases to find potential bugs or security flaws and can also offer patches to fix them?\n- **Smart debugging.** How can we assist developers in locating faults precisely and make suggestions for potential fixes? This can result in considerable time and effort savings for developers and quicker bug response.\n- **Continuous integration and deployment.** How can we facilitate continuous integration and deployment by identifying code changes that could cause potential conflicts? This will enable developers to resolve issues quickly and roll out production code faster.\n- **Predictive maintenance.** How can we monitor the performance of the code and find potential issues before they become serious? As a result, developers may proactively address faults, leading to more dependable and stable software.\n- **Programming in natural language.** How can we allow developers to build code via simple natural-language commands? 
This can result in more efficient development and a much shorter learning curve for new developers.\n- **Test case generation and automation.** How can we generate test cases and automate the testing process? In addition to ensuring that software is adequately tested before it is deployed, this can cut down on the time and effort needed for testing.\n- **Smart code completion.** How can we ensure developers write code faster and more precisely, which completes code snippets based on context? This may lead to fewer mistakes and more effective development.\n\nGitLab’s AI Assisted Code Suggestions are available to select Ultimate customers in a closed beta. For early access consideration, Ultimate customers can [submit this form](https://docs.google.com/forms/d/e/1FAIpQLSdSixexFKnIkFGBbmx6XJfBdEBACowhsO-DOm82q4rrAAuYmA/viewform). We’re working towards a wider open beta of this capability in the next few months. \n\nContinue reading our ongoing series, \"[AI/ML in DevSecOps](/blog/ai-ml-in-devsecops-series/)\".\n\n_Disclaimer: This blog contains information related to upcoming products, features, and functionality. It is important to note that the information in this blog post is for informational purposes only. Please do not rely on this information for purchasing or planning purposes. As with all projects, the items mentioned in this blog and linked pages are subject to change or delay. 
The development, release, and timing of any products, features, or functionality remain at the sole discretion of GitLab._\n",[703,725,704,9],{"slug":830,"featured":6,"template":680},"ai-assisted-code-suggestions","content:en-us:blog:ai-assisted-code-suggestions.yml","Ai Assisted Code Suggestions","en-us/blog/ai-assisted-code-suggestions.yml","en-us/blog/ai-assisted-code-suggestions",{"_path":836,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":837,"content":843,"config":850,"_id":852,"_type":14,"title":853,"_source":16,"_file":854,"_stem":855,"_extension":19},"/en-us/blog/ai-ml-in-devsecops-series",{"title":838,"description":839,"ogTitle":838,"ogDescription":839,"noIndex":6,"ogImage":840,"ogUrl":841,"ogSiteName":693,"ogType":694,"canonicalUrls":841,"schema":842},"AI/ML in DevSecOps Series","This blog series chronicles our journey to integrate AI/ML throughout the software development lifecycle.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749682693/Blog/Hero%20Images/ai-ml-in-devsecops-blog-series.png","https://about.gitlab.com/blog/ai-ml-in-devsecops-series","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"AI/ML in DevSecOps Series\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"GitLab AI Assisted Group\"}],\n        \"datePublished\": \"2023-04-24\",\n      }",{"title":838,"description":839,"authors":844,"heroImage":840,"date":846,"body":847,"category":10,"tags":848},[845],"GitLab AI Assisted Group","2023-04-24","\n\nOur \"AI/ML in DevSecOps\" series tracks GitLab's journey to build and integrate AI/ML into our DevSecOps platform. Throughout the series, we’ll feature blogs from our product, engineering, and UX teams to showcase how we’re infusing AI/ML into GitLab. 
So be sure to bookmark this page and follow along.\n\nThis series details [many features introduced during our AI Fireside Chat](/blog/gitlab-ai-assisted-features/) on May 3, 2023.\n\nGet a [full overview of our AI-powered DevSecOps platform](https://about.gitlab.com/solutions/ai/). \n\n1. [What the ML is up with DevSecOps and AI?](/blog/what-the-ml-ai/)\nGitLab users already benefit from a step-function increase in productivity when they adopt our platform: streamlined collaboration, operational efficiencies, and massive acceleration in time to delivery. But by introducing machine learning (ML) and other artificial intelligence (AI) capabilities into the fabric of The DevSecOps Platform feature set, we aim to take those gains to a whole new level.\n\n2. [How AI-assisted code suggestions will advance DevSecOps](/blog/ai-assisted-code-suggestions/)\nAI-assisted code suggestions can substantially improve the programming experience by reducing errors and helping programmers write code faster, which will help produce much higher production code quality.\n\n3. [ML experiment: Writing SQL is about to get a lot easier](/blog/ml-experiment-sql/)\nWith the recent advancements in AI and natural language processing, it's now possible for AI models to generate SQL code from simple English language queries. This means that even people without a deep understanding of SQL can generate complex queries to analyze their data. This technology not only improves accessibility but can also save valuable time and effort for data analysts.\n\n4. [ML experiment: Explain this source code](/blog/explain-this-code/)\nDeciphering the source code of a new software project can be a daunting or at least time-consuming task. The code may be poorly documented, or it may be written in a programming language that is unfamiliar to the developer. Even if the developer is familiar with the programming language, the code may be complex and difficult to understand. 
But what if developers had a helpful tool to figure out very quickly what code was doing? With recent advancements in AI models, it's now possible to have code explained in natural language.\n\n5. [ML experiment: Summarizing issue comments](/blog/summarize-issues/)\nLarge language models (LLMs) power generative AI solutions by using deep learning algorithms to analyze vast amounts of natural language data. These models are trained on massive datasets to develop an understanding of language patterns and context. Once trained, the models can generate new text that mimics human language. In a rapid prototype, our own Alexandru Croitor, Senior Backend Engineer, and Nicolas Dunlar, Senior Frontend Engineer for our Plan stage, leverage generative AI LLMs to power comment summarization within GitLab's issues.\n\n6. [ML experiment: Summarize merge request changes](/blog/merge-request-changes-summary-ai/)\nMerge requests are the central point of collaboration for code changes in GitLab. They often contain a variety of changes across many files and services within a project. Often, merge requests communicate the intent of the change as it relates to an issue being resolved, but they might not describe what was changed to achieve that. As review cycles progress, the current state of the merge request can become out of sync with the realities of the proposed changes and keeping people informed. We believe that we can leverage AI and large language models (LLMs) to help provide relevant summaries of a merge request and its proposed changes, so reviewers and authors can spend more time discussing changes and less time keeping descriptions updated.\n\n7. [ML experiment: Generate tests for code changes](/blog/merge-request-suggest-a-test/)\nProposing changes and new features via merge requests is great, but what about the tests? Sometimes, tests can be the hardest part of any code change you make. 
We are leveraging generative AI to enable developers to create tests for merge request changes, helping reduce the laborious but important task of writing tests and increasing test coverage. \n\n8. [ML experiment: Explain this vulnerability](/blog/explain-this-vulnerability/)\nSecurity vulnerabilities aren't always easy to understand, especially for developers without experience or training with cybersecurity. We're leveraging AI to help developers understand security vulnerabilities and even get guidance on how to resolve them.\n\n9. [ML experiment: Use a chatbot to answer how-to questions](/blog/gitlab-chat-ai/)\nLarge language models (LLMs) have changed the way everyday people interact with large volumes of text. We thought, why not train an LLM on GitLab's extensive documentation to help users quickly answer natural language questions. Gone are the days of endless searching through vast documentation sites.\n\n10. [Track ML model experiments with new GitLab MLFlow integration](/blog/track-machine-learning-model-experiments/)\nModel experiments allow data scientists to track different variations of machine learning models directly on GitLab.\n\n11. [Code Suggestions available to all GitLab tiers while in Beta](/blog/code-suggestions-for-all-during-beta/)\nWe've made code suggestions available to all plans for free during Beta. Also, learn about recent updates to Code Suggestions.\n\n12. [ML experiment: Summarize my merge request review](/blog/summarize-my-merge-request-review/) \nLearn how GitLab is experimenting with ML-powered merge request review summaries.\n\n13. [How Code Suggestions can supercharge developers' daily productivity](/blog/code-suggestions-improves-developer-productivity/)\nLearn how you can use GitLab Code Suggestions to accelerate your development.\n\n14. 
[ML experiment: Extending Code Suggestions to more development environments](/blog/extending-code-suggestions/)\nLearn how we're expanding Code Suggestions to support Visual Studio, \nJetBrains IDEs, Neovim, and other development environments.\n\n15. [Train and deploy AI models with GitLab and Google Vertex AI](/blog/training-and-deploying-ai-models-with-gitlab-and-vertex-ai/)\nA demonstration of GitLab's DevSecOps capabilities combined with Vertex AI's scalable ML platform, designed with the aim of rapid and secure AI deployments. \n\n16. [Self-managed support for Code Suggestions (Beta)](/blog/self-managed-support-for-code-suggestions/)\nOne of our most popular customer requests – self-managed support for Code Suggestions (Beta) – is expected to ship soon in GitLab 16.1. Learn how it will work.\n\n17. [Meet GitLab Duo - The suite of AI capabilities powering your workflows](/blog/meet-gitlab-duo-the-suite-of-ai-capabilities/)\nLearn about GitLab Duo, an expanding toolbox of features integrated directly into the GitLab platform to assist DevSecOps teams.\n\n18. [GitLab for Visual Studio, including code suggestions, available in Beta](/blog/gitlab-visual-studio-extension/)\nGitLab for Visual Studio extension supports GitLab Duo code suggestions for both GitLab SaaS and GitLab self-managed.\n\n19. [Empower ModelOps and HPC workloads with GPU-enabled runners integrated with CI/CD](/blog/empowering-modelops-and-hpc-workloads-with-gpu-enabled-runners)\nLearn how to leverage our GitLab-hosted GPU-enabled runners for ModelOps and high-performance computing workloads.\n\n20. [GitLab Duo Code Suggestions for JetBrains and Neovim](/blog/gitlab-jetbrains-neovim-plugins/) GitLab plugins for JetBrains IDEs and Neovim are now available in Beta,\nbringing GitLab Duo Code Suggestions to more software development environments.\n\nWant to learn even more about AI/ML? 
Check out our [AI Assisted Group direction page](/direction/modelops/ai_assisted/) and more [AI/ML articles](/blog/tags.html#AI/ML).\n",[849,725,704,9],"DevOps",{"slug":851,"featured":6,"template":680},"ai-ml-in-devsecops-series","content:en-us:blog:ai-ml-in-devsecops-series.yml","Ai Ml In Devsecops Series","en-us/blog/ai-ml-in-devsecops-series.yml","en-us/blog/ai-ml-in-devsecops-series",{"_path":857,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":858,"content":864,"config":870,"_id":872,"_type":14,"title":873,"_source":16,"_file":874,"_stem":875,"_extension":19},"/en-us/blog/ai-powered-growth-transform-every-stage-of-software-delivery",{"title":859,"description":860,"ogTitle":859,"ogDescription":860,"noIndex":6,"ogImage":861,"ogUrl":862,"ogSiteName":693,"ogType":694,"canonicalUrls":862,"schema":863},"AI-powered growth: Transform every stage of software delivery","Find out how the latest developments in the GitLab AI-powered DevSecOps Platform boost efficiency throughout the software development lifecycle.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749669095/Blog/Hero%20Images/gitlabduo.png","https://about.gitlab.com/blog/ai-powered-growth-transform-every-stage-of-software-delivery","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"AI-powered growth: Transform every stage of software delivery\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Laurena Alves\"}],\n        \"datePublished\": \"2024-01-29\",\n      }",{"title":859,"description":860,"authors":865,"heroImage":861,"date":867,"body":868,"category":10,"tags":869},[866],"Laurena Alves","2024-01-29","It’s clear artificial intelligence (AI) is top of mind for organizations and their software development teams. 
With developers and security teams forced to focus so much of their time on repetitive, mundane tasks, of course they’re looking for ways to ease that work and carve out more time in their schedules.\n\nAccording to the [GitLab 2023 State of AI in Software Development report](https://about.gitlab.com/developer-survey/), developers noted spending 25% of their total work time writing code, with the rest spent improving existing code, understanding existing code, testing, maintaining code, and identifying and mitigating security vulnerabilities. As organizations incorporate AI into their DevSecOps processes, they'll need to adopt AI solutions — such as vulnerability explanations, code change summaries, automated tests, and more — that will help them introduce efficiencies and boost productivity and collaboration across the entire software development lifecycle (SDLC).\n\nWe know AI, with the efficiencies it brings across the entire SDLC, has a role to play in saving developers’ time, improving their jobs, and helping speed the production of secure software. Over the past few months, GitLab has released a set of AI-powered features that will help you do just that. \n> Discover the future of AI-driven software development with our GitLab 17 virtual launch event. [Watch today!](https://about.gitlab.com/seventeen/)\n\n### GitLab Duo Pro: Unleash the power of AI\n\nFor Ultimate and Premium customers, [GitLab Duo Pro](https://about.gitlab.com/pricing/) is a new add-on package that brings together Code Suggestions, GitLab Duo Chat (Beta), and organizational control capabilities, ensuring that teams can take advantage of AI exactly where they need it. \n\nCode Suggestions helps automate routine coding tasks, allowing developers to focus on delivering higher-quality software at speed, while Chat helps teams write and understand code faster, get up to speed on the status of projects, and quickly learn GitLab. 
And the organizational controls included in GitLab Duo Pro give organizations fine-grained control over which users have access to AI, ensuring alignment with team workflows and security requirements. At an introductory price of $9 USD per user per month (valid until January 31), and then $19 USD per user per month after February 1, GitLab Duo Pro is an investment in developer efficiency and organizational governance. [Contact us today to get started](https://about.gitlab.com/solutions/gitlab-duo-pro/sales/).\n\n### Remove roadblocks and ship faster\n\nDevelopers struggle with repetitive tasks slowing down their workflow and hindering innovation. Manual code reviews and testing take additional time, inhibiting release cycles. And inconsistent quality standards raise security alarms and can create potential vulnerabilities. This all stifles productivity and reduces developer happiness. \n\nWith the recent general availability of [Code Suggestions](https://about.gitlab.com/blog/gitlab-duo-code-suggestions-is-generally-available/), developers of all skill levels now have a virtual assistant that makes building software faster, more efficient, and more secure. The AI-powered feature now has enhancements like [Code Generation](https://docs.gitlab.com/ee/user/project/repository/code_suggestions/), which generates code based on natural language comment blocks, and [Repository X-ray](https://docs.gitlab.com/ee/user/project/repository/code_suggestions/repository_xray.html), which improves the accuracy and relevance of code recommendations. \n\nCode Suggestions is your virtual pair programming buddy that can handle repetitive tasks, giving developers more time to focus on creating innovative products. Acting as a coding sidekick, this AI capability can automatically generate lines of code, complete functions, and even extend unfamiliar codebases. 
Code Suggestions supports 15 languages, including C++, C#, Go, Java, JavaScript, Python, PHP, Ruby, Rust, Scala, Kotlin, and TypeScript. It also integrates seamlessly into many popular IDEs like VS Code, Visual Studio, JetBrains’ suite of IDEs, and Neovim.\n\n### Gain the power of an AI assistant\n\nWhile Code Suggestions takes the spotlight, it's just one capability of [GitLab Duo](https://about.gitlab.com/gitlab-duo/), which is a full suite of [15 AI-assisted features](https://docs.gitlab.com/ee/user/ai_features.html) that go beyond code generation, catering to every stage of software development — from code generation and testing to security analysis and documentation. [GitLab Duo Chat](https://docs.gitlab.com/ee/user/gitlab_duo_chat.html) is the foundational technology that connects users with our broader set of AI-powered GitLab Duo features. With Chat, users have a personal AI assistant that helps them answer questions, understand code, and even generate tests — all within the same IDE where development work happens. Together, these capabilities form a cohesive ecosystem. \n\nThese features are critical because they’ll help prevent bottlenecks from forming downstream as developers, who are getting a productivity boost from Code Suggestions, create more code faster. GitLab Duo offers targeted solutions, like Vulnerability Explanation and Merge Request Summary, to improve software quality and keep workflows moving.\n\nImportantly, GitLab prioritizes privacy and transparency in its AI approach. Customer code is not used for training, ensuring organizations have complete data ownership and control.\n\n> Live demo! Discover the future of AI-driven software development at our GitLab 17 virtual launch event. 
[Register today](https://about.gitlab.com/seventeen/)!\n",[9,482,725],{"slug":871,"featured":91,"template":680},"ai-powered-growth-transform-every-stage-of-software-delivery","content:en-us:blog:ai-powered-growth-transform-every-stage-of-software-delivery.yml","Ai Powered Growth Transform Every Stage Of Software Delivery","en-us/blog/ai-powered-growth-transform-every-stage-of-software-delivery.yml","en-us/blog/ai-powered-growth-transform-every-stage-of-software-delivery",{"_path":877,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":878,"content":885,"config":891,"_id":894,"_type":14,"title":895,"_source":16,"_file":896,"_stem":897,"_extension":19},"/en-us/blog/ai-trends-for-2025-agentic-ai-self-hosted-models-and-more",{"title":879,"description":880,"ogTitle":879,"ogDescription":880,"config":881,"ogImage":882,"ogUrl":883,"ogSiteName":693,"ogType":694,"canonicalUrls":883,"schema":884},"AI trends for 2025: Agentic AI, self-hosted models, and more","Discover coming trends in AI for software development, from on-premises model deployments to proactive AI assistants.",{"noIndex":91},"https://res.cloudinary.com/about-gitlab-com/image/upload/v1749665039/Blog/Hero%20Images/display-the-source-article-ai-trends-coming-in-2026-image-0492-1800x945-fy25.png","https://about.gitlab.com/blog/ai-trends-for-2025-agentic-ai-self-hosted-models-and-more","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"AI trends for 2025: Agentic AI, self-hosted models, and more\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Emilio Salvador\"}],\n        \"datePublished\": \"2025-01-13\",\n      }",{"title":879,"description":880,"authors":886,"heroImage":882,"date":888,"body":889,"category":10,"tags":890},[887],"Emilio Salvador","2025-01-13","This is the [AI trends for 2025: Agentic AI, self-hosted models, and 
more](https://about.gitlab.com/the-source/ai/ai-trends-for-2025-agentic-ai-self-hosted-models-and-more/) post.",[9],{"slug":892,"featured":6,"template":680,"externalUrl":893},"ai-trends-for-2025-agentic-ai-self-hosted-models-and-more","https://about.gitlab.com/the-source/ai/ai-trends-for-2025-agentic-ai-self-hosted-models-and-more/","content:en-us:blog:ai-trends-for-2025-agentic-ai-self-hosted-models-and-more.yml","Ai Trends For 2025 Agentic Ai Self Hosted Models And More","en-us/blog/ai-trends-for-2025-agentic-ai-self-hosted-models-and-more.yml","en-us/blog/ai-trends-for-2025-agentic-ai-self-hosted-models-and-more",{"_path":899,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":900,"content":906,"config":912,"_id":914,"_type":14,"title":915,"_source":16,"_file":916,"_stem":917,"_extension":19},"/en-us/blog/automate-tedious-coding-tasks-with-gitlab-duo-workflow",{"title":901,"description":902,"ogTitle":901,"ogDescription":902,"noIndex":6,"ogImage":903,"ogUrl":904,"ogSiteName":693,"ogType":694,"canonicalUrls":904,"schema":905},"Automate tedious coding tasks with GitLab Duo Workflow","See how agentic AI can reduce time spent on repetitive tasks, freeing you up to focus on developing innovative solutions and shipping the next big thing.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749662465/Blog/Hero%20Images/GitLab_Duo_Workflow_Unified_Data_Store__1_.png","https://about.gitlab.com/blog/automate-tedious-coding-tasks-with-gitlab-duo-workflow","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Automate tedious coding tasks with GitLab Duo Workflow\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Jeff Park\"}],\n        \"datePublished\": \"2025-05-06\",\n      }",{"title":901,"description":902,"authors":907,"heroImage":903,"date":909,"body":910,"category":10,"tags":911},[908],"Jeff Park","2025-05-06","Working with large codebases often means spending significant 
time on repetitive tasks that, while necessary, don't really push your projects forward. The good news is that these tasks are great candidates to be completed with AI. Reducing the time spent on them will free you up to work on more important problems that you’re actually excited to tackle. With GitLab Duo Workflow, the time spent on these tasks will go from hours to minutes.\n\n[Duo Workflow](https://about.gitlab.com/blog/gitlab-duo-workflow-enterprise-visibility-and-control-for-agentic-ai/) is a powerful new agentic solution, currently in private beta, that lives in VS Code and is designed to help you complete complex development tasks. While many AI coding assistants are focused on helping developers write code, Duo Workflow understands your project structure, reads your files, and can make coordinated changes across your entire codebase.\n\nI created a demonstration that showcases how Duo Workflow can transform a tedious coding task into a streamlined process that saves you time and mental energy.\n\n\u003Cdiv style=\"padding:56.25% 0 0 0;position:relative;\">\u003Ciframe src=\"https://player.vimeo.com/video/1081627484?badge=0&amp;autopause=0&amp;player_id=0&amp;app_id=58479\" frameborder=\"0\" allow=\"autoplay; fullscreen; picture-in-picture; clipboard-write; encrypted-media\" style=\"position:absolute;top:0;left:0;width:100%;height:100%;\" title=\"Automate tedious coding tasks with GitLab Duo Workflow\">\u003C/iframe>\u003C/div>\u003Cscript src=\"https://player.vimeo.com/api/player.js\">\u003C/script>\n\n## The challenge: Implementing a new lint rule\n\nIn this demo, we tackle a common scenario that many developers face: implementing a new lint rule and then updating multiple files across the codebase to comply with this rule. 
The specific issue involves validation errors occurring in several project files that need to be addressed consistently.\n\nRather than manually identifying and modifying each affected file one by one – a process that could take hours depending on the size of your codebase – we'll see how Duo Workflow can:\n\n1. Read and understand the details from an issue\n2. Analyze the project structure to identify affected files\n3. Create a comprehensive plan to implement the necessary changes\n4. Draft a new lint rule to prevent future occurrences\n5. Make consistent code changes across all relevant files\n6. Stage the changes for your review before any commits are made\n\nA simple prompt initiates the process:\n\n\"Read through issue #1 in this project and submit code changes to resolve it. Be sure to look at each tool file and make all appropriate changes.\"\n\nFrom there, Duo Workflow takes over – reading the issue, analyzing the files, creating a plan, and implementing the solution – all while keeping me informed of its progress and reasoning.\n\n## Why this matters for your development process\n\nWhat's particularly powerful about Duo Workflow is how it maintains awareness of this wider context throughout the entire process. It's not just making text replacements based on a large language model's training data – it's understanding the code, making intelligent decisions, and proposing a complete solution that you maintain full control over.\n\nThis approach offers several key benefits:\n\n* **Consistency in implementation:** Apply changes uniformly across files\n* **Time savings:** Focus your energy on creative problem-solving rather than repetitive tasks\n* **Reduced context switching:** Complete complex tasks without leaving your IDE\n* **Keeping a human in the loop:** Review all proposed modifications before committing\n\n## What's next\n\nGitLab Duo Workflow is part of our work to bring AI-powered capabilities to every stage of the software development lifecycle. 
While this demo focuses on code editing, the same approach can be applied to various development tasks:\n\n* Implementing new features based on issue descriptions\n* Fixing bugs with comprehensive test coverage\n* Refactoring legacy code to modern standards\n* Creating documentation from codebase analysis\n\nWe believe that by automating repetitive tasks, Duo Workflow helps you focus on what matters most – solving interesting problems and creating innovative solutions for your users.\n\n> GitLab Duo Workflow is currently available in private beta for GitLab Ultimate customers. [Sign up for the waitlist today!](https://about.gitlab.com/gitlab-duo/agent-platform/)\n\n## Learn more\n- [Use GitLab Duo Workflow to improve application quality assurance](https://about.gitlab.com/blog/use-gitlab-duo-workflow-to-improve-application-quality-assurance/)\n- [Solving complex challenges with GitLab Duo Workflow](https://about.gitlab.com/blog/solving-complex-challenges-with-gitlab-duo-workflow/)\n- [GitLab Duo Workflow: Enterprise visibility and control for agentic AI](https://about.gitlab.com/blog/gitlab-duo-workflow-enterprise-visibility-and-control-for-agentic-ai/)\n- [Emerging agentic AI trends reshaping software development](https://about.gitlab.com/the-source/ai/emerging-agentic-ai-trends-reshaping-software-development/)\n- [What is agentic AI?](https://about.gitlab.com/topics/agentic-ai/)\n",[9,482,704,725,724,746],{"slug":913,"featured":91,"template":680},"automate-tedious-coding-tasks-with-gitlab-duo-workflow","content:en-us:blog:automate-tedious-coding-tasks-with-gitlab-duo-workflow.yml","Automate Tedious Coding Tasks With Gitlab Duo 
Workflow","en-us/blog/automate-tedious-coding-tasks-with-gitlab-duo-workflow.yml","en-us/blog/automate-tedious-coding-tasks-with-gitlab-duo-workflow",{"_path":919,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":920,"content":926,"config":933,"_id":935,"_type":14,"title":936,"_source":16,"_file":937,"_stem":938,"_extension":19},"/en-us/blog/automating-with-gitlab-duo-part-1-generating-tests",{"title":921,"description":922,"ogTitle":921,"ogDescription":922,"noIndex":6,"ogImage":923,"ogUrl":924,"ogSiteName":693,"ogType":694,"canonicalUrls":924,"schema":925},"Automating with GitLab Duo, Part 1: Generating tests","Learn how we used the AI-driven DevSecOps platform to generate automated tests and improve our development speed and quality.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097480/Blog/Hero%20Images/Blog/Hero%20Images/blog-image-template-1800x945%20%284%29_3LZkiDjHLjhqEkvOvBsVKp_1750097480784.png","https://about.gitlab.com/blog/automating-with-gitlab-duo-part-1-generating-tests","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Automating with GitLab Duo, Part 1: Generating tests\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Byron Boots\"}],\n        \"datePublished\": \"2024-12-02\",\n      }",{"title":921,"description":922,"authors":927,"heroImage":923,"date":929,"body":930,"category":10,"tags":931},[928],"Byron Boots","2024-12-02","Automated testing is time-consuming and can feel like it’s not moving a project forward. However, as many developers have likely experienced, automated testing provides an overall positive return on investment. In building a custom module (we'll call it gitlab-helper for this article), this was particularly true.\n\nOur initial development focused on migrating tried and used functionality from existing scripts to a new module whose sole purpose was to serve as a baseline for future functionality. 
Although existing scripts lacked automated testing, their consistent usage was strong anecdotal evidence the functionality worked as expected.\n\nOur objective was to deliver a more mature solution to this problem, so automated testing became a necessity. This introduced the challenge of building efficiently, while balancing the time to test and ensure a robust product; and with a total of three team members, this was no small bottleneck. Therefore, the team decided to take advantage of [GitLab Duo](https://about.gitlab.com/gitlab-duo/), our suite of AI capabilities, for test generation, improving speed and quality of the delivered product.\n\nIn this three-part series on automating with GitLab Duo, we will cover:\n\n1. How we used GitLab Duo to generate tests for our code  \n2. How we worked interactively with GitLab Duo for more complex situations  \n3. The results we were able to achieve (Spoiler: 1 developer + GitLab Duo = 84% coverage in 2 days)\n\n## Using GitLab Duo to generate tests for code\n\nWhile functionality is available across tools, this article will cover using GitLab Duo in VS Code, with the [GitLab Workflow extension for VS Code](https://marketplace.visualstudio.com/items?itemName=GitLab.gitlab-workflow) to generate tests. Links to other GitLab Duo options are available in the [references](#references) below.\n\n### Install and enable GitLab Duo\n\nAs a prerequisite to using GitLab Duo, we ensured we had a GitLab Duo-enabled account. If you don't have GitLab Duo, you can [sign up for a free 60-day trial](https://about.gitlab.com/solutions/gitlab-duo-pro/sales/?type=free-trial).\n\nTo use GitLab Duo Chat in VS Code, we followed the [instructions for installation](https://docs.gitlab.com/ee/user/gitlab_duo_chat/#use-gitlab-duo-chat-in-vs-code). 
Then, we were able to see the GitLab Duo Chat extension on the sidebar and open the Chat window.\n\n![Ask a question window](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097489/Blog/Content%20Images/Blog/Content%20Images/image4_aHR0cHM6_1750097488918.png)\n\n### Generate tests with Chat\n\ngitlab-helper is a custom module built for standardizing interaction with the GitLab API across the team's work and extends other library functionalities to simplify development and scripting work. Once a method or feature was migrated to gitlab-helper and appeared to be implemented appropriately, the process to generate tests for it was simple:\n- Select the method, class, or entire file in the IDE.\n- Right-click on the selected code.\n- Under **GitLab Duo Chat**, select **Generate tests**.\n\n![Sequence to generate tests, including drop-down for generate tests](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097489/Blog/Content%20Images/Blog/Content%20Images/image1_aHR0cHM6_1750097488919.png)\n\nWithin a few seconds, tests were generated and presented in the GitLab Duo Chat window. These tests can be reviewed and or added to the codebase, via copy/paste, into existing or new test files. As is the case with most natural language processing generations today, particularly around context, some of the initial tests created by GitLab Duo failed, thus requiring finetuning (for instance, when dealing with nested dependencies).\n\n> **Pro tip:** GitLab Duo does not auto-create files to add generated tests to. We found it was helpful to create new test files and add a `# Tests Generated by Duo` comment at the top of them and suffix them with `_duo.py` to indicate where the tests came from.\n\nGitLab Duo provided a great starting point for building out gitlab-helper’s automated testing and greatly improved test writing efficiency and code coverage, speeding up the development process substantially. 
Alongside GitLab Duo, numerous iterations of valuable tests were introduced into the gitlab-helper module with human oversight.\n\nRead the next installment in this series where we share [what we learned while using GitLab Duo for generating automated tests](https://about.gitlab.com/blog/automating-with-gitlab-duo-part-2-complex-testing/) and working interactively with AI for more complex situations.\n\n## References\n\nThere’s more than one way to use GitLab Duo to generate tests, check out the other options below:\n\n* The GitLab UI  \n* [The GitLab Web IDE (VS Code in the cloud)](https://docs.gitlab.com/ee/user/project/web_ide/index.html)  \n* VS Code, with the [GitLab Workflow extension for VS Code](https://marketplace.visualstudio.com/items?itemName=GitLab.gitlab-workflow)  \n* JetBrains IDEs, with the [GitLab Duo Plugin for JetBrains](https://plugins.jetbrains.com/plugin/22325-gitlab-duo)  \n* Visual Studio for Windows, with the [GitLab Extension for Visual Studio](https://marketplace.visualstudio.com/items?itemName=GitLab.GitLabExtensionForVisualStudio)\n",[9,724,932,482,704],"testing",{"slug":934,"featured":6,"template":680},"automating-with-gitlab-duo-part-1-generating-tests","content:en-us:blog:automating-with-gitlab-duo-part-1-generating-tests.yml","Automating With Gitlab Duo Part 1 Generating Tests","en-us/blog/automating-with-gitlab-duo-part-1-generating-tests.yml","en-us/blog/automating-with-gitlab-duo-part-1-generating-tests",{"_path":940,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":941,"content":947,"config":952,"_id":954,"_type":14,"title":955,"_source":16,"_file":956,"_stem":957,"_extension":19},"/en-us/blog/automating-with-gitlab-duo-part-2-complex-testing",{"title":942,"description":943,"ogTitle":942,"ogDescription":943,"noIndex":6,"ogImage":944,"ogUrl":945,"ogSiteName":693,"ogType":694,"canonicalUrls":945,"schema":946},"Automating with GitLab Duo, Part 2: Complex testing","Find out how the GitLab team addressed more complex testing 
situations using GitLab Duo's AI capabilities, including ensuring that code testing followed standards.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1750099243/Blog/Hero%20Images/Blog/Hero%20Images/blog-image-template-1800x945%20%284%29_3LZkiDjHLjhqEkvOvBsVKp_1750099243011.png","https://about.gitlab.com/blog/automating-with-gitlab-duo-part-2-complex-testing","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Automating with GitLab Duo, Part 2: Complex testing\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Byron Boots\"}],\n        \"datePublished\": \"2024-12-10\",\n      }",{"title":942,"description":943,"authors":948,"heroImage":944,"date":949,"body":950,"category":10,"tags":951},[928],"2024-12-10","The first part of our three-part series on [test generation with GitLab Duo](https://about.gitlab.com/blog/automating-with-gitlab-duo-part-1-generating-tests/) focused on how to automate code testing. Now, we will share the lessons we learned while using AI for test generation.\n\n## Situations we encountered and how we handled them\n\nOverall, we were pleased with the results using [GitLab Duo](https://about.gitlab.com/gitlab-duo/) to generate tests on our code. As is the case with any language generation, some cases required minor adjustments such as fixing import paths or editing contents in datasets. For the more complex cases, we had to remember that AI solutions often lack context. Here's how we handled the more complex testing situations with GitLab Duo.\n\n### Updating existing test cases\n\nAs is often the case when developing a software product, we encountered instances that required updates to existing tests. Rather than manually making adjustments to a full test suite for a common issue, we took full advantage of the GitLab Duo Chat window in VS Code. 
For example, to refactor tests, we used the Chat prompt “Please update the provided tests to use unittest rather than pytest” followed by pasting in the tests we wanted GitLab Duo to update.\n\n![Automated test generation](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750099252/Blog/Content%20Images/Blog/Content%20Images/image5_aHR0cHM6_1750099252303.png)\n\n\u003Cbr>\u003C/br>\n\n![Chat prompt requesting use of unittest rather than pytest](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750099252/Blog/Content%20Images/Blog/Content%20Images/image3_aHR0cHM6_1750099252304.png)\n\n**Note:** We copy-and-pasted GitLab Duo's recommendations into our code.\n\n### Creating tests for legacy code\n\nCreating tests for legacy code we knew worked was another challenging situation we encountered. In such circumstances, it was valuable to provide error snippets alongside failing tests and ask GitLab Duo to provide new tests. A full copy-and-paste from the terminal window of noted failures and errors to Chat, along with a request to “Please explain and fix this failing test” or similar prompts, yielded a summary of the issues the test was encountering as well as a new test addressing the problem. We did find this sometimes required multiple rounds of refactoring as new test failures were identified. However, the efficiency of GitLab Duo to provide various refactored solutions was fast and a net positive on team and developer efficiency.\n\n### Working with complex or abstracted code\n\nIn other instances, the modularization or complexity of our code led to variance in GitLab Duo’s results. For instance, when generating tests, GitLab Duo sometimes generated a series of passing and failing tests caused by differences in testing approach (e.g. usage of Mock and which objects were mocked). 
We provided GitLab Duo its own example of a passing test and asked it to modify individual tests one at a time to match the style of the passing tests to maintain consistency. We also would provide GitLab Duo a file of functioning tests for a similar object or task so it could mirror the structure.\n\n### Ensuring generated code follows our standards\n\nWhile developing a Python module, GitLab Duo generated many tests using Mock and often they required refactoring, particularly around naming standardization. In such cases, we could leverage GitLab Duo Chat to refactor tests with instructions as to which specific test components to update. Prompting GitLab Duo for these changes was immensely faster than refactoring tests individually, as we had previousy done.\n\n### Addressing uncovered test cases\n\nGitLab Duo generated tests for additional test cases the team had not previously considered, thus increasing coverage. Luckily, we could use GitLab Duo to quickly and efficiently address these edge cases and expand testing coverage, which is a key value-add for our team to build quickly and ensure a robust product.\n\n## What we learned\n\nHere are few key lessons that have been important to our success with GitLab Duo:\n\n* **Fast and efficient for rapid development and iteration -** GitLab Duo’s role in generating automated tests has been a key accelerator in development for our team and allowed us to work faster and with greater confidence in our changes.\n* **Important to use appropriate prompts -** When using GitLab Duo for our use case, we touched on a key topic for machine learning optimization: prompt engineering. Sometimes we needed to modify our question by just a few keywords to lead to the ideal generated answer. 
\n* **Need understanding of underlying frameworks and code -** When it comes to any AI-generated code that makes it into a product, even if only as testing, it’s critical that we understand how the code functions so we can adequately debug as well as request informed changes.\n* **Need understanding of desired end state and standards -** Similar to following coding standards for formatting and library usage while developing without AI, it’s important to maintain the vision of what the intended outcomes look like and what standards are being adhered to when using AI. GitLab Duo needs the context to understand code standards, so it’s critical for team members using GitLab Duo to provide adequate oversight of its outputs to ensure quality and other expectations are met.\n* **GitLab Duo is not a replacement for all tests -** While we use GitLab Duo significantly for generating automated tests, it does not replace our other tests and human oversight. Functional tests, integration tests, and more still serve a valuable place in the QA process and overall software development lifecycle.\n\nIn our next article in this series, we’ll cover [a test we ran to validate the impact of GitLab Duo on our team’s automated testing](https://about.gitlab.com/blog/automating-with-gitlab-duo-part-3-validating-testing/) and discuss the impressive results we have achieved thus far.",[9,932,482],{"slug":953,"featured":6,"template":680},"automating-with-gitlab-duo-part-2-complex-testing","content:en-us:blog:automating-with-gitlab-duo-part-2-complex-testing.yml","Automating With Gitlab Duo Part 2 Complex 
Testing","en-us/blog/automating-with-gitlab-duo-part-2-complex-testing.yml","en-us/blog/automating-with-gitlab-duo-part-2-complex-testing",{"_path":959,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":960,"content":966,"config":971,"_id":973,"_type":14,"title":974,"_source":16,"_file":975,"_stem":976,"_extension":19},"/en-us/blog/automating-with-gitlab-duo-part-3-validating-testing",{"title":961,"description":962,"ogTitle":961,"ogDescription":962,"noIndex":6,"ogImage":963,"ogUrl":964,"ogSiteName":693,"ogType":694,"canonicalUrls":964,"schema":965},"Automating with GitLab Duo, Part 3: Validating testing","Discover what test we ran to validate the impact of GitLab Duo on our team’s automated testing – and the results we achieved.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097447/Blog/Hero%20Images/Blog/Hero%20Images/blog-image-template-1800x945%20%284%29_3LZkiDjHLjhqEkvOvBsVKp_1750097447404.png","https://about.gitlab.com/blog/automating-with-gitlab-duo-part-3-validating-testing","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Automating with GitLab Duo, Part 3: Validating testing\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Byron Boots\"}],\n        \"datePublished\": \"2024-12-17\",\n      }",{"title":961,"description":962,"authors":967,"heroImage":963,"date":968,"body":969,"category":10,"tags":970},[928],"2024-12-17","In previous entries in this series, we covered [how we used GitLab Duo to generate tests for our code](https://about.gitlab.com/blog/automating-with-gitlab-duo-part-1-generating-tests/) as well as [what we learned while using GitLab Duo for generating automated tests](https://about.gitlab.com/blog/automating-with-gitlab-duo-part-2-complex-testing/). We also shared some of the ways we addressed making changes to GitLab Duo generated tests. 
This last article in the series will cover a test we ran to validate the impact of GitLab Duo on our team’s automated testing and discuss the impressive results we have achieved thus far.\n\n### Validation testing results\n\nTo validate that our usage of GitLab Duo to generate tests was adding value the way we expected, we challenged ourselves and GitLab Duo to replace and increase test coverage. The team removed all previously written tests to get our test coverage to 0% and then methodically went through the repository and created new test files to store GitLab Duo-generated tests.\n\nFrom this starting point, the team followed the steps outlined in [the first blog](https://about.gitlab.com/blog/automating-with-gitlab-duo-part-1-generating-tests/) to generate tests. Tests and test files were unmodified by humans to provide a stable control group and a `Tests Generated by Duo` comment at the top of them were suffixed by `duo.py` to indicate where the tests came from.\n\nAll iterations of the tests were only done through interactions with GitLab Duo through the `Generate Tests` and GitLab Duo Chat window as outlined in [the second blog in the series](https://about.gitlab.com/blog/automating-with-gitlab-duo-part-2-complex-testing/). As we shared, we requested GitLab Duo to make updates based on encountered errors, test failures, and example code snippets for GitLab Duo to use as added context. \n\nAt all times. when testing with GitLab Duo, we were running tests and coverage reports so we could see if our GitLab Duo-generated tests were increasing testing coverage and adding value as we expected. Taking advantage of [GitLab's test coverage visualization](https://docs.gitlab.com/ee/ci/testing/test_coverage_visualization/), we were able to continuously monitor the results of our work.\n\nUltimately, after using GitLab Duo to regenerate tests for code previously covered through our mostly manual testing, we were able to achieve test coverage of 84%. 
This was a great accomplishment for the team because:\n\n1. It was a significant improvement from prior coverage, which was at 74%.  \n2. It took approximately two days by one engineer to achieve 84%, compared to the approximately four weeks across multiple engineers that the 74% had taken.\n\nSince this experiment, the team has increased coverage even further to 89% with the help of GitLab Duo, while continuing to introduce new features.\n\n![image of achievements](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097456/Blog/Content%20Images/Blog/Content%20Images/image2_aHR0cHM6_1750097456771.png)\n\nUsing GitLab Duo allowed for increased testing efficiency and coverage, and also allowed developers with lower context around existing code to write valuable tests, quickly. This has resulted in increased confidence on the team to develop new features without worrying about introducing errors.\n\n> If you'd like to [try GitLab Duo](https://about.gitlab.com/solutions/gitlab-duo-pro/sales/), sign up for a free, 60-day trial today!\n",[9,932,482,704],{"slug":972,"featured":6,"template":680},"automating-with-gitlab-duo-part-3-validating-testing","content:en-us:blog:automating-with-gitlab-duo-part-3-validating-testing.yml","Automating With Gitlab Duo Part 3 Validating Testing","en-us/blog/automating-with-gitlab-duo-part-3-validating-testing.yml","en-us/blog/automating-with-gitlab-duo-part-3-validating-testing",{"_path":978,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":979,"content":985,"config":992,"_id":994,"_type":14,"title":995,"_source":16,"_file":996,"_stem":997,"_extension":19},"/en-us/blog/build-an-ml-app-pipeline-with-gitlab-model-registry-using-mlflow",{"title":980,"description":981,"ogTitle":980,"ogDescription":981,"noIndex":6,"ogImage":982,"ogUrl":983,"ogSiteName":693,"ogType":694,"canonicalUrls":983,"schema":984},"Build an ML app pipeline with GitLab Model Registry using MLflow","Learn how to manage your ML apps entirely through GitLab with 
this tutorial. Also discover the role machine learning operations, or MLOps, plays in automating the DevSecOps lifecycle.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749660151/Blog/Hero%20Images/blog-image-template-1800x945__26_.png","https://about.gitlab.com/blog/build-an-ml-app-pipeline-with-gitlab-model-registry-using-mlflow","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Build an ML app pipeline with GitLab Model Registry using MLflow\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Gufran Yeşilyurt, OBSS\"},{\"@type\":\"Person\",\"name\":\"Péter Bozsó\"}],\n        \"datePublished\": \"2024-09-17\",\n      }",{"title":980,"description":981,"authors":986,"heroImage":982,"date":989,"body":990,"category":10,"tags":991},[987,988],"Gufran Yeşilyurt, OBSS","Péter Bozsó","2024-09-17","__*Editor's note: From time to time, we invite members of our partner community to contribute to the GitLab Blog. Thanks to Gufran Yeşilyurt, a DevOps consultant at OBSS Technology, for co-creating with us.*__\n\nThis tutorial will walk you through setting up an MLOps pipeline with GitLab Model Registry, utilizing MLflow. This will be a great starting point to manage your ML apps entirely through GitLab. But first, it is crucial to understand why we need MLOps and what GitLab offers.\n\n[MLOps](https://about.gitlab.com/direction/modelops/mlops/#overview), or machine learning operations, is a critical practice for managing and automating the lifecycle of machine learning models, from development to deployment and maintenance. 
Its importance lies in addressing the complexity and dynamism of machine learning workflows, which involve not just software development but also data management, model training, testing, deployment, and continuous monitoring.\n\nMLOps ensures that models are reproducible, scalable, and maintainable, facilitating collaboration between data scientists, machine learning engineers, and operations teams. By incorporating MLOps, organizations can streamline the deployment process, reduce time to market, and improve the reliability and performance of their machine learning applications.\n\nThe necessity of MLOps arises from the unique challenges posed by machine learning projects. Unlike traditional software development, machine learning involves handling large datasets, experimenting with various models, and continuously updating models based on new data and feedback.\n\nWithout proper operations, managing these aspects becomes cumbersome, leading to potential issues like model drift, where the model's performance degrades over time due to changes in the underlying data. MLOps provides a structured approach to monitor and manage these changes, ensuring that models remain accurate and effective. Moreover, it introduces automation in various stages, such as data preprocessing, model training, and deployment, thereby reducing manual errors and enhancing efficiency.\n\nGitLab's features play a pivotal role in implementing MLOps effectively. GitLab provides an integrated platform that combines source code management, [CI/CD pipelines](https://about.gitlab.com/topics/ci-cd/), tracking and collaboration tools, making it ideal for managing machine learning projects.\n\nWith GitLab, teams can leverage version control to track changes in both code and data, ensuring reproducibility and transparency. The CI/CD pipelines in GitLab automate the testing and deployment of machine learning models, allowing for continuous integration and continuous delivery. 
This automation not only speeds up the deployment process but also ensures consistency and reliability in the models being deployed. \n\nAdditionally, GitLab's collaboration features, such as merge requests and code reviews, facilitate better communication and coordination among team members, ensuring that everyone is aligned and any issues are promptly addressed.\n\nPrerequisites:\n- basic knowledge of GitLab pipelines\n- basic knowledge of MLflow\n- a Kubernetes cluster\n- Dockerfile\n\nThis tutorial includes instructions to:\n- [Set up environment variables of MLflow](#set-up-environment-variables-of-mlflow)\n- [Train and log candidates at merge request](#train-and-log-candidates-at-merge-request)\n- [Register the most successful candidate](#register-the-most-successful-candidate)\n- [Dockerize and deploy an ML app with the registered model](#dockerize-and-deploy-an-ml-app-with-the-registered-model)\n\nIn this example, to decide whether to provide the user a loan, we make use of Random Forest Classifier, Decision Tree, and Logistic Regression. At the end of this showcase, we will have a web application that utilizes machine learning to respond to the user.\n\nTo reproduce this example in your own GitLab environment, you can read the rest of this article or follow the video below. You can find the source code of this example in [these OBSS repositories](https://gitlab.com/gitlab-partners-public/obss).\n\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/grNJAp1xAi0?si=Bf9CAP9lB1uWErOZ\" frameborder=\"0\" allow=\"accelerometer; autoplay; clipboard-write; encrypted-media; gyroscope; picture-in-picture; web-share\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\n## Set up environment variables of MLflow\n\nOn the host where the code is executed, set the environment variables for tracking URI and token. This might be a remote host, CI pipeline, or your local environment. 
When they are set, you can call `mlflow.set_experiment(\"\u003Cexperiment_name>\")`. As a reference:\n\n```\nexport MLFLOW_TRACKING_URI=\"\u003Cyour gitlab endpoint>/api/v4/projects/\u003Cyour project id>/ml/mlflow\"\nexport MLFLOW_TRACKING_TOKEN=\"\u003Cyour_access_token>\"\n```\n\n**Note:** If the training code contains the call to `mlflow.set_tracking_uri()`, remove it.\n\n## Train and log candidates at merge request\n\nIn your model train code, you can use MLflow methods to log metrics, artifacts, and parameters. You can also divide the train steps into pipeline stages if you are comfortable with that part. In this example, one Python file will be used for both training and report generation.\n\n```\nmlflow.log_params(params)\nmlflow.log_metrics(metrics_data)\nmlflow.log_artifact(artifacts)\n```\n\nYou can then create the necessary pipeline to train the experiment. By adding the relevant rules, you can trigger this pipeline manually in merge requests and observe the report generated as MR Note.\n\nWhen the pipeline is finished, you can see the details about the candidate in **Analyze > Model Experiments**.\n\n![details about the candidate in the finished pipeline](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749676127/Blog/Content%20Images/Screenshot_1.png)\n\n## Register the most successful candidate\n\nAccording to the measurements you have made, we can register the most successful candidate (may be the one with the highest accuracy value) with the Run ID of the candidate.\n\nBut first, we need to create a model and its version in Registry. I created these steps in separate stages and components (because I may need these steps in other projects). 
You should be careful to use semantic versioning when versioning.\n\n### Register source model parameters and metrics\n\n```\nsource_candidate = client.get_run(source_candidate_id)\nparams = { k: v for k, v in source_candidate.data.params.items() }\nmetric = { k: v for k, v in source_candidate.data.metrics.items() }\n\nmodel_version = client.get_model_version(model_name, version)\nrun_id = model_version.run_id\nmodel_class = \"\"\nfor name, value in params.items():\n    client.log_param(run_id, name, value)\n    if name == \"Class\":\n        model_class = value\n\nfor name, value in metric.items():\n    client.log_metric(run_id, name, value)\n\n```\n\nAfter logging the parameters and metrics, you can [register the artifacts](https://gitlab.com/gitlab-partners-public/obss/mlops-loan-prediction/-/blob/main/register_candidate.py) as you did in the train step.\n\nYou may want to manually enter the inputs of the relevant steps as [a variable in the pipeline](https://gitlab.com/gitlab-partners-public/obss/components/-/blob/main/templates/register-candidate.yml).\n\n## CI/CD components\n\nI have used [CI/CD components](https://docs.gitlab.com/ee/ci/components/) because they provide a structured environment for managing machine learning workflows. 
These components enable reusability by allowing teams to store and share standardized scripts, models, and datasets, ensuring that previous work can be easily accessed, modified, and redeployed in future projects, thus accelerating development and reducing redundancy.\n\n> [Learn more about CI/CD components and the CI/CD Catalog](https://about.gitlab.com/blog/ci-cd-catalog-goes-ga-no-more-building-pipelines-from-scratch/).\n\n## Dockerize and deploy an ML app with the registered model\n\nIn this project, while registering the model, I also register the pkl file as an artifact and then create the docker image with that artifact and send it to [GitLab Container Registry](https://about.gitlab.com/blog/next-generation-gitlab-container-registry-goes-ga/).\n\nYou can now access your Docker image from the Container Registry and deploy it to your environment with the method you want.\n\n## Resources\n- [Model experiments](https://docs.gitlab.com/ee/user/project/ml/experiment_tracking/)\n- [MLflow client compatibility](https://docs.gitlab.com/ee/user/project/ml/experiment_tracking/mlflow_client.html)\n- [CI/CD components](https://docs.gitlab.com/ee/ci/components/)\n- [Building GitLab with GitLab: Why there is no MLOps without DevSecOps](https://about.gitlab.com/blog/there-is-no-mlops-without-devsecops/)\n\n***Credits:**\nThis tutorial and the corresponding sample projects were created and generously shared with the community by [OBSS](https://obss.tech/en/). OBSS is an EMEA-based channel partner of GitLab. 
They have deep expertise across the whole DevSecOps lifecycle and amongst many other things, they are more than happy to support customers with migrating their MLOps workloads to GitLab.*\n",[9,724,109,282],{"slug":993,"featured":91,"template":680},"build-an-ml-app-pipeline-with-gitlab-model-registry-using-mlflow","content:en-us:blog:build-an-ml-app-pipeline-with-gitlab-model-registry-using-mlflow.yml","Build An Ml App Pipeline With Gitlab Model Registry Using Mlflow","en-us/blog/build-an-ml-app-pipeline-with-gitlab-model-registry-using-mlflow.yml","en-us/blog/build-an-ml-app-pipeline-with-gitlab-model-registry-using-mlflow",{"_path":999,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":1000,"content":1006,"config":1012,"_id":1014,"_type":14,"title":1015,"_source":16,"_file":1016,"_stem":1017,"_extension":19},"/en-us/blog/building-a-text-adventure-using-cplusplus-and-code-suggestions",{"title":1001,"description":1002,"ogTitle":1001,"ogDescription":1002,"noIndex":6,"ogImage":1003,"ogUrl":1004,"ogSiteName":693,"ogType":694,"canonicalUrls":1004,"schema":1005},"Explore the Dragon Realm: Building a C++ adventure game with AI","How to use GitLab Duo Code Suggestions to create a text-based adventure game, including magical locations to visit and items to procure, using C++.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749663344/Blog/Hero%20Images/compassinfield.jpg","https://about.gitlab.com/blog/building-a-text-adventure-using-cplusplus-and-code-suggestions","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Explore the Dragon Realm: Build a C++ adventure game with a little help from AI\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Fatima Sarah Khalid\"}],\n        \"datePublished\": \"2023-08-24\",\n      }",{"title":1007,"description":1002,"authors":1008,"heroImage":1003,"date":1009,"body":1010,"category":10,"tags":1011},"Explore the Dragon Realm: Build a 
C++ adventure game with a little help from AI",[698],"2023-08-24","\nLearning, for me, has never been about reading a textbook or sitting in on a lecture - it's been about experiencing and immersing myself in a hands-on challenge. This is particulary true for new programming languages. With [GitLab Duo Code Suggestions](https://about.gitlab.com/gitlab-duo/), artificial intelligence (AI) becomes my interactive guide, providing an environment for trial, error, and growth. In this tutorial, we will build a text-based adventure game in C++ by using Code Suggestions to learn the programming language along the way.\n\nYou can use this table of contents to navigate into each section. It is recommended to read top-down for the best learning experience.\n\n- [Setup](#setup)\n  - [Installing VS Code](#installing-vs-code)\n  - [Installing Clang as a compiler](#installing-clang-as-a-compiler)\n  - [Setting up VS Code](#setting-up-vs-code)\n- [Getting started](#getting-started)\n  - [Compiling and running your program](#compiling-and-running-your-program)\n- [Setting the text adventure stage](#setting-the-adventure-stage)\n- [Defining the adventure: Variables](#defining-the-adventure-variables)\n- [Crafting the adventure: Making decisions with conditionals](#crafting-the-adventure-making-decisions-with-conditionals)\n- [Structuring the narrative: Characters](#structuring-the-narrative-characters)\n- [Structuring the narrative: Items](#structuring-the-narrative-items)\n- [Applying what we've learned at the Grand Library](#applying-what-weve-learned-at-the-grand-library)\n- [See you next time in the Dragon Realm](#see-you-next-time-in-the-dragon-realm)\n- [Share your feedback](#share-your-feedback)\n\n> Download [GitLab Ultimate for free](https://about.gitlab.com/gitlab-duo/) for a 30-day trial of GitLab Duo Code Suggestions.\n\n## Setup\nYou can follow this tutorial in your [preferred and supported 
IDE](https://docs.gitlab.com/ee/user/project/repository/code_suggestions.html#enable-code-suggestions-in-other-ides-and-editors). Review the documentation to enable Code Suggestions for [GitLab.com SaaS](https://docs.gitlab.com/ee/user/project/repository/code_suggestions.html#enable-code-suggestions-on-gitlab-saas) or [GitLab self-managed instances](https://docs.gitlab.com/ee/user/project/repository/code_suggestions.html#enable-code-suggestions-on-self-managed-gitlab).\n\nThese installation instructions are for macOS Ventura on M1 Silicon. \n\n### Installing VS Code\n\n* Download and install [VS Code](https://code.visualstudio.com/download).\n* Alternatively, you can also install it as a Homebrew cask: `brew install --cask visual-studio-code`.\n\n### Installing Clang as a compiler\n\n* On macOS, you'll need to install some developer tools. Open your terminal and type:\n\n```\nxcode-select --install\n```\n\nThis will prompt you to install Xcode's command line tools, which include the [Clang C++ compiler](https://clang.llvm.org/get_started.html).\n\nAfter the installation, you can check if `clang++` is installed by typing:\n\n```\nclang++ --version\n```\n\nYou should see an output that includes some information about the Clang version you have installed. \n\n### Setting up VS Code\n\n* Launch VS Code.\n* Install and configure [the GitLab Workflow extension](https://marketplace.visualstudio.com/items?itemName=GitLab.gitlab-workflow).\n* Optionally, in VS Code, install the [C/C++ Intellisense extension](https://marketplace.visualstudio.com/items?itemName=ms-vscode.cpptools), which helps with debugging C/C++. \n\n## Getting started\nNow, let's start building this magical adventure with C++. We'll start with a \"Hello World\" example.\n\nCreate a new project `learn-ai-cpp-adventure`. In the project root, create `adventure.cpp`. The first part of every C++ program is the `main()` function. 
It's the entry point of the program.\n\nWhen you start writing `int main() {`, Code Suggestions will help autocomplete the function with some default parameters.\n\n![adventure.cpp with a hello world implementation suggested by Code Suggestions](https://about.gitlab.com/images/blogimages/2023-08-21-building-a-text-adventure-using-cplusplus-and-code-suggestions/0-helloworld.png){: .shadow}\n\n```cpp\nint main()\n{\n    cout \u003C\u003C \"Hello World\" \u003C\u003C endl;\n    return 0;\n}\n```\n\nWhile this is a good place to start, we need to add an include and update the output statement:\n\n```cpp\n#include \u003Ciostream> // Include the I/O stream library for input and output\n\n// Main function, the starting point of the program\nint main()\n{\n    // Print \"Hello World!\" to the console\n    std::cout \u003C\u003C \"Hello World!\" \u003C\u003C std::endl;\n\n    // Return 0 to indicate successful execution\n    return 0;\n}\n```\n\nThe program prints \"Hello World!\" to the console when executed.\n\n* `#include \u003Ciostream>`: Because we are building a text-based adventure, we will rely on input from the player using input and output operations (I/O) in C++. This include is a preprocessor directive that tells our program to include the `iostream` library, which provides facilities to use input and output streams, such as `std::cout` for output.\n\n* You might find that Code Suggestions suggests `int main(int argc, char* argv[])` as the definition of our main function. The parameters `(int argc, char* argv[])` are used to pass command-line arguments to the program. Code Suggestions added them as default parameters, but they are not needed if you're not using command-line arguments. In that case, we can also define the main function as `int main()`.\n\n* `std::cout \u003C\u003C \"Hello World!\" \u003C\u003C std::endl;`: outputs \"Hello World\" to the console. The stream operator `\u003C\u003C` is used to send the string to output. 
`std::endl` is an end-line character.\n\n* `return 0;`: we use `return 0;` to indicate the end of the `main()` function and return a value of 0. In C++, it is good practice to return 0 to indicate the program has completed successfully.\n\n### Compiling and running your program\nNow that we have some code, let's review how we'll compile and run this program. \n* Open your terminal or use the terminal in VSCode (View -> Terminal).\n* Navigate to your project directory.\n* Compile your program by typing:\n\n```bash\nclang++ adventure.cpp -o adventure\n```\n\nThis command tells the Clang++ compiler to compile adventure.cpp and create an executable named adventure. After this, run your program by typing:\n\n```\n./adventure\n```\n\nYou should see \"Hello World!\" printed in the terminal. \n\nBecause our tutorial uses a single source file `adventure.cpp`, we can use the compiler directly to build our program. In the future, if the program grows beyond a file, we'll set up additional configurations to handle compilation. \n\n## Setting the text adventure stage\nBefore we get into more code, let's set the stage for our text adventure.\n\nFor this text adventure, players will explore the Dragon Realm. The Dragon Realm is full of mountains, lakes, and magic. Our player will enter the Dragon Realm for the first time, explore different locations, meet new characters, collect magical items, and journal their adventure. At every location, they will be offered choices to decide the course of their journey.\n\nTo kick off our adventure into the Dragon Realm, let's update our `adventure.cpp main()` function to be more specific. 
As you update the welcome message, you might find that Code Suggestions already knows we're building a game.\n\n![adventure.cpp - Code Suggestions offers suggestion of welcoming users to the Dragon Realm and knows its a game](https://about.gitlab.com/images/blogimages/2023-08-21-building-a-text-adventure-using-cplusplus-and-code-suggestions/1-welcome-to-the-realm.png){: .shadow}\n\n```cpp\n#include \u003Ciostream> // Include the I/O stream library for input and output\n\n// Main function, the starting point of the program\nint main()\n{\n    // Print \"Hello World!\" to the console\n    std::cout \u003C\u003C \"Welcome to the Dragon Realm!\" \u003C\u003C std::endl;\n\n    // Return 0 to indicate successful execution\n    return 0;\n}\n```\n\n## Defining the adventure: Variables\nA variable stores data that can be used throughout the program scope in the `main()` function. A variable is defined by a type, which indicates the kind of data it can hold.\n\nLet's create a variable to hold our player's name and give it the type `string`. 
A `string` is designed to hold a sequence of characters so it's perfect for storing our player's name.\n\n```cpp\n#include \u003Ciostream> // Include the I/O stream library for input and output\n\n// Main function, the starting point of the program\nint main()\n{\n    // Print \"Hello World!\" to the console\n    std::cout \u003C\u003C \"Welcome to the Dragon Realm!\" \u003C\u003C std::endl;\n\n    // Declare a string variable to hold the player's name\n    std::string playerName;\n\n    // Return 0 to indicate successful execution\n    return 0;\n}\n```\n\nAs you do this, you may notice that Code Suggestions knows what's coming next - prompting the user for their player's name.\n\n![adventure.cpp - Code Suggestions suggests welcoming the player with the playerName variable](https://about.gitlab.com/images/blogimages/2023-08-21-building-a-text-adventure-using-cplusplus-and-code-suggestions/2-player-name-variable.png){: .shadow}\n\nWe may be able to get more complete and specific Code Suggestions by providing comments about what we'd like to do with the name - personally welcome the player to the game. Start by adding our plan of action in comments.\n\n```cpp\n    // Declare a string variable to hold the player's name\n    std::string playerName;\n\n    // Prompt the user to enter their player name\n\n    // Display a personalized welcome message to the player with their name\n```\n\nTo capture the player's name from input, we need to use the `std::cin` object from the `iostream` library to fetch input from the player using the extraction operator `>>`. 
If you start typing `std::` to start prompting the user, Code Suggestions will make some suggestions to help you gather user input and save it to our `playerName` variable.\n\n![adventure.cpp - Code Suggestions prompts the user to input their player name](https://about.gitlab.com/images/blogimages/2023-08-21-building-a-text-adventure-using-cplusplus-and-code-suggestions/2.1-player-name-input.png){: .shadow}\n\nNext, to welcome our player personally to the game, we want to use `std::cout` and the `playerName` variable together:\n\n```cpp\n    // Declare a string variable to store the player name\n    std::string playerName;\n\n    // Prompt the user to enter their player name\n    std::cout \u003C\u003C \"Please enter your name: \";\n    std::cin >> playerName;\n\n    // Display a personalized welcome message to the player with their name\n    std::cout \u003C\u003C \"Welcome \" \u003C\u003C playerName \u003C\u003C \" to The Dragon Realm!\" \u003C\u003C std::endl;\n```\n\n## Crafting the adventure: Making decisions with conditionals\nIt's time to introduce our player to the different locations in the Dragon Realm they can visit. To prompt our player with choices, we use conditionals. Conditionals allow programs to take different actions based on criteria, such as user input.\n\nLet's offer the player a selection of locations to visit and capture their choice as an `int` value that corresponds to the location they picked.\n\n```cpp\n// Display a personalized welcome message to the player with their name\nstd::cout \u003C\u003C \"Welcome \" \u003C\u003C playerName \u003C\u003C \" to The Dragon Realm!\" \u003C\u003C std::endl;\n\n// Declare an int variable to capture the user's choice\nint choice;\n```\n\nThen, we want to offer the player the different locations that are possible for that choice. 
Let's start with a comment and prompt Code Suggestions with `std::cout` to fill out the details for us.\n\n![adventure.cpp - Code Suggestions suggests a multiline output for all the locations listed in the code below](https://about.gitlab.com/images/blogimages/2023-08-21-building-a-text-adventure-using-cplusplus-and-code-suggestions/3-setup-location-choice.png){: .shadow}\n\nAs you accept the suggestions, Code Suggestions will help build out the output and ask the player for their input.\n\n![adventure.cpp - Code Suggestions suggests a multiline output for all the locations listed in the code below and asks for player input](https://about.gitlab.com/images/blogimages/2023-08-21-building-a-text-adventure-using-cplusplus-and-code-suggestions/3.1-capture-player-location-choice.png){: .shadow}\n\n```cpp\n    // Declare an int variable to capture the user's choice\n    int choice;\n\n    // Offer the player a choice of 3 locations: 1 for Moonlight Markets, 2 for Grand Library, and 3 for Shimmer Lake.\n    std::cout \u003C\u003C \"Where will \" \u003C\u003C playerName \u003C\u003C \" go?\" \u003C\u003C std::endl;\n    std::cout \u003C\u003C \"1. Moonlight Markets\" \u003C\u003C std::endl;\n    std::cout \u003C\u003C \"2. Grand Library\" \u003C\u003C std::endl;\n    std::cout \u003C\u003C \"3. Shimmer Lake\" \u003C\u003C std::endl;\n    std::cout \u003C\u003C \"Please enter your choice: \";\n    std::cin >> choice;\n```\n\nOnce you start typing `std::cin >>` or accept the prompt for asking the player for their choice, Code Suggestions might offer a suggestion for building out your conditional flow. 
AI is non-deterministic: One suggestion can involve if/else statements while another solution uses a switch statement.\n\nTo give Code Suggestions a nudge, we'll add a comment and start typing out an if statement: `if (choice ==)`.\n\n![adventure.cpp - Code Suggestions suggests using an if statement to manage choice of locations](https://about.gitlab.com/images/blogimages/2023-08-21-building-a-text-adventure-using-cplusplus-and-code-suggestions/3.2-if-statement-locations.png){: .shadow}\n\nAnd if you keep accepting the subsequent suggestions, Code Suggestions will autocomplete the code using if/else statements.\n\n![adventure.cpp - Code Suggestions helps the user fill out the rest of the if/else statements for choosing a location](https://about.gitlab.com/images/blogimages/2023-08-21-building-a-text-adventure-using-cplusplus-and-code-suggestions/3.2.1-if-statement-locations-continued.png){: .shadow}\n\n```cpp\n    // Check the user's choice and display the corresponding messages\n    if (choice == 1) {\n        std::cout \u003C\u003C \"You chose Moonlight Markets\" \u003C\u003C std::endl;\n    }\n    else if (choice == 2) {\n        std::cout \u003C\u003C \"You chose Grand Library\" \u003C\u003C std::endl;\n    }\n    else if (choice == 3) {\n        std::cout \u003C\u003C \"You chose Shimmer Lake\" \u003C\u003C std::endl;\n    }\n    else {\n        std::cout \u003C\u003C \"Invalid choice\" \u003C\u003C std::endl;\n    }\n```\n\n`if/else` is a conditional statement that allows a program to execute code based on whether a condition, in this case the player's choice, is true or false. 
If the condition evaluates to true, the code inside the braces is executed.\n\n* `if (condition)`: used to check if the condition is true.\n* `else if (another condition)`: if the previous condition isn't true, the programs checks this condition.\n* `else`: if none of the previous conditions are true.\n\nAnother way of managing multiple choices like this example is using a `switch()` statement. A `switch` statement allows our program to jump to different sections of code based on the value of an expression, which, in this case, is the value of `choice`.\n\nWe are going to replace our `if/else` statements with a `switch` statement. You can comment out or delete the `if/else` statements and prompt Code Suggestions starting with `switch(choice) {`.\n\n![adventure.cpp - Code Suggestions helps the user handle the switch statement for the locations](https://about.gitlab.com/images/blogimages/2023-08-21-building-a-text-adventure-using-cplusplus-and-code-suggestions/3.3-conditional-switch-locations.png){: .shadow}\n\n![adventure.cpp - Code Suggestions helps the user handle the switch statement for the locations](https://about.gitlab.com/images/blogimages/2023-08-21-building-a-text-adventure-using-cplusplus-and-code-suggestions/3.3.1-conditional-switch-locations-continued.png){: .shadow}\n\n```cpp\n    // Evaluate the player's decision\n    switch(choice) {\n        // If 'choice' is 1, this block is executed.\n        case 1:\n            std::cout \u003C\u003C \"You chose Moonlight Markets.\" \u003C\u003C std::endl;\n            break;\n        // If 'choice' is 2, this block is executed.\n        case 2:\n            std::cout \u003C\u003C \"You chose Grand Library.\" \u003C\u003C std::endl;\n            break;\n        // If 'choice' is 3, this block is executed.\n        case 3:\n            std::cout \u003C\u003C \"You chose Shimmer Lake.\" \u003C\u003C std::endl;\n            break;\n        // If 'choice' is not 1, 2, or 3, this block is executed.\n        
default:\n            std::cout \u003C\u003C \"You did not enter 1, 2, or 3.\" \u003C\u003C std::endl;\n    }\n```\n\nEach case represents a potential value that the variable or expression being switched on (in this case, choice) could have. If a match is found, the code for that case is executed. We use the `default` case to handle any input errors in case the player enters a value that isn't accounted for.\n\nLet's build out what happens when our player visits the Shimmering Lake. I've added some comments after the player's arrival at Shimmering Lake to prompt Code Suggestions to help us build this out:\n\n```cpp\n    // If 'choice' is 3, this block is executed.\n    case 3:\n        std::cout \u003C\u003C \"You chose Shimmering Lake.\" \u003C\u003C std::endl;\n        // The player arrives at Shimmering Lake. It is one of the most beautiful lakes the player has ever seen.\n        // The player hears a mysterious melody from the water.\n        // They can either 1. Stay quiet and listen, or 2. Sing along with the melody.\n\n        break;\n```\n\nNow, if you start writing `std::cout` to begin offering the player this new decision point, Code Suggestions will help fill out the output code.\n\n![adventure.cpp - Code Suggestions helps fill out the output code based on the comments about the interaction at the Lake](https://about.gitlab.com/images/blogimages/2023-08-21-building-a-text-adventure-using-cplusplus-and-code-suggestions/3.4-case-3-output.png){: .shadow}\n\nYou might find that the code provided by Code Suggestions is very declarative. Once I've accepted the suggestion, I personalize the code as needed. 
For example in this case, including the melody the player heard and using the player's name instead of \"you\":\n\n![adventure.cpp - I added the playerName to the output and then prompted Code Suggestions to continue the narrative based on the comments above](https://about.gitlab.com/images/blogimages/2023-08-21-building-a-text-adventure-using-cplusplus-and-code-suggestions/3.4.1-customizing-output.png){: .shadow}\n\nI also wanted Code Suggestions to offer suggestions in a specific format, so I added an end line:\n\n![adventure.cpp - I added an end line to prompt Code Suggestions to break the choices into end line outputs](https://about.gitlab.com/images/blogimages/2023-08-21-building-a-text-adventure-using-cplusplus-and-code-suggestions/3.4.2-customizing-output-endline.png){: .shadow}\n\n![adventure.cpp - I added an endline to prompt Code Suggestions to break the choices into end line outputs](https://about.gitlab.com/images/blogimages/2023-08-21-building-a-text-adventure-using-cplusplus-and-code-suggestions/3.4.3-sub-choices-output.png){: .shadow}\n\nNow, we'd like to offer our player a nested choice in this scenario. Before we can define the new choices, we need a variable to store this nested choice. Let's define a new variable `int nestedChoice` in our `main()` function, outside of the `switch()` statement we set up. 
You can put it after our definition of the `choice` variable.\n\n```cpp\n    // Declare an int variable to capture the user's choice\n    int choice;\n    // Declare an int variable to capture the user's nested choice\n    int nestedChoice;\n```\n\nNext, returning to the `if/else` statement we were working on in `case 3`, we want to prompt the player for their decision and save it in `nestedChoice`.\n\n![adventure.cpp - I added an end line to prompt Code Suggestions to break the choices into end line outputs](https://about.gitlab.com/images/blogimages/2023-08-21-building-a-text-adventure-using-cplusplus-and-code-suggestions/3.4.4-capture-nested-choice.png){: .shadow}\n\nAs you can see, Code Suggestions wants to go ahead and handle the user's choice using another `switch` statement. I would prefer to use an `if/else` statement to handle this decision point.\n\nFirst, let's add some comments to give context:\n\n```cpp\n    // Capture the user's nested choice\n    std::cin >> nestedChoice;\n\n    // If the player chooses 1 and remains silent, they hear whispers of the merfolk below, but nothing happens.\n    // If the player chooses 2 and sings along, a merfolk surfaces and gifts them a special blue gem as a token of appreciation for their voice.\n\n    // Evaluate the user's nestedChoice\n```\n\nThen, start typing `if (nestedChoice == 1)` and Code Suggestions will start to offer suggestions:\n\n![adventure.cpp - Code Suggestions starts to build out an if statement to handle the nestedChoice](https://about.gitlab.com/images/blogimages/2023-08-21-building-a-text-adventure-using-cplusplus-and-code-suggestions/3.5-nested-choice-if.png){: .shadow}\n\nIf you tab to accept them, Code Suggestions will continue to fill out the rest of the nested `if/else` statements.\n\nSometimes, while you're customizing the suggestions that Code Suggestions gives, you may even discover that it would like to make creative suggestions, too!\n\n![adventure.cpp - Code Suggestions makes a 
creative suggestion to end the interaction with the merfolk by saying \"You are now free to go\" after you receive the gem.](https://about.gitlab.com/images/blogimages/2023-08-21-building-a-text-adventure-using-cplusplus-and-code-suggestions/3.5.2-nested-cs-creative-suggestion.png){: .shadow}\n\nHere's the code for `case 3` for the player's interaction at Shimmering Lake with the nested decision. I've updated some of the narrative dialogue with the player's name.\n```\n    // Handle the Shimmering Lake scenario.\n    case 3:\n        std::cout \u003C\u003C playerName \u003C\u003C \" arrives at Shimmering Lake. It is one of the most beautiful lakes that\" \u003C\u003C playerName \u003C\u003C \" has seen. They hear a mysterious melody from the water. They can either: \" \u003C\u003C std::endl;\n        std::cout \u003C\u003C \"1. Stay quiet and listen\" \u003C\u003C std::endl;\n        std::cout \u003C\u003C \"2. Sing along with the melody\" \u003C\u003C std::endl;\n        std::cout \u003C\u003C \"Please enter your choice: \";\n\n        // Capture the user's nested choice\n        std::cin >> nestedChoice;\n\n        // If the player chooses to remain silent\n        if (nestedChoice == 1)\n        {\n            std::cout \u003C\u003C \"Remaining silent, \" \u003C\u003C playerName \u003C\u003C \" hears whispers of the merfolk below, but nothing happens.\" \u003C\u003C std::endl;\n        }\n        // If the player chooses to sing along with the melody\n        else if (nestedChoice == 2)\n        {\n            std::cout \u003C\u003C \"Singing along, a merfolk surfaces and gifts \" \u003C\u003C playerName\n                    \u003C\u003C \" a special blue gem as a token of appreciation for their voice.\"\n                    \u003C\u003C std::endl;\n        }\n        break;\n```\n\nOur player isn't limited to just exploring Shimmering Lake. 
There's a whole realm to explore and they might want to go back and explore other locations.\n\nTo facilitate this, we can use a `while` loop. A loop is a type of conditional that allows a specific section of code to be executed multiple times based on a condition. For the `condition` that allows our `while` loop to run multiple times, let's use a `boolean` to initialize the loop condition.\n\n```cpp\n    // Initialize a flag to control the loop and signify the player's intent to explore.\n    bool exploring = true;\n    // As long as the player wishes to keep exploring, this loop will run.\n    while(exploring) {\n        // wrap the code for switch(choice)\n    }\n```\n\nWe also need to move our location prompt inside the `while` loop so that the player can visit more than one location at a time.\n\n![adventure.cpp - CS helps us write a go next prompt for the locations](https://about.gitlab.com/images/blogimages/2023-08-21-building-a-text-adventure-using-cplusplus-and-code-suggestions/3.6-while-loop-go-next.png){: .shadow}\n\n```cpp\n    // Initialize a flag to control the loop and signify the player's intent to explore.\n    bool exploring = true;\n    // As long as the player wishes to keep exploring, this loop will run.\n    while(exploring) {\n\n        // If still exploring, ask the player where they want to go next\n        std::cout \u003C\u003C \"Where will \" \u003C\u003C playerName \u003C\u003C \" go next?\" \u003C\u003C std::endl;\n        std::cout \u003C\u003C \"1. Moonlight Markets\" \u003C\u003C std::endl;\n        std::cout \u003C\u003C \"2. Grand Library\" \u003C\u003C std::endl;\n        std::cout \u003C\u003C \"3. 
Shimmering Lake\" \u003C\u003C std::endl;\n        std::cout \u003C\u003C \"Please enter your choice: \";\n        // Update value of choice\n        std::cin >> choice;\n\n        // Respond based on the player's main choice\n        switch(choice) {\n```\n\nOur `while` loop will keep running as long as `exploring` is `true`, so we need a way for the player to have the option to exit the game. Let's add a case 4 that allows the player to exit by setting `exploring = false`. This will exit the loop and take the player back to the original choices.\n\n```cpp\n    // Option to exit the game\n    case 4:\n        exploring = false;\n        break;\n```\n\n**Async exercise**: Give the player the option to exit the game instead of exploring a new decision.\n\nWe also need to update the error handling for invalid inputs in the `switch` statement. You can decide whether to end the program or use the `continue` statement to start a new loop iteration.\n\n```cpp\n        default:\n            std::cout \u003C\u003C \"You did not enter a valid choice.\" \u003C\u003C std::endl;\n            continue; // Errors continue with the next loop iteration\n```\n\nUsing I/O and conditionals is at the core of text-based adventure games and helps make these games interactive. 
We can combine user input, display output, and implement our narrative into decision-making logic to create an engaging experience.\n\nHere's what our `adventure.cpp` looks like now with some comments:\n\n```cpp\n#include \u003Ciostream> // Include the I/O stream library for input and output\n\n// Main function, the starting point of the program\nint main()\n{\n    std::cout \u003C\u003C \"Welcome to the Dragon Realm!\" \u003C\u003C std::endl;\n\n    // Declare a string variable to store the player name\n    std::string playerName;\n\n    // Prompt the user to enter their player name\n    std::cout \u003C\u003C \"Please enter your name: \";\n    std::cin >> playerName;\n\n    // Display a personalized welcome message to the player with their name\n    std::cout \u003C\u003C \"Welcome \" \u003C\u003C playerName \u003C\u003C \" to The Dragon Realm!\" \u003C\u003C std::endl;\n\n    // Declare an int variable to capture the user's choice\n    int choice;\n    // Declare an int variable to capture the user's nested choice\n    int nestedChoice;\n\n    // Initialize a flag to control the loop and signify the player's intent to explore.\n    bool exploring = true;\n    // As long as the player wishes to keep exploring, this loop will run.\n    while(exploring) {\n\n        // If still exploring, ask the player where they want to go next\n        std::cout \u003C\u003C \"Where will \" \u003C\u003C playerName \u003C\u003C \" go next?\" \u003C\u003C std::endl;\n        std::cout \u003C\u003C \"1. Moonlight Markets\" \u003C\u003C std::endl;\n        std::cout \u003C\u003C \"2. Grand Library\" \u003C\u003C std::endl;\n        std::cout \u003C\u003C \"3. 
Shimmering Lake\" \u003C\u003C std::endl;\n        std::cout \u003C\u003C \"Please enter your choice: \";\n        // Update value of choice\n        std::cin >> choice;\n\n        // Respond based on the player's main choice\n        switch(choice) {\n            //  Handle the Moonlight Markets scenario\n            case 1:\n                std::cout \u003C\u003C \"You chose Moonlight Markets.\" \u003C\u003C std::endl;\n                break;\n            // Handle the Grand Library scenario.\n            case 2:\n                std::cout \u003C\u003C \"You chose Grand Library.\" \u003C\u003C std::endl;\n                break;\n            // Handle the Shimmering Lake scenario.\n            case 3:\n                std::cout \u003C\u003C playerName \u003C\u003C \" arrives at Shimmering Lake. It is one of the most beautiful lakes that\" \u003C\u003C playerName \u003C\u003C \" has seen. They hear a mysterious melody from the water. They can either: \" \u003C\u003C std::endl;\n                std::cout \u003C\u003C \"1. Stay quiet and listen\" \u003C\u003C std::endl;\n                std::cout \u003C\u003C \"2. 
Sing along with the melody\" \u003C\u003C std::endl;\n                std::cout \u003C\u003C \"Please enter your choice: \";\n\n                // Capture the user's nested choice\n                std::cin >> nestedChoice;\n\n                // If the player chooses to remain silent\n                if (nestedChoice == 1)\n                {\n                    std::cout \u003C\u003C \"Remaining silent, \" \u003C\u003C playerName \u003C\u003C \" hears whispers of the merfolk below, but nothing happens.\" \u003C\u003C std::endl;\n                }\n                // If the player chooses to sing along with the melody\n                else if (nestedChoice == 2)\n                {\n                    std::cout \u003C\u003C \"Singing along, a merfolk surfaces and gifts \" \u003C\u003C playerName\n                            \u003C\u003C \" a special blue gem as a token of appreciation for their voice.\"\n                            \u003C\u003C std::endl;\n                }\n                break;\n            // Option to exit the game\n            case 4:\n                exploring = false;\n                break;\n            // If 'choice' is not 1, 2, or 3, this block is executed.\n            default:\n                std::cout \u003C\u003C \"You did not enter a valid choice.\" \u003C\u003C std::endl;\n                continue; // Errors continue with the next loop iteration\n        }\n    }\n\n    // Return 0 to indicate successful execution\n    return 0;\n}\n```\n\nHere's what the build output looks like if we run `adventure.cpp` and the player heads to the Shimmering Lake.\n\n![adventure.cpp build output - the player is called sugaroverflow and heads to the Shimmering Lake and receives a gem](https://about.gitlab.com/images/blogimages/2023-08-21-building-a-text-adventure-using-cplusplus-and-code-suggestions/3.6.1-full-case-3-output.png){: .shadow}\n\n## Structuring the narrative: Characters\nOur player can now explore the world. 
Soon, our player will also be able to meet people and collect objects. Before we can do that, let's organize the things our player can do by creating some structure for the player character.\n\nIn C++, a `struct` is used to group different data types. It's helpful in creating a group of items that belong together, such as our player's attributes and inventory, into a single unit. `struct` objects are defined globally, which means at the top of the file, before the `main()` function.\n\nIf you start typing `struct Player {`, Code Suggestions will help you out with a sample definition of a player struct.\n\n![adventure.cpp - Code Suggestions helps with setting up the struct definition for the player](https://about.gitlab.com/images/blogimages/2023-08-21-building-a-text-adventure-using-cplusplus-and-code-suggestions/4-player-struct-definition.png){: .shadow}\n\nAfter accepting this suggestion, you might find that Code Suggestions is eager to define some functions to make this game more fun, such as hunting for treasure.\n\n![adventure.cpp - Code Suggestions provides a suggestion for creating functions to hunt for treasure.](https://about.gitlab.com/images/blogimages/2023-08-21-building-a-text-adventure-using-cplusplus-and-code-suggestions/4.1-player-struct-treasure-suggestion.png){: .shadow}\n\n```cpp\n// Define a structure for a Player in the game.\nstruct Player{\n    std::string name;  // The name of the player.\n    int health;        // The current health of the player.\n    int xp;            // Experience points gained by the player. Could be used for leveling up or other game mechanics.\n};\n```\n\nGiving the player experience points was not in my original plan for this text adventure game, but Code Suggestions makes an interesting suggestion. We could use `xp` for leveling up or for other game mechanics as our project grows.\n\n`struct Player` provides a blueprint for creating a player and details the attributes that make up a player. 
To use our player in our code, we must instantiate, or create, an object of the `Player` struct within our `main()` function. Objects in C++ are instances of structures that contain attributes. In our example, we're working with the `Player` struct, which has attributes like name, health, and xp.\n\nAs you're creating a `Player` object, you might find that Code Suggestions wants to name the player \"John.\"\n\n![adventure.cpp - code suggestions suggests naming the new Player object John.](https://about.gitlab.com/images/blogimages/2023-08-21-building-a-text-adventure-using-cplusplus-and-code-suggestions/4.2-player-struct-instance-john.png){: .shadow}\n\n```cpp\nint main() {\n    // Create an instance of the Player struct\n    Player player;\n    player.health = 100; // Assign a default value for HP\n```\n\nInstead of naming our player \"John\" for everyone, we'll use the `Player` object to set the attribute for name. When we want to interact with or manipulate an attribute of an object, we use the dot operator `.`. The dot operator allows us to access specific members of the object. We can set the player's name using the dot operator with `player.name`.\n\nNote that we need to replace other mentions of `playerName` the variable with `player.name`, which allows us to access the player object's name directly.\n\n* Search for all occurrences of the `playerName` variable, and replace it with `player.name`.\n* Comment/Remove the unused `std::string playerName` variable after that.\n\nWhat your `adventure.cpp` will look like now:\n\n```cpp\n#include \u003Ciostream> // Include the I/O stream library for input and output\n\n// Define a structure for a Player in the game.\nstruct Player{\n    std::string name;  // The name of the player.\n    int health;        // The current health of the player.\n    int xp;            // Experience points gained by the player. 
Could be used for leveling up or other game mechanics.\n};\n\n// Main function, the starting point of the program\nint main()\n{\n    std::cout \u003C\u003C \"Welcome to the Dragon Realm!\" \u003C\u003C std::endl;\n\n    // Create an instance of the Player struct\n    Player player;\n    player.health = 100; // Assign a default value for HP\n\n    // Prompt the user to enter their player name\n    std::cout \u003C\u003C \"Please enter your name: \";\n    std::cin >> player.name;\n\n    // Display a personalized welcome message to the player with their name\n    std::cout \u003C\u003C \"Welcome \" \u003C\u003C player.name \u003C\u003C \" to The Dragon Realm!\" \u003C\u003C std::endl;\n\n    // Declare an int variable to capture the user's choice\n    int choice;\n    // Declare an int variable to capture the user's nested choice\n    int nestedChoice;\n\n    // Initialize a flag to control the loop and signify the player's intent to explore.\n    bool exploring = true;\n    // As long as the player wishes to keep exploring, this loop will run.\n    while(exploring) {\n\n        // If still exploring, ask the player where they want to go next\n        std::cout \u003C\u003C \"Where will \" \u003C\u003C player.name \u003C\u003C \" go next?\" \u003C\u003C std::endl;\n        std::cout \u003C\u003C \"1. Moonlight Markets\" \u003C\u003C std::endl;\n        std::cout \u003C\u003C \"2. Grand Library\" \u003C\u003C std::endl;\n        std::cout \u003C\u003C \"3. 
Shimmering Lake\" \u003C\u003C std::endl;\n        std::cout \u003C\u003C \"Please enter your choice: \";\n        // Update value of choice\n        std::cin >> choice;\n\n        // Respond based on the player's main choice\n        switch(choice) {\n            //  Handle the Moonlight Markets scenario\n            case 1:\n                std::cout \u003C\u003C \"You chose Moonlight Markets.\" \u003C\u003C std::endl;\n                break;\n            // Handle the Grand Library scenario.\n            case 2:\n                std::cout \u003C\u003C \"You chose Grand Library.\" \u003C\u003C std::endl;\n                break;\n            // Handle the Shimmering Lake scenario.\n            case 3:\n                std::cout \u003C\u003C player.name \u003C\u003C \" arrives at Shimmering Lake. It is one of the most beautiful lakes that\" \u003C\u003C player.name \u003C\u003C \" has seen. They hear a mysterious melody from the water. They can either: \" \u003C\u003C std::endl;\n                std::cout \u003C\u003C \"1. Stay quiet and listen\" \u003C\u003C std::endl;\n                std::cout \u003C\u003C \"2. 
Sing along with the melody\" \u003C\u003C std::endl;\n                std::cout \u003C\u003C \"Please enter your choice: \";\n\n                // Capture the user's nested choice\n                std::cin >> nestedChoice;\n\n                // If the player chooses to remain silent\n                if (nestedChoice == 1)\n                {\n                    std::cout \u003C\u003C \"Remaining silent, \" \u003C\u003C player.name \u003C\u003C \" hears whispers of the merfolk below, but nothing happens.\" \u003C\u003C std::endl;\n                }\n                // If the player chooses to sing along with the melody\n                else if (nestedChoice == 2)\n                {\n                    std::cout \u003C\u003C \"Singing along, a merfolk surfaces and gifts \" \u003C\u003C player.name\n                            \u003C\u003C \" a special blue gem as a token of appreciation for their voice.\"\n                            \u003C\u003C std::endl;\n                }\n                break;\n            // Option to exit the game\n            case 4:\n                exploring = false;\n                break;\n            // If 'choice' is not 1, 2, or 3, this block is executed.\n            default:\n                std::cout \u003C\u003C \"You did not enter a valid choice.\" \u003C\u003C std::endl;\n                continue; // Errors continue with the next loop iteration\n        }\n    }\n\n    // Return 0 to indicate successful execution\n    return 0;\n}\n```\n\n## Structuring the narrative: Items\nAn essential part of adventure games is a player's inventory - the collection of items they acquire and use during their journey. For example, at Shimmering Lake, the player acquired a blue gem.\n\nLet's update our Player `struct` to include an inventory using an array. In C++, an `array` is a collection of elements of the same type that can be identified by an index. When creating an array, you need to specify its type and size. 
Start by adding `std::string inventory` to the Player `struct`:\n\n![adventure.cpp - Code Suggestions shows us how to add an array of strings to the player struct to use as the players inventory](https://about.gitlab.com/images/blogimages/2023-08-21-building-a-text-adventure-using-cplusplus-and-code-suggestions/5-add-inventory-player-struct.png){: .shadow}\n\nYou might find that Code Suggestions wants our player to be able to carry some gold, but we don't need that for now. Let's also add `int inventoryCount;` to keep track of the number of items in our player's inventory.\n\n![adventure.cpp - Code Suggestions shows us how to add an integer for inventoryCount to the player struct](https://about.gitlab.com/images/blogimages/2023-08-21-building-a-text-adventure-using-cplusplus-and-code-suggestions/5.1-add-inventory-count-player-struct.png){: .shadow}\n\n```cpp\n// Define a structure for a Player in the game.\nstruct Player{\n    std::string name;  // The name of the player.\n    int health;        // The current health of the player.\n    int xp;            // Experience points gained by the player. Could be used for leveling up or other game mechanics.\n    std::string inventory[10];  // An array of strings for the player's inventory.\n    int inventoryCount = 0;  // The number of items in the player's inventory.\n};\n```\nIn our Player `struct`, we have defined an array for our inventory that can hold the names of 10 items (type:string, size: 10). As the player progresses through our story, we can assign new items to the inventory array based on the player's actions using the array index.\n\nSometimes Code Suggestions gets ahead of me and tries to add more complexity to the game by suggesting that we need to create a `struct` for some Monsters. 
Maybe later, Code Suggestions!\n\n![adventure.cpp - Code Suggestions wants to add a struct for Monsters we can battle](https://about.gitlab.com/images/blogimages/2023-08-21-building-a-text-adventure-using-cplusplus-and-code-suggestions/5.2-suggestion-gets-distracted-by-monsters.png\n){: .shadow}\n\nBack at the Shimmering Lake, the player received a special blue gem from the merfolk. Let's update the code in `case 2` for the Shimmering Lake to add the gem to our player's inventory.\n\nYou can start by accessing the player's inventory with `player.inventory` and Code Suggestions will help add the gem.\n\n![adventure.cpp - Code Suggestions shows us how to add a gem to the player's inventory using a post-increment operation and the inventory array from the struct object](https://about.gitlab.com/images/blogimages/2023-08-21-building-a-text-adventure-using-cplusplus-and-code-suggestions/5.3-add-gem-to-inventory.png){: .shadow}\n\n```cpp\n    // If the player chooses to sing along with the melody\n    else if (nestedChoice == 2)\n    {\n        std::cout \u003C\u003C \"Singing along, a merfolk surfaces and gifts \" \u003C\u003C player.name\n                \u003C\u003C \" a special blue gem as a token of appreciation for their voice.\"\n                \u003C\u003C std::endl;\n        player.inventory[player.inventoryCount] = \"Blue Gem\";\n        player.inventoryCount++;\n    }\n```\n\n* `player.inventory`: accesses the inventory attribute of the player object\n* `player.inventoryCount`: accesses the integer that keeps track of how many items are currently in the player's inventory. This also represents the next available index in our inventory array where an item can be stored.\n* `player.inventoryCount++`: increments the value of inventoryCount by 1. This is a post-increment operation. 
We are adding “Blue Gem” to the next available slot in the inventory array and incrementing the array for the newly added item.\n\nOnce we've added something to our player's inventory, we may also want to be able to look at everything in the inventory. We can use a `for` loop to iterate over the inventory array and display each item.\n\nIn C++, a `for` loop allows code to be repeatedly executed a specific number of times. It's different from the `while` loop we used earlier because the `while` executes its body based on a condition, whereas a `for` loop iterates over a sequence or range, usually with a known number of times.\n\nAfter adding the gem to the player's inventory, let's display all the items it has. Try starting a for loop with `for ( ` to display the player's inventory and Code Suggestions will help you with the syntax.\n\n![adventure.cpp - Code Suggestions demonstrates how to write a for loop to loop through the players inventory](https://about.gitlab.com/images/blogimages/2023-08-21-building-a-text-adventure-using-cplusplus-and-code-suggestions/5.4-loop-over-players-inventory.png){: .shadow}\n\n```cpp\nstd::cout \u003C\u003C player.name \u003C\u003C \"'s Inventory:\" \u003C\u003C std::endl;\n// Loop through the player's inventory up to the count of items they have\nfor (int i = 0; i \u003C player.inventoryCount; i++)\n{\n    // Output the item in the inventory slot\n    std::cout \u003C\u003C \"- \" \u003C\u003C player.inventory[i] \u003C\u003C std::endl;\n}\n```\n\nA `for` loop consists of 3 main parts:\n\n* `int i = 0`: is the initialization where you set up your loop variable. Here, we start counting from 0.\n* `i \u003C player.inventoryCount`: is the condition we're looping on, our loop checks if `i`, the current loop variable, is less than the number of items in our inventory. It will keep going until this is true.\n* `i++`: is the iteration. 
This updates the loop variable each time the loop runs.\n\nTo make sure that our loop doesn't encounter an error, let's add some error handling to make sure the inventory is not empty when we try to output it.\n\n```\nstd::cout \u003C\u003C player.name \u003C\u003C \"'s Inventory:\" \u003C\u003C std::endl;\n// Loop through the player's inventory up to the count of items they have\nfor (int i = 0; i \u003C player.inventoryCount; i++)\n{\n    // Check if the inventory slot is not empty.\n    if (!player.inventory[i].empty())\n    {\n        // Output the item in the inventory slot\n        std::cout \u003C\u003C \"- \" \u003C\u003C player.inventory[i] \u003C\u003C std::endl;\n    }\n}\n```\n\nWith our progress so far, we've successfully established a persistent `while` loop for our adventure, handled decisions, crafted a `struct` for our player, and implemented a simple inventory system. Now, let's dive into the next scenario, the Grand Library, applying the foundations we've learned.\n\n**Async exercise**: Add more inventory items found in different locations.\n\nHere's what we have for `adventure.cpp` so far:\n\n```cpp\n#include \u003Ciostream> // Include the I/O stream library for input and output\n\n// Define a structure for a Player in the game.\nstruct Player{\n    std::string name;  // The name of the player.\n    int health;        // The current health of the player.\n    int xp;            // Experience points gained by the player. 
Could be used for leveling up or other game mechanics.\n    std::string inventory[10];  // An array of strings for the player's inventory.\n    int inventoryCount = 0;\n};\n\n// Main function, the starting point of the program\nint main()\n{\n    std::cout \u003C\u003C \"Welcome to the Dragon Realm!\" \u003C\u003C std::endl;\n\n    // Create an instance of the Player struct\n    Player player;\n    player.health = 100; // Assign a default value for HP\n\n    // Prompt the user to enter their player name\n    std::cout \u003C\u003C \"Please enter your name: \";\n    std::cin >> player.name;\n\n    // Display a personalized welcome message to the player with their name\n    std::cout \u003C\u003C \"Welcome \" \u003C\u003C player.name \u003C\u003C \" to The Dragon Realm!\" \u003C\u003C std::endl;\n\n    // Declare an int variable to capture the user's choice\n    int choice;\n    // Declare an int variable to capture the user's nested choice\n    int nestedChoice;\n\n    // Initialize a flag to control the loop and signify the player's intent to explore.\n    bool exploring = true;\n    // As long as the player wishes to keep exploring, this loop will run.\n    while(exploring) {\n\n        // If still exploring, ask the player where they want to go next\n        std::cout \u003C\u003C \"--------------------------------------------------------\" \u003C\u003C std::endl;\n        std::cout \u003C\u003C \"Where will \" \u003C\u003C player.name \u003C\u003C \" go next?\" \u003C\u003C std::endl;\n        std::cout \u003C\u003C \"1. Moonlight Markets\" \u003C\u003C std::endl;\n        std::cout \u003C\u003C \"2. Grand Library\" \u003C\u003C std::endl;\n        std::cout \u003C\u003C \"3. 
Shimmering Lake\" \u003C\u003C std::endl;\n        std::cout \u003C\u003C \"Please enter your choice: \";\n        // Update value of choice\n        std::cin >> choice;\n\n        // Respond based on the player's main choice\n        switch(choice) {\n            //  Handle the Moonlight Markets scenario\n            case 1:\n                std::cout \u003C\u003C \"You chose Moonlight Markets.\" \u003C\u003C std::endl;\n                break;\n            // Handle the Grand Library scenario.\n            case 2:\n                std::cout \u003C\u003C \"You chose Grand Library.\" \u003C\u003C std::endl;\n                break;\n            // Handle the Shimmering Lake scenario.\n            case 3:\n                std::cout \u003C\u003C player.name \u003C\u003C \" arrives at Shimmering Lake. It is one of the most beautiful lakes that\" \u003C\u003C player.name \u003C\u003C \" has seen. They hear a mysterious melody from the water. They can either: \" \u003C\u003C std::endl;\n                std::cout \u003C\u003C \"1. Stay quiet and listen\" \u003C\u003C std::endl;\n                std::cout \u003C\u003C \"2. 
Sing along with the melody\" \u003C\u003C std::endl;\n                std::cout \u003C\u003C \"Please enter your choice: \";\n\n                // Capture the user's nested choice\n                std::cin >> nestedChoice;\n\n                // If the player chooses to remain silent\n                if (nestedChoice == 1)\n                {\n                    std::cout \u003C\u003C \"Remaining silent, \" \u003C\u003C player.name \u003C\u003C \" hears whispers of the merfolk below, but nothing happens.\" \u003C\u003C std::endl;\n                }\n                // If the player chooses to sing along with the melody\n                else if (nestedChoice == 2)\n                {\n                    std::cout \u003C\u003C \"Singing along, a merfolk surfaces and gifts \" \u003C\u003C player.name\n                            \u003C\u003C \" a special blue gem as a token of appreciation for their voice.\"\n                            \u003C\u003C std::endl;\n                    player.inventory[player.inventoryCount] = \"Blue Gem\";\n                    player.inventoryCount++;\n\n                    std::cout \u003C\u003C player.name \u003C\u003C \"'s Inventory:\" \u003C\u003C std::endl;\n                    // Loop through the player's inventory up to the count of items they have\n                    for (int i = 0; i \u003C player.inventoryCount; i++)\n                    {\n                        // Check if the inventory slot is not empty.\n                        if (!player.inventory[i].empty())\n                        {\n                            // Output the item in the inventory slot\n                            std::cout \u003C\u003C \"- \" \u003C\u003C player.inventory[i] \u003C\u003C std::endl;\n                        }\n                    }\n\n                }\n                break;\n            // Option to exit the game\n            case 4:\n                exploring = false;\n                break;\n            // If 'choice' is not 1, 2, 
or 3, this block is executed.\n            default:\n                std::cout \u003C\u003C \"You did not enter a valid choice.\" \u003C\u003C std::endl;\n                continue; // Errors continue with the next loop iteration\n        }\n    }\n\n    // Return 0 to indicate successful execution\n    return 0;\n}\n```\n\n![adventure.cpp - A full output of the game at the current state - our player sugaroverflow visits the Lake, receives the gem, adds it to their inventory, and we display the inventory before returning to the loop](https://about.gitlab.com/images/blogimages/2023-08-21-building-a-text-adventure-using-cplusplus-and-code-suggestions/5.5-full-output-shimmering-lake.png){: .shadow}\n",[482,9,746,703,724],{"slug":1013,"featured":6,"template":680},"building-a-text-adventure-using-cplusplus-and-code-suggestions","content:en-us:blog:building-a-text-adventure-using-cplusplus-and-code-suggestions.yml","Building A Text Adventure Using Cplusplus And Code Suggestions","en-us/blog/building-a-text-adventure-using-cplusplus-and-code-suggestions.yml","en-us/blog/building-a-text-adventure-using-cplusplus-and-code-suggestions",{"_path":1019,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":1020,"content":1026,"config":1040,"_id":1042,"_type":14,"title":1043,"_source":16,"_file":1044,"_stem":1045,"_extension":19},"/en-us/blog/building-gitlab-with-gitlab-a-multi-region-service-to-deliver-ai-features",{"title":1021,"description":1022,"ogTitle":1021,"ogDescription":1022,"noIndex":6,"ogImage":1023,"ogUrl":1024,"ogSiteName":693,"ogType":694,"canonicalUrls":1024,"schema":1025},"Building GitLab with GitLab: A multi-region service to deliver AI features","Discover how we built our first multi-region deployment for teams at GitLab using the platform's many features, helping create a frictionless developer experience for GitLab Duo 
users.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098664/Blog/Hero%20Images/Blog/Hero%20Images/building-gitlab-with-gitlab-no-type_building-gitlab-with-gitlab-no-type.png_1750098663794.png","https://about.gitlab.com/blog/building-gitlab-with-gitlab-a-multi-region-service-to-deliver-ai-features","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Building GitLab with GitLab: A multi-region service to deliver AI features\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Chance Feick\"},{\"@type\":\"Person\",\"name\":\"Sam Wiskow\"}],\n        \"datePublished\": \"2024-09-12\",\n      }",{"title":1021,"description":1022,"authors":1027,"heroImage":1023,"date":1030,"body":1031,"category":1032,"tags":1033},[1028,1029],"Chance Feick","Sam Wiskow","2024-09-12","For GitLab Duo, real-time AI-powered capabilities like [Code Suggestions](https://about.gitlab.com/solutions/code-suggestions/) need low-latency response times for a frictionless developer experience. Users don’t want to interrupt their flow and wait for a code suggestion to show up. To ensure GitLab Duo can provide the right suggestion at the right time and meet high performance standards for critical AI infrastructure, GitLab recently launched our first multi-region service to deliver AI features.\n\nIn this article, we will cover the benefits of multi-region services, how we built an internal platform codenamed ‘Runway’ for provisioning and deploying multi-region services using GitLab features, and the lessons learned migrating to multi-region in production.\n\n## Background on the project\n\nRunway is GitLab’s internal platform as a service (PaaS) for provisioning, deploying, and operating containerized services. 
Runway's purpose is to enable GitLab service owners to self-serve infrastructure needs with production readiness out of the box, so application developers can focus on providing value to customers. As part of [our corporate value of dogfooding](https://handbook.gitlab.com/handbook/values/#results), the first iteration was built in 2023 by the Infrastructure department on top of core GitLab capabilities, such as continuous integration/continuous delivery ([CI/CD](https://about.gitlab.com/topics/ci-cd/)), environments, and deployments.\n\nBy establishing automated GitOps best practices, Runway services use infrastructure as code (IaC), merge requests (MRs), and CI/CD by default.\n\nGitLab Duo is primarily powered by [AI Gateway](https://gitlab.com/gitlab-org/modelops/applied-ml/code-suggestions/ai-assist), a satellite service written in Python outside of GitLab’s modular monolith written in Ruby. In cloud computing, a region is a geographical location of data centers operated by cloud providers.\n\n## Defining a multi-region strategy\n\nDeploying in a single region is a good starting point for most services, but can come with downsides when you are trying to reach a global audience. Users who are geographically far from where your service is deployed may experience different levels of service and responsiveness than those who are closer. This can lead to a poor user experience, even if your service is well built in all other respects.\n\nFor AI Gateway, it was important to meet global customers wherever they are located, whether on GitLab.com or self-managed instances using Cloud Connector. When a developer is deciding to accept or reject a code suggestion, milliseconds matter and can define the user experience.\n\n### Goals\n\nMulti-region deployments require more infrastructure complexity, but for use cases where latency is a core component of the user experience, the benefits often outweigh the downsides. 
First, multi-region deployments offer increased responsiveness to the user. By serving requests from locations closest to end users, latency can be significantly reduced. Second, multi-region deployments provide greater availability. With fault tolerance, services can fail over during a regional outage. There is a much lower chance of a service failing completely, meaning users should not be interrupted even in partial failures.\n\nBased on our goals for performance and availability, we used this opportunity to create a scalable multi-region strategy in Runway, which is built leveraging GitLab features.\n\n### Architecture\n\nIn SaaS platforms, GitLab.com’s infrastructure is hosted on Google Cloud Platform (GCP). As a result, Runway’s first supported platform runtime is Cloud Run. The initial workloads deployed on Runway are stateless satellite services (e.g., AI Gateway), so Cloud Run services are a good fit that provide a clear migration path to more complex and flexible platform runtimes, e.g. Kubernetes.\n\nBuilding Runway on top of GCP Cloud Run using GitLab has allowed us to iterate and tease out the right level of abstractions for service owners as part of a platform play in the Infrastructure department.\n\nTo serve traffic from multiple regions in Cloud Run, the multi-region deployment strategy must support global load balancing, and the provisioning and configuration of regional resources. Here’s a simplified diagram of the proposed architecture in GCP:\n\n![simplified diagram of the proposed architecture in GCP](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098671/Blog/Content%20Images/Blog/Content%20Images/image7_aHR0cHM6_1750098671612.png)\n\nBy replicating Cloud Run services across multiple regions and configuring the existing global load balancing with serverless network endpoint group (NEG) backends, we’re able to serve traffic from multiple regions. 
For the remainder of the article, we’ll focus less on specifics of Cloud Run and more on how we’re building with GitLab.\n\n## Building a multi-region platform with GitLab\n\nNow that you have context about Runway, let's walk through how to build a multi-region platform using GitLab features.\n\n### Provision\n\nWhen building an internal platform, the first challenge is provisioning infrastructure for a service. In Runway, Provisioner is the component that is responsible for maintaining a service inventory and managing IaC for GCP resources using Terraform.\n\nTo provision a service, an application developer will open an MR to add a service project to the inventory using git, and Provisioner will create required resources, such as service accounts and identity and access management policies. When building this functionality with GitLab, Runway leverages [OpenID Connect (OIDC) with GPC Workload Identity Federation](https://docs.gitlab.com/ee/ci/cloud\\_services/google\\_cloud/) for managing IaC.\n\nAdditionally, Provisioner will create a deployment project for each service project. The purpose of creating separate projects for deployments is to ensure the [principle of least privilege](https://about.gitlab.com/blog/the-ultimate-guide-to-least-privilege-access-with-gitlab/) by authenticating as a GCP service account with restricted permissions. Runway leverages the [Projects API](https://docs.gitlab.com/ee/api/projects.html) for creating projects with [Terraform provider](https://registry.terraform.io/providers/gitlabhq/gitlab/latest/docs).\n\nFinally, Provisioner defines variables in the deployment project for the service account, so that deployment CI jobs can authenticate to GCP. 
Runway leverages [CI/CD variables](https://docs.gitlab.com/ee/ci/variables/) and [Job Token allowlist](https://docs.gitlab.com/ee/ci/jobs/ci\\_job\\_token.html\\#add-a-group-or-project-to-the-job-token-allowlist) to handle authentication and authorization.\n\nHere’s a simplified example of provisioning a multi-region service in the service inventory:\n\n```\n{\n  \"inventory\": [\n    {\n      \"name\": \"example-service\",\n      \"project_id\": 46267196,\n      \"regions\": [\n        \"europe-west1\",\n        \"us-east1\",\n        \"us-west1\"\n      ]\n    }\n  ]\n}\n```\n\nOnce provisioned, a deployment project and necessary infrastructure will be created for a service.\n\n### Configure\n\nAfter a service is provisioned, the next challenge is the configuration for a service. In Runway, [Reconciler](https://gitlab.com/gitlab-com/gl-infra/platform/runway/runwayctl) is a component that is responsible for configuring and deploying services by aligning the actual state with the desired state using Golang and Terraform.\n\nHere’s a simplified example of an application developer configuring GitLab CI/CD in their service project:\n\n```\n# .gitlab-ci.yml\nstages:\n  - validate\n  - runway_staging\n  - runway_production\n\ninclude:\n  - project: 'gitlab-com/gl-infra/platform/runway/runwayctl'\n    file: 'ci-tasks/service-project/runway.yml'\n    inputs:\n      runway_service_id: example-service\n      image: \"$CI_REGISTRY_IMAGE/${CI_PROJECT_NAME}:${CI_COMMIT_SHORT_SHA}\"\n      runway_version: v3.22.0\n\n# omitted for brevity\n```\n\nRunway provides sane default values for configuration that are based on our experience in delivering stable and reliable features to customers. Additionally, service owners can configure infrastructure using a service manifest file hosted in a service project. The service manifest uses JSON Schema for validation. 
When building this functionality with GitLab, Runway leverages [Pages](https://docs.gitlab.com/ee/user/project/pages/) for schema documentation.\n\nTo deliver this part of the platform, Runway leverages [CI/CD templates](https://docs.gitlab.com/ee/development/cicd/templates.html), [Releases](https://docs.gitlab.com/ee/user/project/releases/), and [Container Registry](https://docs.gitlab.com/ee/user/packages/container\\_registry/) for integrating with service projects.\n\nHere’s a simplified example of a service manifest:\n\n```\n# .runway/runway-production.yml\napiVersion: runway/v1\nkind: RunwayService\nspec:\n container_port: 8181\n regions:\n   - us-east1\n   - us-west1\n   - europe-west1\n\n# omitted for brevity\n```\n\nFor multi-region services, Runway injects an environment variable into the container instance runtime, e.g. RUNWAY\\_REGION, so application developers have the context to make any downstream dependencies regionally-aware, e.g. Vertex AI API.\n\nOnce configured, a service project will be integrated with a deployment project.\n\n### Deploy\n\nAfter a service project is configured, the next challenge is deploying a service. In Runway, Reconciler handles this by triggering a deployment job in the deployment project when an MR is merged to the main branch. When building this functionality with GitLab, Runway leverages [Trigger Pipelines](https://docs.gitlab.com/ee/ci/triggers/) and [Multi-Project Pipelines](https://docs.gitlab.com/ee/ci/pipelines/downstream\\_pipelines.html\\#multi-project-pipelines) to trigger jobs from service project to deployment project.\n\n![trigger jobs from service project to deployment project](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098672/Blog/Content%20Images/Blog/Content%20Images/image5_aHR0cHM6_1750098671612.png)\n\nOnce a pipeline is running in a deployment project, it will be deployed to an environment. By default, Runway will provision staging and production environments for all services. 
At this point, Reconciler will apply any Terraform resource changes for infrastructure. When building this functionality with GitLab, Runway leverages [Environments/Deployments](https://docs.gitlab.com/ee/ci/environments/) and [GitLab-managed Terraform state](https://docs.gitlab.com/ee/user/infrastructure/iac/terraform\\_state.html) for each service.\n\n![Reconciler applies any Terraform resource changes for infrastructure](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098672/Blog/Content%20Images/Blog/Content%20Images/image1_aHR0cHM6_1750098671614.png)\n\nRunway provides default application metrics for services. Additionally, custom metrics can be used by enabling a sidecar container with OpenTelemetry Collector configured to scrape Prometheus and remote write to Mimir. By providing observability out of the box, Runway is able to bake monitoring into CI/CD pipelines.\n\nExample scenarios include gradual rollouts for blue/green deployments, preventing promotions to production when staging is broken, or automatically rolling back to previous revision when elevated error rates occur in production.\n\n![Runway bakes monitoring into CI/CD pipelines](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098672/Blog/Content%20Images/Blog/Content%20Images/image2_aHR0cHM6_1750098671615.png)\n\nOnce deployed, environments will serve the latest revision of a service. At this point, you should have a good understanding of some of the challenges that will be encountered, and how to solve them with GitLab features.\n\n## Migrating to multi-region in production\n\nAfter extending Runway components to support multi-region in Cloud Run, the final challenge was migrating from AI Gateway’s single-region deployment in production with zero downtime. Today, teams using Runway to deploy their services can self-serve on regions making a multi-region deployment just as simple as a single-region deployment. 
\n\nWe were able to iterate on building multi-region functionality without impacting existing infrastructure by using semantic versioning for Runway. Next, we’ll share some learnings from the migration that may inform how to operate services for an internal multi-region platform.\n\n### Dry run deployments\n\nIn Runway, Reconciler will apply Terraform changes in CI/CD. The trade-off is that plans cannot be verified in advance, which could risk inadvertently destroying or misconfiguring production infrastructure. To solve this problem, Runway will perform a “dry run” deployment for MRs.\n\n![\"Dry run\" deployment](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098672/Blog/Content%20Images/Blog/Content%20Images/image6_aHR0cHM6_1750098671616.png)\n\nFor migrating AI Gateway, dry run deployments increased confidence and helped mitigate risk of downtime during rollout. When building an internal platform with GitLab, we recommend supporting dry run deployments from the start.\n\n### Regional observability\n\nIn Runway, existing observability was aggregated by assuming a single-region deployment. To solve this problem, Runway observability was retrofitted to include a new region label for Prometheus metrics.\n\nOnce metrics were retrofitted, we were able to introduce service level indicators (SLIs) for both regional Cloud Run services and global load balancing. Here’s an example dashboard screenshot for a general Runway service:\n\n![dashboard screenshot for a general Runway service](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098672/Blog/Content%20Images/Blog/Content%20Images/image3_aHR0cHM6_1750098671617.png)\n\n***Note:** Data is not actual production data and is only for illustration purposes.*\n\nAdditionally, we were able to update our service level objectives (SLOs) to support regions. 
As a result, service owners could be alerted when a specific region experiences an elevated error rate, or increase in response times.\n\n![screenshot of alerts](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098672/Blog/Content%20Images/Blog/Content%20Images/image4_aHR0cHM6_1750098671617.png)\n\n***Note:** Data is not actual production data and is only for illustration purposes.*\n\nFor migrating AI Gateway, regional observability increased confidence and helped provide more visibility into new infrastructure. When building an internal platform with GitLab, we recommend supporting regional observability from the start.\n\n### Self-service regions\n\nThe Infrastructure department successfully performed the initial migration of multi-region support for AI Gateway in production with zero downtime. Given the risk associated with rolling out a large infrastructure migration, it was important to ensure the service continued working as expected.\n\nShortly afterwards, service owners began self-serving additional regions to meet the growth of customers. At the time of writing, [GitLab Duo](https://about.gitlab.com/gitlab-duo/) is available in six regions around the globe and counting. Service owners are able to configure the desired regions, and Runway will provide guardrails along the way in a scalable solution.\n\nAdditionally, three other internal services have already started using multi-region functionality on Runway. Application developers have entirely self-served functionality, which validates that we’ve provided a good platform experience for service owners. For a platform play, a scalable solution like Runway is considered a good outcome since the Infrastructure department is no longer a blocker.\n\n## What’s next for Runway\n\nBased on how quickly we could iterate to provide results for customers, the SaaS Platforms department has continued to invest in Runway. 
We’ve grown the Runway team with additional contributors, started evolving the platform runtime (e.g. Google Kubernetes Engine), and continue dogfooding with tighter integration in the product.\n\nIf you’re interested in learning more, feel free to check out [https://gitlab.com/gitlab-com/gl-infra/platform/runway](https://gitlab.com/gitlab-com/gl-infra/platform/runway).\n\n## More Building GitLab with GitLab\n- [Why there is no MLOps without DevSecOps](https://about.gitlab.com/blog/there-is-no-mlops-without-devsecops/)\n- [Stress-testing Product Analytics](https://about.gitlab.com/blog/building-gitlab-with-gitlab-stress-testing-product-analytics/)\n- [Web API Fuzz Testing](https://about.gitlab.com/blog/building-gitlab-with-gitlab-api-fuzzing-workflow/)\n- [How GitLab.com inspired Dedicated](https://about.gitlab.com/blog/building-gitlab-with-gitlabcom-how-gitlab-inspired-dedicated/)\n- [Expanding our security certification portfolio](https://about.gitlab.com/blog/building-gitlab-with-gitlab-expanding-our-security-certification-portfolio/)\n","engineering",[109,1034,1035,1036,724,1037,1038,1039,703,9],"CD","CI","inside GitLab","performance","google","git",{"slug":1041,"featured":91,"template":680},"building-gitlab-with-gitlab-a-multi-region-service-to-deliver-ai-features","content:en-us:blog:building-gitlab-with-gitlab-a-multi-region-service-to-deliver-ai-features.yml","Building Gitlab With Gitlab A Multi Region Service To Deliver Ai 
Features","en-us/blog/building-gitlab-with-gitlab-a-multi-region-service-to-deliver-ai-features.yml","en-us/blog/building-gitlab-with-gitlab-a-multi-region-service-to-deliver-ai-features",{"_path":1047,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":1048,"content":1054,"config":1060,"_id":1062,"_type":14,"title":1063,"_source":16,"_file":1064,"_stem":1065,"_extension":19},"/en-us/blog/can-your-ci-cd-environment-support-ai-powered-devsecops",{"title":1049,"description":1050,"ogTitle":1049,"ogDescription":1050,"noIndex":6,"ogImage":1051,"ogUrl":1052,"ogSiteName":693,"ogType":694,"canonicalUrls":1052,"schema":1053},"Can your CI/CD environment support AI-powered DevSecOps? ","Unlock the value of AI-powered software development with a DevSecOps platform capable of supporting CI/CD hyperscale.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749683522/Blog/Hero%20Images/AdobeStock_659839979.jpg","https://about.gitlab.com/blog/can-your-ci-cd-environment-support-ai-powered-devsecops","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Can your CI/CD environment support AI-powered DevSecOps? \",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Darren Eastman\"}],\n        \"datePublished\": \"2024-01-03\",\n      }",{"title":1049,"description":1050,"authors":1055,"heroImage":1051,"date":1057,"body":1058,"category":10,"tags":1059},[1056],"Darren Eastman","2024-01-03","Our customers are experiencing a significant increase in the efficiency and pace of software development with [GitLab Duo](https://about.gitlab.com/gitlab-duo/), our suite of AI capabilities powering your workflow. This will likely correspond to a rise in the two [DORA metrics](https://about.gitlab.com/solutions/value-stream-management/dora/) that measure velocity: deployment frequency and lead time for changes. 
However, what may not be as obvious is that the age of AI-powered software development has also ushered in a new era of large-scale computing for CI/CD jobs. Organizations will need to learn how to support this CI/CD hyperscale.\n\n## What is CI/CD hyperscale?\n\nAs a quick recap, a [CI/CD](https://about.gitlab.com/topics/ci-cd/) job is a unit of work that is executed on a host computing system each time a developer pushes a code change to a project repository. This core guiding principle has accelerated value creation by software development teams over the past few years. Instead of focusing on the ceremonies of legacy project management, today’s leading software development teams deliver value by adding small increments to a software product regularly – weekly, daily, and even hourly.\n\nThe CI/CD engine supports this modern pattern of software product development by enabling development teams to define automation to continuously build, test, and integrate any new software change. Some of our larger customers who have transformed their software development practices have already attained what we at GitLab have labeled as CI/CD hyperscale. That is, they are typically running 3 million or more CI/CD jobs per month.\n\n## How to support AI-fueled CI/CD growth\n\nWith the advent of AI-powered DevSecOps, we hypothesize that, starting in 2024, organizations will see a 2x increase per year in the number of CI/CD jobs run by development teams leveraging AI-assisted features across the software development lifecycle. 
Starting with 3 million jobs per month as the baseline and assuming a 2% growth rate per month, the chart below illustrates the potential impact in the growth of CI/CD jobs monthly due to efficiency gains with AI-powered DevSecOps.\n\n![cicd hyperscale chart](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749683548/Blog/Content%20Images/image1.png)\n\nSo, what steps can you take to ensure your organization is positioned for success in this hyper-competitive new age of AI-powered DevSecOps? \n\nHere are a few pointers to get you started:\n\n- Analyze whether you have enabled your development teams – the creators of value – with the flexibility to adopt modern patterns in software product development. \n- Inventory the tools you use to support creative work, including project and task management.\n- Inventory the tools you use for software development and DevOps. Are you using multiple types of CI systems across your environment? If so, the next step is to gain a deeper understanding of why that is. \n- Create a plan to migrate all software development teams from multiple point solutions to one DevSecOps platform. \n\nYour software development teams may have built up extensive tooling using various CI systems and point solutions and may question the return on investment of a potentially time-consuming consolidation and migration effort. However, based on our internal data, customers adopting GitLab [realize payback in less than six months](https://about.gitlab.com/blog/why-the-market-is-moving-to-a-platform-approach-to-devsecops/).  \n\nIf you have already adopted the GitLab AI-powered DevSecOps Platform and are consolidating your platform engineering and software development processes, then you are well on your way to unlocking the value of AI-powered software development and having a solution capable of supporting CI/CD hyperscale. 
\n\nAs the pioneer of the integrated DevSecOps platform, we have been at the forefront of supporting CI/CD hyperscale for customers running CI/CD workloads on the fully managed GitLab SaaS CI/CD build environment or their own self-managed build infrastructure. That focused investment over the past decade has resulted in the development of the most scalable and flexible CI/CD engine – the core of the GitLab DevSecOps Platform. Look no further than our milestone of [more than 1 billion pipelines run on GitLab's SaaS-based DevSecOps Platform](https://about.gitlab.com/blog/one-billion-pipelines-cicd/). \n\nSo with GitLab CI and GitLab Runner, the ultimate CI/CD execution engine, the GitLab DevSecOps Platform is a solution that provides the foundation to continuously improve and transform your value creation processes while supporting the scale required to meet the competitive demands of the new age of AI.\n\n## Learn how to achieve CI/CD hyperscale\n\nIf you are new to GitLab and are interested in learning how we can help you transform your software development processes, [contact our sales team](https://about.gitlab.com/sales/) to help you with a custom demo and get you going on your adoption of AI-powered DevSecOps.\n",[109,703,9],{"slug":1061,"featured":6,"template":680},"can-your-ci-cd-environment-support-ai-powered-devsecops","content:en-us:blog:can-your-ci-cd-environment-support-ai-powered-devsecops.yml","Can Your Ci Cd Environment Support Ai Powered 
Devsecops","en-us/blog/can-your-ci-cd-environment-support-ai-powered-devsecops.yml","en-us/blog/can-your-ci-cd-environment-support-ai-powered-devsecops",{"_path":1067,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":1068,"content":1074,"config":1080,"_id":1082,"_type":14,"title":1083,"_source":16,"_file":1084,"_stem":1085,"_extension":19},"/en-us/blog/chat-about-your-merge-request-with-gitlab-duo",{"title":1069,"description":1070,"ogTitle":1069,"ogDescription":1070,"noIndex":6,"ogImage":1071,"ogUrl":1072,"ogSiteName":693,"ogType":694,"canonicalUrls":1072,"schema":1073},"Chat about your merge request with GitLab Duo","Learn how to use AI-powered Chat to quickly understand complex merge requests by asking about implementation choices, potential risks, and architectural decisions.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749675536/Blog/Hero%20Images/blog-image-template-1800x945__2_.png","https://about.gitlab.com/blog/chat-about-your-merge-request-with-gitlab-duo","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Chat about your merge request with GitLab Duo\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Torsten Linz\"}],\n        \"datePublished\": \"2024-11-22\",\n      }",{"title":1069,"description":1070,"authors":1075,"heroImage":1071,"date":1077,"body":1078,"category":10,"tags":1079},[1076],"Torsten Linz","2024-11-22","Managing a merge request (MR) is an integral part of collaborative development, involving navigating through code changes, discussions, and dependencies to ensure high-quality outcomes. Whether you’re reviewing someone else’s code or trying to make your own changes clearer, the new [GitLab Duo Chat](https://about.gitlab.com/gitlab-duo/) capability, available in GitLab Duo Enterprise, can help simplify your workflow. 
Now, you can have a conversation with GitLab Duo Chat about an MR, directly inside GitLab.\n\n## What GitLab Duo Chat brings to an MR workflow\n\nImagine jumping into a merge request titled \"Add logging to order processing.\" Your goal is to onboard yourself to the MR as quickly as possible and to review it. You can use GitLab Duo Chat to onboard yourself faster and understand critical questions to accelerate your review:\n\n* \"Do the logs cover all failure scenarios, or are there any gaps where an issue might not be traceable?\"  \n* “Are there any potential privacy concerns with the logged data?\"  \n* \"Why was logging added at these specific points in the order processing workflow, and how does it help with debugging or monitoring?\"\n\n![MR context example](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749675670/Blog/Content%20Images/MR_Context_example.png)\n\nThese are the kinds of questions that GitLab Duo Chat is ready to answer – questions that let you quickly understand the intentions behind the changes and uncover any potential risks before diving into the details. Instead of spending a lot of time trying to follow code paths or waiting on the author to reply to your questions, you can start getting answers right away, saving valuable time.\n\n## In-depth conversations about MRs\n\nThe magic of this new chat capability isn’t just in summarizing code – it’s in its ability to support in-depth conversations about the MR at hand. Let's assume the logging MR also includes notifications and refactoring. You can ask specific, insightful questions, such as:\n\n* “What are the potential network failure points introduced by refactoring the payment service into a microservice?”  \n* \"Were there any trade-offs made in terms of consistency or accuracy for better performance?\"  \n* \"How are failures in sending notifications handled? 
Are retries implemented?\"\n\nInstead of simply telling you what changes have been made, GitLab Duo Chat helps you understand *why* those changes were made, what risks are involved, and how to mitigate them. It lets you dig deep and explore the context behind every line of code, every architectural decision, and every change in behavior within the specific MR you are working on.\n\nAnd it doesn't end with that one answer. You can engage in a follow-up conversation to dig deeper or to explore. \n\n## An evolving conversation tool\n\nWe’re really excited about how GitLab Duo Chat is evolving to become a true conversational partner for MR authors and reviewers alike. GitLab Duo Chat is [aware of the MR description, discussions, the code diff, and metadata of a single MR](https://docs.gitlab.com/ee/user/gitlab_duo_chat/index.html#the-context-chat-is-aware-of). It’s like having an assistant who is well-versed in your MR and ready to explain any part of it – or even rewrite parts, if that’s what you need.\n\nWith GitLab Duo Chat, onboarding yourself to a complex MR or understanding a change in-depth is faster and more intuitive than ever before.\n\n## We need your feedback\n\nWe’re eager to hear how GitLab Duo Chat works for you. All feedback helps us refine this feature and make it even more useful. Please share your experiences by commenting on our [issue tracker](https://gitlab.com/gitlab-org/gitlab/-/issues/464587). Please include the questions you asked, the response you got, and whether it helped you move forward. Together, we can make GitLab Duo Chat an indispensable tool for every merge request!\n\nFor a deeper dive into how to use GitLab Duo Chat, check out our [documentation](https://docs.gitlab.com/ee/user/gitlab_duo_chat/examples#ask-about-a-specific-merge-request) or watch our introductory video below. 
Start your first conversation today and let us know what you think!\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/4muvSFuWWL4?si=7W4mHWw2iUOzoTUz\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->  \n\n> Sample this new capability with [a free 60-day trial of GitLab Ultimate and GitLab Duo Enterprise](https://gitlab.com/-/trials/new).\n\n## Learn more about GitLab Duo Chat\n\n- [GitLab Duo Chat: Get to know productivity-boosting AI enhancements](https://about.gitlab.com/blog/gitlab-duo-chat-get-to-know-productivity-boosting-ai-enhancements/)\n- [GitLab Duo Chat, your at-the-ready AI assistant, is now generally available](https://about.gitlab.com/blog/gitlab-duo-chat-now-generally-available/)\n- [GitLab Duo Chat 101: Get more done on GitLab with our AI assistant](https://about.gitlab.com/blog/gitlab-duo-chat-101-get-more-done-on-gitlab-with-our-ai-assistant/)",[9,482,704,724,725,746],{"slug":1081,"featured":6,"template":680},"chat-about-your-merge-request-with-gitlab-duo","content:en-us:blog:chat-about-your-merge-request-with-gitlab-duo.yml","Chat About Your Merge Request With Gitlab Duo","en-us/blog/chat-about-your-merge-request-with-gitlab-duo.yml","en-us/blog/chat-about-your-merge-request-with-gitlab-duo",{"_path":1087,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":1088,"content":1093,"config":1098,"_id":1100,"_type":14,"title":1101,"_source":16,"_file":1102,"_stem":1103,"_extension":19},"/en-us/blog/code-suggestions-for-all-during-beta",{"title":1089,"description":1090,"ogTitle":1089,"ogDescription":1090,"noIndex":6,"ogImage":820,"ogUrl":1091,"ogSiteName":693,"ogType":694,"canonicalUrls":1091,"schema":1092},"Code Suggestions available to all GitLab tiers while in Beta","All users can access the Code Suggestions AI-assisted feature while it is in Beta.","https://about.gitlab.com/blog/code-suggestions-for-all-during-beta","\n              
          {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Code Suggestions available to all GitLab tiers while in Beta\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Neha Khalwadekar\"}],\n        \"datePublished\": \"2023-05-16\",\n      }",{"title":1089,"description":1090,"authors":1094,"heroImage":820,"date":1095,"body":1096,"category":10,"tags":1097},[825],"2023-05-16","\n\n\u003Ci>This blog is the latest post in an ongoing series about GitLab’s journey to \u003Ca href=\"/blog/ai-ml-in-devsecops-series/\">build and integrate AI/ML into our DevSecOps platform\u003C/a>. The first blog post can be found \u003Ca href=\"/blog/what-the-ml-ai/\">here\u003C/a>. Throughout the series, we’ll feature blogs from our product, engineering, and UX teams to showcase how we’re infusing AI/ML into GitLab.\u003C/i>\n\nCode Suggestions is now available on GitLab.com for all users for free while the feature is in Beta. Teams can boost efficiency with the help of generative AI that suggests code while you're developing. We've extended language support from our initial six languages to now include 13 languages: C/C++, C#, Go, Java, JavaScript, Python, PHP, Ruby, Rust, Scala, Kotlin, and TypeScript. \n\nWe are making improvements to the Code Suggestions underlying AI model weekly to improve the quality of suggestions. Please remember that AI is non-deterministic, so you may not get the same suggestion week to week.\n\n## Privacy first\nCode Suggestions is built with privacy as a critical foundation. It keeps your proprietary source code secure within GitLab's enterprise cloud infrastructure, and this code isn't used as training data. Source code inference against the Code Suggestions model is not used to re-train the model. Learn about [data usage when using Code Suggestions](https://docs.gitlab.com/ee/user/project/repository/code_suggestions.html#code-suggestions-data-usage). 
\n\n## IDE support\nCode Suggestions is available in VS Code via the [GitLab Workflow extension](https://docs.gitlab.com/ee/user/project/repository/vscode.html#gitlab-workflow-extension-for-vs-code). We will soon support the GitLab WebIDE with GitLab 16.0. We are also working on adding [additional IDE support](https://gitlab.com/groups/gitlab-org/-/epics/10542) based on customer feedback, including JetBrains IntelliJ-based IDEs and Visual Studio support for code suggestions. We are also working to improve the user experience for how suggestions are presented and accepted within the IDEs to give developers more control over how the feature works. Additionally we're working to make it easier to setup Code Suggestions the first time and authenticate with GitLab.com \n\n## Self-managed support\nWe are also working to bring Code Suggestions to self-managed instances [via a secure connection to GitLab.com](https://gitlab.com/groups/gitlab-org/-/epics/10528). If you have unique requirements for your self-managed instances, we welcome you to express your interest in our [self-managed support issue](https://gitlab.com/gitlab-org/gitlab/-/issues/409183). Commenting on that issue will give you notifications as we post updates. \n\n## Enable Code Suggestions\nOur documentation details how to [enable Code Suggestions in VS Code](https://docs.gitlab.com/ee/user/project/repository/code_suggestions.html#enable-code-suggestions-in-vs-code). Below is a quickstart video walkthrough:\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n\u003Ciframe width=\"560\" height=\"315\" src=\"https://www.youtube.com/embed/WnxBYxN2-p4\" title=\"YouTube video player\" frameborder=\"0\" allow=\"accelerometer; autoplay; clipboard-write; encrypted-media; gyroscope; picture-in-picture; web-share\" allowfullscreen>\u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\n## Beta feature\nThis feature is in [Beta](https://docs.gitlab.com/ee/policy/experiment-beta-support.html#beta). 
Code Suggestions uses generative AI to suggest code while you're developing. Due to high demand, this feature will have unscheduled downtime and code suggestions in VS Code may be delayed. Code Suggestions may produce [low-quality or incomplete suggestions](https://docs.gitlab.com/ee/user/project/repository/code_suggestions.html#model-accuracy-and-quality). We look forward to hearing your feedback. Beta users should read about the [known limitations](https://docs.gitlab.com/ee/user/project/repository/code_suggestions.html#known-limitations). \n\nWe would love to hear about your experience and report issues in the [feedback issues](https://gitlab.com/gitlab-org/gitlab/-/issues/405152). \n\nCode Suggestions is just one of the ways we’re infusing GitLab with AI/ML capabilities to help GitLab users become more efficient and effective at their jobs. We are [looking across the software development lifecycle](/blog/what-the-ml-ai/) for painful and time-consuming tasks that are ideal for AI-assisted features. We'll continue to share these demos throughout this blog series.\n\nInterested in using these AI-powered features? [Join our waitlist](https://forms.gle/9eeUkPJauKsbLaoz5) and share your ideas.\n\nContinue reading our \"[AI/ML in DevSecOps](https://about.gitlab.com/blog/ai-ml-in-devsecops-series/)\" series.\n\n_Disclaimer: This blog contains information about upcoming products, features, and functionality. It is important to note that the information in this blog post is for informational purposes only. Please do not rely on this information for purchasing or planning purposes. As with all projects, the items mentioned in this blog and linked pages are subject to change or delay. 
The development, release, and timing of any products, features, or functionality remain at the sole discretion of GitLab._\n",[703,725,704,9],{"slug":1099,"featured":6,"template":680},"code-suggestions-for-all-during-beta","content:en-us:blog:code-suggestions-for-all-during-beta.yml","Code Suggestions For All During Beta","en-us/blog/code-suggestions-for-all-during-beta.yml","en-us/blog/code-suggestions-for-all-during-beta",{"_path":1105,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":1106,"content":1111,"config":1116,"_id":1118,"_type":14,"title":1119,"_source":16,"_file":1120,"_stem":1121,"_extension":19},"/en-us/blog/code-suggestions-improves-developer-productivity",{"title":1107,"description":1108,"ogTitle":1107,"ogDescription":1108,"noIndex":6,"ogImage":820,"ogUrl":1109,"ogSiteName":693,"ogType":694,"canonicalUrls":1109,"schema":1110},"How Code Suggestions can supercharge developers' daily productivity","Learn how you can use GitLab Code Suggestions to accelerate your development.","https://about.gitlab.com/blog/code-suggestions-improves-developer-productivity","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How Code Suggestions can supercharge developers' daily productivity\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Neha Khalwadekar\"}],\n        \"datePublished\": \"2023-05-25\",\n      }",{"title":1107,"description":1108,"authors":1112,"heroImage":820,"date":1113,"body":1114,"category":10,"tags":1115},[825],"2023-05-25","\n\n\u003Ci>This blog is the latest post in an ongoing series about GitLab’s journey to \u003Ca href=\"/blog/ai-ml-in-devsecops-series/\">build and integrate AI/ML into our DevSecOps platform\u003C/a>. The first blog post can be found \u003Ca href=\"/blog/what-the-ml-ai/\">here\u003C/a>. 
Throughout the series, we’ll feature blogs from our product, engineering, and UX teams to showcase how we’re infusing AI/ML into GitLab.\u003C/i>\n\nIn the fast-paced world of software development, time is a precious resource. Developers constantly strive for ways to improve the productivity and efficiency of their workflows. Enter Code Suggestions, a large language model (LLM)-based technology that can transform the everyday developer experience. Let’s delve into the novel use cases of Code Suggestions, including: \n\n* simplifying operations\n* assisting new developers in language explorations\n* eliminating the need for frequent web searches by experienced developers\n\nAll of these are examples of how Code Suggestions can accelerate the daily developer experience. Let’s explore some specific examples of these use cases.\n\n## Import packages\nWith Code Suggestions, developers can quickly complete mundane tasks like importing Python packages. \n![Example](https://about.gitlab.com/images/blogimages/2023-05-25-Blog-code-suggestions-improves-developer-productivity/python_packages.gif)\n\n\n## Complete functions\nCode Suggestions can help developers complete functions and use those functions to write code. In the example below, we are defining the first and last name and then defining a full name. Now we can take this a step forward and use those defined functions in a user form. \n![Example](https://about.gitlab.com/images/blogimages/2023-05-25-Blog-code-suggestions-improves-developer-productivity/sample_functions.gif)\n\n\n## Fill in boilerplate\nDevelopers can use Code Suggestions to recommend boilerplate code such as connecting to a mySQL database.\n![Example](https://about.gitlab.com/images/blogimages/2023-05-25-Blog-code-suggestions-improves-developer-productivity/mysql-boiler.gif)\n\n\n## Building data frames\nData manipulation is a fundamental task for developers working with structured data. 
Code Suggestions can simplify the process of offering intelligent recommendations for DataFrame operations. Code Suggestions can assist in saving developers the time and effort of searching through documentation or experimenting with trial and error.\n![Example](https://about.gitlab.com/images/blogimages/2023-05-25-Blog-code-suggestions-improves-developer-productivity/dataframe.gif)\n\n\n## Generate unit tests\nWith Code Suggestions, developers can quickly write unit tests for the supported programming languages.\n![Example](https://about.gitlab.com/images/blogimages/2023-05-25-Blog-code-suggestions-improves-developer-productivity/unit-test.gif)\n\n\n## Try Code Suggestions today\nCode Suggestions is now available for free on GitLab.com for all users while the feature is in Beta. Teams can boost efficiency with the help of generative AI that suggests code while they're developing. We are improving the underlying AI model weekly to improve the [quality of suggestions](https://gitlab.com/groups/gitlab-org/-/epics/10562). Please remember that AI is non-deterministic, so you may not get the same suggestion from week to week. Also remember that any time you are using AI-generated code you should be automatically analyzing it with [code quality](https://docs.gitlab.com/ee/ci/testing/code_quality.html) and [security scanning](https://docs.gitlab.com/ee/user/application_security/), both of which are available natively in the GitLab platform. \n\nWe’ve extended [language support](https://gitlab.com/groups/gitlab-org/-/epics/10561) from our initial six languages to now include 13 languages: C/C++, C#, Go, Java, JavaScript, Python, PHP, Ruby, Rust, Scala, Kotlin, and TypeScript.\n\nRead more about these [improvements and what’s next.](https://about.gitlab.com/blog/code-suggestions-for-all-during-beta/)\n\nInterested in using these AI-powered features? 
[Join our waitlist](https://forms.gle/9eeUkPJauKsbLaoz5) and share your ideas.\n\nContinue reading our \"[AI/ML in DevSecOps](https://about.gitlab.com/blog/ai-ml-in-devsecops-series/)\" series.\n\n_Disclaimer: This blog contains information about upcoming products, features, and functionality. It is important to note that the information in this blog post is for informational purposes only. Please do not rely on this information for purchasing or planning purposes. As with all projects, the items mentioned in this blog and linked pages are subject to change or delay. The development, release, and timing of any products, features, or functionality remain at the sole discretion of GitLab._\n",[703,725,704,9],{"slug":1117,"featured":6,"template":680},"code-suggestions-improves-developer-productivity","content:en-us:blog:code-suggestions-improves-developer-productivity.yml","Code Suggestions Improves Developer Productivity","en-us/blog/code-suggestions-improves-developer-productivity.yml","en-us/blog/code-suggestions-improves-developer-productivity",{"_path":1123,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":1124,"content":1130,"config":1138,"_id":1140,"_type":14,"title":1141,"_source":16,"_file":1142,"_stem":1143,"_extension":19},"/en-us/blog/continuous-machine-learning-development-with-gitlab-ci",{"title":1125,"description":1126,"ogTitle":1125,"ogDescription":1126,"noIndex":6,"ogImage":1127,"ogUrl":1128,"ogSiteName":693,"ogType":694,"canonicalUrls":1128,"schema":1129},"How machine learning ops works with GitLab and continuous machine learning","We share different machine learning use cases for CML projects using GitLab CI.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749681802/Blog/Hero%20Images/gitlab_cml_dvc_banner.png","https://about.gitlab.com/blog/continuous-machine-learning-development-with-gitlab-ci","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How 
machine learning ops works with GitLab and continuous machine learning\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Dr. Elle O'Brien\"}],\n        \"datePublished\": \"2020-12-01\",\n      }",{"title":1125,"description":1126,"authors":1131,"heroImage":1127,"date":1133,"body":1134,"category":1135,"tags":1136},[1132],"Dr. Elle O'Brien","2020-12-01","\n\nContinuous integration (CI) is standard practice in software development for speeding up development cycles, and for keeping them short and painless. CI means making small commits, often, and automating tests so every commit is a release candidate.\n\nWhen a project involves machine learning (ML), though, new challenges arise: Traditional [version control systems](/topics/version-control/) (like Git) that are key to CI struggle to manage large datasets and models.\nFurthermore, typical pass-fail tests are too coarse for understanding ML model performance – you might need to consider how several metrics, like accuracy, sensitivity, and specificity, are affected by changes in your code or data.\nData visualizations like confusion matrices and loss plots are needed to make sense of the high-dimensional and often unintuitive behavior of models.\n\n## Continuous machine learning: an introduction\n\n[Iterative.ai](https://iterative.ai), the team behind the popular open source version control system for ML projects [DVC](https://dvc.org) (short for Data Version Control),\nhas recently released another open source project called [CML](https://cml.dev), which stands for continuous machine learning.\nCML is our approach to adapting powerful CI systems like GitLab CI to common data science and ML use cases, including:\n\n- Automatic model training\n- Automatic model and dataset testing\n- Transparent and rich reporting about models and datasets (with data viz and metrics) in a merge request (MR)\n\n## Your first continuous machine learning report\n\nCML helps you put tables, data viz, and even sample outputs from 
models into comments on your MRs, so you can review datasets and models like code.\nLet's see how to produce a basic report – we'll train an ML model using GitLab CI, and then report a model metric and confusion matrix in our MR.\n\n![Confusion Matrix](https://about.gitlab.com/images/blogimages/cml_confusion_matrix.jpg){: .shadow.medium.center}\nConfusion matrix\n{: .note.text-center}\n\nTo make this report, our `.gitlab-ci.yml` contains the following workflow:\n\n```\n# .gitlab-ci.yml\nstages:\n    - cml_run\n\ncml:\n    stage: cml_run\n    image: dvcorg/cml-py3:latest\n\n    script:\n        - pip3 install -r requirements.txt\n        - python train.py\n\n        - cat metrics.txt >> report.md\n        - echo >> report.md\n        - cml-publish confusion_matrix.png --md --title 'confusion-matrix' >> report.md\n        - cml-send-comment report.md\n\n```\n\nThe entire [project repository is available here](https://gitlab.com/iterative.ai/cml-base-case/).\nThe steps consist of the following:\n\n- **Train**: This is a classic training step where we install requirements (like `pip` packages) and run the training script.\n- **Write a CML report**: Produced metrics are appended to a markdown report.\n- **Publish a CML report**: CML publishes an image of the confusion matrix with the embedded metrics to your GitLab MR.\n\nNow, when you and your teammates are deciding if your changes have had a positive effect on your modeling goals,\nyou have a dashboard of sorts to review. Plus, this report is linked by Git to your exact project version (data and code) and the runner used for training and the logs from that run.\n\nThis is the simplest use case for achieving continuous machine learning with CML and GitLab. 
In the next section we'll look at a more complex use case.\n\n## CML with DVC for data version control\n\nIn machine learning projects, you need to track changes in your datasets as well as changes in your code.\nSince Git is frequently a poor fit for managing large files, we can use [DVC](https://dvc.org) to link remote datasets to your CI system.\n\n```\n# .gitlab-ci.yml\nstages:\n  - cml_run\n\ncml:\n  stage: cml_run\n  image: dvcorg/cml-py3:latest\n  script:\n    - dvc pull data\n\n    - pip install -r requirements.txt\n    - dvc repro\n\n    # Compare metrics to master\n    - git fetch --prune\n    - dvc metrics diff --show-md master >> report.md\n    - echo >> report.md\n\n    # Visualize loss function diff\n    - dvc plots diff\n      --target loss.csv --show-vega master > vega.json\n    - vl2png vega.json | cml-publish --md >> report.md\n    - cml-send-comment report.md\n```\n\nThe entire [project is available here](https://gitlab.com/iterative.ai/cml-dvc-case).\nIn this workflow, we have additional steps that use DVC to pull a training dataset, run an experiment, and then use CML to publish the report in your MR.\n\n![CML with DVC](https://about.gitlab.com/images/blogimages/cml_dvc.jpg){: .shadow.medium.center}\nCML with DVC\n{: .note.text-center}\n\nFor more details about ML data versioning and tracking, check out the [DVC documentation](https://dvc.org/doc).\n\n## Summary\n\nWe made CML to adapt CI to machine learning, so data science teams can enjoy benefits such as:\n\n- Your code, data, models, and training infrastructure (hardware and software environment) will be Git versioned.\n- You’re automating work, testing frequently, and getting fast feedback (with visual reports if you use CML). In the long run, this will almost certainly speed up your project’s development.\n- CI systems make your work visible to everyone on your team. No one has to search very hard to find the code, data, and model from your best run.\n\n### About the guest author\n\n_Dr. 
Elle O'Brien is a Ph.D. data scientist at iterative.ai and co-creator of the [CML](https://cml.dev) project. She is also a lecturer at [UMSI](https://www.si.umich.edu/)._\n","news",[109,267,231,1137,9],"open source",{"slug":1139,"featured":6,"template":680},"continuous-machine-learning-development-with-gitlab-ci","content:en-us:blog:continuous-machine-learning-development-with-gitlab-ci.yml","Continuous Machine Learning Development With Gitlab Ci","en-us/blog/continuous-machine-learning-development-with-gitlab-ci.yml","en-us/blog/continuous-machine-learning-development-with-gitlab-ci",{"_path":1145,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":1146,"content":1152,"config":1158,"_id":1160,"_type":14,"title":1161,"_source":16,"_file":1162,"_stem":1163,"_extension":19},"/en-us/blog/develop-c-unit-testing-with-catch2-junit-and-gitlab-ci",{"title":1147,"description":1148,"ogTitle":1147,"ogDescription":1148,"noIndex":6,"ogImage":1149,"ogUrl":1150,"ogSiteName":693,"ogType":694,"canonicalUrls":1150,"schema":1151},"Develop C++ unit testing with Catch2, JUnit, and GitLab CI","Learn how to set up, write, and automate C++ unit tests using Catch2 with GitLab CI/CD. 
See examples from a working air quality app project and AI-powered help from GitLab Duo.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749659684/Blog/Hero%20Images/AdobeStock_479904468__1_.jpg","https://about.gitlab.com/blog/develop-c-unit-testing-with-catch2-junit-and-gitlab-ci","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Develop C++ unit testing with Catch2, JUnit, and GitLab CI\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Fatima Sarah Khalid\"}],\n        \"datePublished\": \"2024-07-02\",\n      }",{"title":1147,"description":1148,"authors":1153,"heroImage":1149,"date":1154,"body":1155,"category":1156,"tags":1157},[698],"2024-07-02","Continuous integration (CI) and automated testing are important DevSecOps workflows for software developers to detect bugs early, improve code quality, and streamline their development processes. \n\nIn this tutorial, you'll learn how to set up unit testing on a `C++` project with [Catch2](https://github.com/catchorg/Catch2) and GitLab CI for continuous integration. You'll also see how the AI-powered features of [GitLab Duo](https://about.gitlab.com/gitlab-duo/) can help. We’ll use [an air quality monitoring application](https://gitlab.com/gitlab-da/use-cases/ai/ai-applications/air-quality-app) as our reference project.\n\n## Prerequisites\n\n- Ensure you have [CMake](https://cmake.org/ \"CMake\") installed on your machine. \n- A modern `C++` compiler such as GCC or Clang is required. \n- An API key from [OpenWeatherMap](https://openweathermap.org/api) - requires signing up for a free account (1,000/calls per day are included for free). 
\n\n## Set up the application for testing\n\nThe reference project we’ll be using for demonstrating testing in this blog post is an air quality monitoring application that fetches air quality data from the OpenWeatherMap API based on the U.S zip codes only provided by the user.\n\nHere are the steps to set up the application for testing:\n\n1. Fork the [the reference project](https://gitlab.com/gitlab-da/use-cases/ai/ai-applications/air-quality-app) and clone the fork to your local environment.\n\n2. Generate an API key from  [OpenWeatherMap](https://openweathermap.org/) and export it into the environment. \n\n```shell\nexport API_KEY=\"YOURAPIKEY_HERE\"\n```\n\n3. Alternatively, you can add the key into your `.env` configuration, and source it with `source ~/.env`, or use a different mechanism to populate the environment.\n\n4. Compile and build the project code with the following instructions:\n\n```cpp\ncmake -S . -B build\ncmake --build build\n```\n\n5. Run the application using the executable and passing in a U.S zip code (90210 as an example): \n\n```cpp\n./build/air_quality_app 90210\n```\n\nHere’s an example of what running the program will look like in your terminal:  \n\n```bash\n❯ ./build/air_quality_app 90210\nAir Quality Index (AQI) for Zip Code 90210: 2 (Fair)\n```\n\n## Install Catch2\n\nNow that the application is set up and working, let's start working on adding testing using Catch2. Catch2 is a modern, `C++-native` testing framework for unit tests. \n\nYou can also ask GitLab Duo Chat within your IDE for an introduction to getting started with Catch2 as a `C++` testing framework. GitLab Duo Chat will provide getting started steps as well as an example test: \n\n![GitLab Duo Chat starting steps and example test](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749676997/Blog/Content%20Images/1.duo-chat-installing-catch2.png)\n\n1. 
First navigate to your project’s root directory and create an externals folder using the `mkdir` command.\n\n```shell\nmkdir externals\n```\n\n2. There are several ways to install Catch2 via [its CMake integration](https://github.com/catchorg/Catch2/blob/devel/docs/cmake-integration.md#top). We will use the option of installing it as a submodule and including it as part of the source code to simplify dependency management. To add Catch2 to your project in the `externals` folder: \n\n```shell\ngit submodule add https://github.com/catchorg/Catch2.git externals/Catch2\ngit submodule update --init --recursive\n```\n\n3. Update `CMakeLists.txt` to include Catch2’s directory as a subdirectory. This allows CMake to find and build Catch2 as a part of our project. \n\n```cpp\n# Assuming Catch2 in externals/Catch2\nadd_subdirectory(externals/Catch2)\n```\n\n4. Create a `tests.cpp` file in your project root to write our tests to: \n\n```shell\ntouch tests.cpp\n```\n\n5. Update `CMakeLists.txt` Link against Catch2. When defining your test executable in CMake, link it against Catch2:\n\n```cpp\n# Add tests executable and link it to Catch2\nadd_executable(tests test.cpp)\ntarget_link_libraries(tests PRIVATE Catch2::Catch2WithMain)\n```\n\n## Structure the project for testing\n\nBefore we start writing our tests, we should separate our application logic into separate files in order to maintain and test our code more efficiently. At the end of this section we should have:\n\n```\nmain.cpp containing only the main() function and application setup\nincludes/functions.cpp containing all functional code such as API calls and data processing: \nincludes/functions.h containing the declarations for the functions defined in functions.cpp.  It needs to define the preprocessor macro guards, and include all necessary headers. \n```\n\nApply the following changes to the files: \n\n1. 
`main.cpp`\n\n```cpp\n#include \u003Ciostream>\n#include \"functions.h\"\n\nint main(int argc, char* argv[]) {\n   if (argc \u003C 2) {\n       std::cerr \u003C\u003C \"Usage: \" \u003C\u003C argv[0] \u003C\u003C \" \u003CZip Code>\" \u003C\u003C std::endl;\n       return 1;\n   }\n\n   std::string zipCode = argv[1];\n   std::string apiKey = getApiKey();\n   if (apiKey.empty()) {\n       std::cerr \u003C\u003C \"API key not found.\" \u003C\u003C std::endl;\n       return 1;\n   }\n\n   auto [lat, lon] = geocodeZipcode(zipCode, apiKey);\n   if (lat == 0 && lon == 0) {\n       std::cerr \u003C\u003C \"Failed to geocode zipcode.\" \u003C\u003C std::endl;\n       return 1;\n   }\n\n   std::string response = fetchAirQuality(lat, lon, apiKey);\n   std::string airQualityInfo = parseAirQualityResponse(response);\n\n   std::cout \u003C\u003C \"Air Quality Index for Zip Code \" \u003C\u003C zipCode \u003C\u003C \": \" \u003C\u003C airQualityInfo \u003C\u003C std::endl;\n\n   return 0;\n}\n```\n\n2. Create a `functions.h:` in the `includes` folder: \n\n```cpp\n#ifndef FUNCTIONS_H\n#define FUNCTIONS_H\n\n#include \u003Cstring>\n#include \u003Cutility>\n#include \u003Cvector>\n\n// Declare the function prototype\nstd::string httpRequest(const std::string& url);\nbool loadEnvFile(const std::string& filename);\nstd::string getApiKey();\nstd::pair\u003Cdouble, double> geocodeZipcode(const std::string& zipCode, const std::string& apiKey);\nstd::string fetchAirQuality(double lat, double lon, const std::string& apiKey);\nstd::string parseAirQualityResponse(const std::string& response);\n\n#endif\n```\n\n3. 
Create a `functions.cpp` in the `includes` folder: \n\n```cpp\n#include \"functions.h\"\n#include \u003Cfstream>\n#include \u003Celnormous/HTTPRequest.hpp>\n#include \u003Cnlohmann/json.hpp>\n#include \u003Ciostream>\n#include \u003Ccstdlib> // For getenv\n\nstd::string httpRequest(const std::string& url) {\n   try {\n       http::Request request{url};\n       const auto response = request.send(\"GET\");\n       return std::string{response.body.begin(), response.body.end()};\n   } catch (const std::exception& e) {\n       std::cerr \u003C\u003C \"Request failed, error: \" \u003C\u003C e.what() \u003C\u003C std::endl;\n       return \"\";\n   }\n}\nstd::string getApiKey() {\n   const char* envApiKey = std::getenv(\"API_KEY\");\n   if (envApiKey) {\n       return std::string(envApiKey);\n   }\n   // If the environment variable is not set, fallback to the config file\n   std::ifstream configFile(\"config.txt\");\n   std::string line;\n   if (getline(configFile, line)) {\n       return line.substr(line.find('=') + 1);\n   }\n   return \"\";\n}\n\nstd::pair\u003Cdouble, double> geocodeZipcode(const std::string& zipCode, const std::string& apiKey) {\n   std::string url = \"http://api.openweathermap.org/geo/1.0/zip?zip=\" + zipCode + \",US&appid=\" + apiKey;\n   std::string response = httpRequest(url);\n   try {\n       auto json = nlohmann::json::parse(response);\n       if (json.contains(\"lat\") && json.contains(\"lon\")) {\n           double lat = json[\"lat\"];\n           double lon = json[\"lon\"];\n           return {lat, lon};\n       } else {\n           std::cerr \u003C\u003C \"Geocode response missing 'lat' or 'lon' fields: \" \u003C\u003C response \u003C\u003C std::endl;\n       }\n   } catch (const nlohmann::json::parse_error& e) {\n       std::cerr \u003C\u003C \"Failed to parse geocode response: \" \u003C\u003C e.what() \u003C\u003C \" - Response: \" \u003C\u003C response \u003C\u003C std::endl;\n   }\n   return {0, 0};\n}\n\nstd::string 
fetchAirQuality(double lat, double lon, const std::string& apiKey) {\n   std::string url = \"http://api.openweathermap.org/data/2.5/air_pollution?lat=\" + std::to_string(lat) + \"&lon=\" + std::to_string(lon) + \"&appid=\" + apiKey;\n   std::string response = httpRequest(url);\n   return response;\n}\n\nstd::string parseAirQualityResponse(const std::string& response) {\n   try {\n       auto json = nlohmann::json::parse(response);\n       if (json.contains(\"list\") && !json[\"list\"].empty() && json[\"list\"][0].contains(\"main\")) {\n           int aqi = json[\"list\"][0][\"main\"][\"aqi\"];\n           std::string aqiCategory;\n           switch (aqi) {\n               case 1:\n                   aqiCategory = \"Good\";\n                   break;\n               case 2:\n                   aqiCategory = \"Fair\";\n                   break;\n               case 3:\n                   aqiCategory = \"Moderate\";\n                   break;\n               case 4:\n                   aqiCategory = \"Poor\";\n                   break;\n               case 5:\n                   aqiCategory = \"Very Poor\";\n                   break;\n               default:\n                   aqiCategory = \"Unknown\";\n                   break;\n           }\n           return std::to_string(aqi) + \" (\" + aqiCategory + \")\";\n       } else {\n           return \"No AQI data available\";\n       }\n   } catch (const std::exception& e) {\n       std::cerr \u003C\u003C \"Failed to parse JSON response: \" \u003C\u003C e.what() \u003C\u003C std::endl;\n       return \"Error parsing AQI data\";\n   }\n}\n\n```\n\n4. 
Now that we have separated the source files, we also need to update our `CMakeLists.txt` to include `functions.cpp` in the `add_executable()` calls:\n\n```cpp\ncmake_minimum_required(VERSION 3.14)\nproject(air-quality-app)\n\n# Set the C++ standard for the project\nset(CMAKE_CXX_STANDARD 17)\nset(CMAKE_CXX_STANDARD_REQUIRED ON)\nset(CMAKE_CXX_EXTENSIONS OFF)\n\ninclude_directories(${CMAKE_SOURCE_DIR}/includes)\n\n# Define the main program executable\nadd_executable(air_quality_app main.cpp includes/functions.cpp)\n\n# Assuming Catch2 in externals/Catch2\nadd_subdirectory(externals/Catch2)\n\n# Add tests executable and link it to Catch2\nadd_executable(tests tests.cpp includes/functions.cpp)\ntarget_link_libraries(tests PRIVATE Catch2::Catch2WithMain)\n```\n\nTo verify that the changes are working, regenerate the CMake configuration and rebuild the source code with the following commands. The build will take longer now that we're compiling Catch2 files. \n\n```shell\nrm -rf build # delete existing build files\ncmake -S . -B build \ncmake --build build  \n```\n\nYou should be able to run the application without any errors.\n\n```shell\n./build/air_quality_app 90210\n```\n\n## Write tests in Catch2  \n\nCatch2 tests are made up of [macros and assertions](https://github.com/catchorg/Catch2/blob/devel/docs/assertions.md). Macros in Catch2 are used to define test cases and sections within those test cases. They help in organizing and structuring the tests. Assertions are used to verify that the code behaves as expected. If an assertion fails, the test case will fail, and Catch2 will report the failure.\n\nLet’s review a basic test scenario for an addition function to understand. Note: This test is read-only, as an example. 
\n\n```cpp\nint add(int a, int b) {\n   return a + b;\n}\n\nTEST_CASE(\"Addition works correctly\", \"[math]\") {\n   REQUIRE(add(1, 1) == 2);  // Test passes if 1+1 equals 2\n   REQUIRE(add(2, 2) != 5);  // Test passes if 2+2 does not equal 5\n}\n```\n\n- Each test begins with the `TEST_CASE` macro, which defines a test case container. The macro accepts two parameters: a string describing the test case and optionally a second string for tagging the test for easy filtering.\n- Tests are also composed of assertions, which are statements that check if conditions are true. Catch2 provides macros for assertion that include `REQUIRE`, which aborts the current test if the assertion fails, and `CHECK`, which logs the failure but continues with the current test.\n\n### Prepare to write tests with Catch2\n\nTo test the API retrieval functions in our air quality application, we’ll be using mock API requests. Mock API testing is a technique used to test how your application will interact with an external API without making any real API calls. Instead of sending requests to a live API server, we can simulate the responses using predefined data. Mock requests allow us to control the input data and specify exactly what the API would return for different requests, making sure that our tests aren't affected by changes in the real API responses or unexpected data. This also makes it easier for us to simulate and catch different failures.\n\nIn our `tests.cpp` file, let’s define the following function to run mock API requests.   
\n\n```cpp\n#include \"includes/functions.h\"\n#include \u003Ccatch2/catch_test_macros.hpp>\n#include \u003Cstring>\n\n// Mock HTTP request function that simulates API responses\nstd::string mockHttpRequest(const std::string& url) {\n   if (url.find(\"geo\") != std::string::npos) {\n       // Mock response for geocoding\n       return R\"({\"lat\": 40.7128, \"lon\": -74.0060})\"; \n   } else if (url.find(\"air_pollution\") != std::string::npos) {\n       // Mock response for air quality\n       return R\"({\"list\": [{\"main\": {\"aqi\": 2}}]})\";\n   }\n   // Default mock response for unmatched endpoints\n   return \"{}\";\n}\n// Overriding the actual httpRequest function with the mockHttpRequest for testing\nstd::string httpRequest(const std::string& url) {\n   return mockHttpRequest(url);\n}\n```\n\n- This function simulates HTTP requests and returns predefined JSON responses based on the URL given as input. \n- It also checks the URL to determine which type of data is being requested based on the functionality of the application (geocoding, air pollution, or forecast data). If the URL doesn’t match the expected endpoint, it returns an empty JSON object. \n\nDon't compile the code just yet, as you'll see a linker error. Since we're overriding the original `httpRequest` function with our mock function for testing, we'll need a preprocessor macro to enable conditional compilation - indicating which `httpRequest` function should run when we're compiling tests. \n\n#### Define a preprocessor macro for testing  \n\nBecause we’ve overridden `httpRequest` in our `tests.cpp`, we need to exclude that code from `functions.cpp` when we’re testing. When building tests, we may need to ensure that certain parts of our code behave differently or are excluded. 
We can do this by defining a preprocessor macro `TESTING` which enables conditional compilation, allowing us to selectively include or exclude code when compiling the test target:  \n\nWe define the `TESTING` macro in our `CMakeLists.txt` at the end:  \n\n```cpp\n# Define TESTING macro for this target\ntarget_compile_definitions(tests PRIVATE TESTING)\n```\n\nAnd add the macro wrapper in  `functions.cpp` around the original `httpRequest` function:  \n\n```cpp\n#ifndef TESTING  // Exclude this part when TESTING is defined\nstd::string httpRequest(const std::string& url) {\n   try {\n       http::Request request{url};\n       const auto response = request.send(\"GET\");\n       return std::string{response.body.begin(), response.body.end()};\n   } catch (const std::exception& e) {\n       std::cerr \u003C\u003C \"Request failed, error: \" \u003C\u003C e.what() \u003C\u003C std::endl;\n       return \"\";\n   }\n}\n#endif\n```\n\nRegenerate the CMake configuration and rebuild the source code to verify it works.\n\n```shell\ncmake --build build  \n```\n\n### Write the first tests \n\nNow, let’s write some tests for our air quality application.\n\n#### Test 1: Verify API key retrieval \n\nThis test ensures that the `getApiKey` function retrieves the API key correctly from the environment variable or the configuration file. Add the test case to our `tests.cpp`:\n\n```cpp\n\nTEST_CASE(\"API Key Retrieval\", \"[api]\") {\n   // Set the API_KEY environment variable for testing\n   setenv(\"API_KEY\", \"test_key\", 1);\n   // Test if the key is retrieved correctly\n   REQUIRE(getApiKey() == \"test_key\");\n}\n```\n\nYou can verify that this tests passes by rebuilding the code and running the tests:\n\n```shell\ncmake --build build\n./build/tests\n```\n\n#### Test 2: Geocode the zip code\n\nThis test ensures that the `geocodeZipcode` function returns the correct latitude and longitude for a given zip code using the mock API response function we set up earlier. 
The  `geocodeZipcode` function is supposed to hit an API that returns geographic coordinates based on a zip code. \n\nIn `tests.cpp`, add this test case for the zip code 90210: \n\n```cpp\nTEST_CASE(\"Geocode Zip code\", \"[geocode]\") {\n   std::string apiKey = \"test_key\";\n   std::pair\u003Cdouble, double> coordinates = geocodeZipcode(\"90210\", apiKey);\n   // Check latitude\n   REQUIRE(coordinates.first == 40.7128);\n   // Check longitude \n   REQUIRE(coordinates.second == -74.0060);\n}\n```\n\nThe purpose of this test is to verify that the function `geocodeZipcode` can correctly parse the latitude and longitude from the API response. By hardcoding the expected response, we ensure that the test environment is controlled and predictable.\n\n #### Test 3: Air quality API test\n\nThis test ensures that the `fetchAirQuality` function correctly fetches air quality data using the mock API response function we set up earlier. It verifies that the function constructs the API request properly, sends it, and accurately parses the air quality index (AQI) from the mock JSON response. This validation helps ensure that the overall process of fetching and interpreting air quality data works as intended.\n\n```cpp\nTEST_CASE(\"Fetch Air Quality\", \"[airquality]\") {\n   std::string apiKey = \"test_key\";\n   double lat = 40.7128;\n   double lon = -74.0060;\n   std::string response = fetchAirQuality(lat, lon, apiKey);\n   // Check the response\n   REQUIRE(response == R\"({\"list\": [{\"main\": {\"aqi\": 2}}]})\");\n}\n```\n\n## Build and run the tests\n\nTo  build and compile our application, we'll use the same CMake commands as before:\n\n```cpp\ncmake -S . 
-B build\ncmake --build build\n\n```\n\nAfter building, we can run our tests by executing the test binary:  \n\n```cpp\n./build/tests\n\n```\n\nRunning this command will execute all defined tests, and you will see output indicating whether each test has passed or failed.\n\n![Output showing pass/fail of tests](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749676998/Blog/Content%20Images/2.running-catch2-tests.png)\n\n## Set up GitLab CI/CD\n\nTo automate the testing process each time we push some new code to our repository, let’s set up [GitLab CI/CD](https://about.gitlab.com/topics/ci-cd/). Create a new `.gitlab-ci.yml` configuration file in the root directory. \n\n```yaml\nimage: gcc:latest\n\nvariables:\n GIT_SUBMODULE_STRATEGY: recursive\n\nstages:\n - build\n - test\n\nbefore_script:\n - apt-get update && apt-get install -y cmake\n\ncompile:\n stage: build\n script:\n   - cmake -S . -B build\n   - cmake --build build\n artifacts:\n   paths:\n     - build/\n\ntest:\n stage: test\n script:\n   - ./build/tests --reporter junit -o test-results.xml\n artifacts:\n   reports:\n     junit: test-results.xml\n```\n\nThis CI/CD configuration will compile both the main application and the test suite, then run the tests, generating a JUnit XML report which GitLab uses to display the test results.  \n\n- In `before_script`, we added an installation for `cmake`, and `git submodule sync --recursive` which initializes and updates our submodules (catch2). \n- In the `test` stage, `--reporter junit -o test-results.xml` specifies that the test results should be treated as a JUnit report which allows GitLab CI to display results in the UI. This is super helpful when you have several tests in your application.  
\n\nWe also need to [add an environmental variable](https://docs.gitlab.com/ee/ci/variables/#define-a-cicd-variable-in-the-ui) with the `API_KEY` in project settings on GitLab.\n\nDon’t forget to add all new files to Git, and commit and push the changes in a new MR:\n\n```shell\ngit checkout -b tests-catch2-cicd\n\ngit add includes/functions.{h,cpp} tests.cpp .gitlab-ci.yml \ngit add CMakeLists.txt main.cpp \n\ngit commit -vm “Add Catch2 tests and CI/CD configuration”\ngit push \n```\n\n## View the test report\n\nAfter pushing our code changes, we can review the results of our tests in the GitLab UI in the Pipeline view in the `Tests` tab:\n\n![GitLab pipeline view shows test results](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749676998/Blog/Content%20Images/2.0-passed-tests-UI.png)\n\n## Simulate a test failure\n\nTo demonstrate how our UI will handle test failures, we can intentionally introduce a bug into our code and observe the resulting behavior. \n\nLet's modify our `parseAirQualityResponse` function to introduce an error. We can change the AQI category for an AQI value of 2 from \"Fair\" to \"Poor.\" This change will cause the related test to fail, allowing us to see the test failure in the GitLab UI.\n\nIn `functions.cpp`, find the `parseAirQualityResponse` function and modify the switch statement for case `2` to set the `Poor` value instead of `Fair`:\n\n```cpp\n               // Intentional bug:\n               case 2:\n                   aqiCategory = \"Poor\";\n                   break;\n```\n\nIn tests.cpp, add a new test case that directly checks the output of the `parseAirQualityResponse` function. This test ensures that the `parseAirQualityResponse` function correctly parses and categorizes the air quality data from the mock API response. 
This function takes a JSON response, extracts the AQI value, and translates it into a human-readable category.\n\n```cpp\n\nTEST_CASE(\"Parse Air Quality Response\", \"[airquality]\") {\n   std::string mockResponse = R\"({\"list\": [{\"main\": {\"aqi\": 2}}]})\";\n   std::string result = parseAirQualityResponse(mockResponse);\n   // This should fail due to the intentional bug\n   REQUIRE(result == \"2 (Fair)\");\n}\n\n```\n\nCommit the changes, and push them into the MR. Open the MR in your browser. \n\nBy introducing an intentional bug in this function, we can see how a test failure is reported in GitLab's pipelines UI. We must add, commit, and push the changes to our repository to view the test failure in the pipeline. \n\n![Simulated test failure](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749676998/Blog/Content%20Images/2.1-failed-test-simulation.png)\n\n![Details of the simulated failed test](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749676998/Blog/Content%20Images/2.2-failed-test-simulation-details.png)\n\nOnce we've verified this simulated test failure, we can use `git revert` to roll back that commit. \n\n```shell\ngit revert\n```\n\n## Add and test a new feature\n\nLet’s put what you've learned together by creating a new feature in the air quality application and then writing a test for that feature using Catch2. The new feature will fetch the current weather forecast for the provided zip code.\n\nFirst, we'll define a `Weather` struct and add the function prototype in our `functions.h` file (inside the `#endif`):\n\n```cpp\n\nstruct Weather {\n   std::string main;\n   std::string description;\n   double temperature;\n};\n\nWeather getCurrentWeather(const std::string& apiKey, double lat, double lon);\n```\n\nThen, we implement the `getCurrentWeather` function in `functions.cpp`. This function calls the OpenWeatherMap API to retrieve the current weather and parses the JSON response. 
This code was generated using [GitLab Duo](https://about.gitlab.com/gitlab-duo/). If you start typing `Weather getCurrentWeather(const std::string& apiKey, double lat, double lon) {` to complete the function, GitLab Duo will provide the function contents for you, line by line. \n\n![GitLab Duo completing the function contents](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749676998/Blog/Content%20Images/3.get-current-weather-function-completion.png)\n\nHere's what your `getCurrentWeather()` function can look like: \n\n```cpp\n\nWeather getCurrentWeather(const std::string& apiKey, double lat, double lon) {\n   std::string url = \"http://api.openweathermap.org/data/2.5/weather?lat=\" + std::to_string(lat) + \"&lon=\" + std::to_string(lon) + \"&appid=\" + apiKey;\n   std::string response = httpRequest(url);\n   auto json = nlohmann::json::parse(response);\n   Weather weather;\n   if (!json.is_null()) {\n       weather.main = json[\"weather\"][0][\"main\"];\n       weather.description = json[\"weather\"][0][\"description\"];\n       weather.temperature = json[\"main\"][\"temp\"];\n   }\n   return weather;\n}\n```\n\nAnd, finally, we update our `main.cpp` file in the main function to output the current forecast (and converting Kelvin to Celsius for the output):  \n\n```cpp\n   Weather currentWeather = getCurrentWeather(apiKey, lat, lon);\n   if (currentWeather.main.empty()) {\n       std::cerr \u003C\u003C \"Failed to fetch current weather.\" \u003C\u003C std::endl;\n       return 1;\n   }\n\n   std::cout \u003C\u003C \"Current Weather: \" \u003C\u003C currentWeather.main \u003C\u003C \", \" \u003C\u003C currentWeather.description\n       \u003C\u003C \", temperature \" \u003C\u003C currentWeather.temperature - 273.15 \u003C\u003C \" °C\" \u003C\u003C std::endl;\n```\n\nWe can confirm that our new feature is working by building and running the application:  \n\n```shell\ncmake --build build\n./build/air_quality_app \n```\n\nAnd we should see the following 
output or similar in case the weather is different on the day the code is run :)\n\n```\nAir Quality Index for Zip Code 90210: 2 (Poor)\nCurrent Weather: Clouds, broken clouds, temperature 23.2 °C\n```\n\nWith all new functionality, there should be testing! We can also write a test to check whether the application is fetching and parsing a weather forecast correctly. This test checks that the function returns a list containing the correct number of forecast entries and that each entry has accurate data regarding time and temperature.\n\n```cpp\nTEST_CASE(\"Current Weather functionality\", \"[api]\") {\n   auto weather = getCurrentWeather(\"dummyApiKey\", 40.7128, -74.0060);\n   // Ensure main weather description is not empty\n   REQUIRE_FALSE(weather.main.empty());\n   // Validate that temperature is a reasonable value\n   REQUIRE(weather.temperature > 0); \n}\n```\n\nWe’ll also have to update our `mockHTTPRequest` function in `tests.cpp` to account for this new test. Modify the if-condition with a new else-if branch checking for the `weather` string in the URL:  \n\n```cpp\n// Mock HTTP request function that simulates API responses\nstd::string mockHttpRequest(const std::string &url)\n{\n   if (url.find(\"geo\") != std::string::npos)\n   {\n       // Mock response for geocoding\n       return R\"({\"lat\": 40.7128, \"lon\": -74.0060})\";\n   }\n   else if (url.find(\"air_pollution\") != std::string::npos)\n   {\n       // Mock response for air quality\n       return R\"({\"list\": [{\"main\": {\"aqi\": 2}}]})\";\n   }\n   else if (url.find(\"weather\") != std::string::npos)\n   {\n       // Mock response for current weather\n       return R\"({\n          \"weather\": [{\"main\": \"Clear\", \"description\": \"clear sky\"}],\n          \"main\": {\"temp\": 298.55}\n      })\";\n   }\n   return \"{}\";\n}\n```\n\nAnd verify that our tests are working by rebuilding and running our tests:  \n\n```shell\ncmake --build build \n./build/tests\n```\n\nAll tests should 
pass, including the new one for Current Weather Functionality. \n\n## Optimize tests.cpp with sections\n\nTo better organize our tests as the project grows and categorize each functionality, we can use Catch2’s `SECTION` macro. The `SECTION` macro allows you to define logically separate test scenarios within a single test case, providing a clean way to test different behaviors or conditions without requiring multiple separate test cases or multiple files. This approach keeps related tests bundled together and also improves test maintainability by allowing shared setup code to be executed repeatedly for each section.\n\nSince some of our functionality is preprocessing data to retrieve information, let’s section our tests as such:\n- preprocessing steps: \n\t- API key validation\n\t- geocoding validation\n-  API data retrieval:\n\t- air pollution retrieval \n\t- forecast retrieval\n\nHere’s what our `tests.cpp` will look like if organized by sections: \n\n```cpp\n#include \"functions.h\"\n#include \u003Ccatch2/catch_test_macros.hpp>\n#include \u003Cstring>\n\n// Mock HTTP request function that simulates API responses\nstd::string mockHttpRequest(const std::string &url)\n{\n   if (url.find(\"geo\") != std::string::npos)\n   {\n       // Mock response for geocoding\n       return R\"({\"lat\": 40.7128, \"lon\": -74.0060})\";\n   }\n   else if (url.find(\"air_pollution\") != std::string::npos)\n   {\n       // Mock response for air quality\n       return R\"({\"list\": [{\"main\": {\"aqi\": 2}}]})\";\n   }\n   else if (url.find(\"weather\") != std::string::npos)\n   {\n       // Mock response for current weather\n       return R\"({\n          \"weather\": [{\"main\": \"Clear\", \"description\": \"clear sky\"}],\n          \"main\": {\"temp\": 298.55}\n      })\";\n   }\n   return \"{}\";\n}\n\n// Overriding the actual httpRequest function with the mockHttpRequest for testing\nstd::string httpRequest(const std::string &url)\n{\n   return mockHttpRequest(url);\n}\n\n// 
Preprocessing Steps\nTEST_CASE(\"Preprocessing Steps\", \"[preprocessing]\") {\n   SECTION(\"API Key Retrieval\") {\n       // Set the API_KEY environment variable for testing\n       setenv(\"API_KEY\", \"test_key\", 1);\n       // Test if the key is retrieved correctly\n       REQUIRE_FALSE(getApiKey().empty());\n   }\n\n   SECTION(\"Geocode Functionality\") {\n       std::string apiKey = \"test_key\";\n       std::pair\u003Cdouble, double> coordinates = geocodeZipcode(\"90210\", apiKey);\n       // Check latitude\n       REQUIRE(coordinates.first == 40.7128);\n       // Check longitude \n       REQUIRE(coordinates.second == -74.0060);\n   }\n}\n\n// API Data Retrieval\nTEST_CASE(\"API Data Retrieval\", \"[data_retrieval]\") {\n   SECTION(\"Air Quality Functionality\") {\n       std::string apiKey = \"test_key\";\n       double lat = 40.7128;\n       double lon = -74.0060;\n       std::string response = fetchAirQuality(lat, lon, apiKey);\n       // Check the response\n       REQUIRE(response == R\"({\"list\": [{\"main\": {\"aqi\": 2}}]})\");\n   }\n\n   SECTION(\"Current Weather Functionality\") {\n       auto weather = getCurrentWeather(\"dummyApiKey\", 40.7128, -74.0060);\n       // Ensure main weather description is not empty\n       REQUIRE_FALSE(weather.main.empty());\n       // Validate that temperature is a reasonable value\n       REQUIRE(weather.temperature > 0);\n   }\n}\n```\n\nRebuild the code and run the tests again to verify.\n\n```shell\ncmake --build build \n./build/tests\n```\n\n## Next steps\n\nIn this post, we covered how to integrate unit testing into a `C++` project using Catch2 testing framework and GitLab CI/CD and set up basic tests for our reference air quality application project.\n\nTo explore these concepts further, you can check out the [Catch2 documentation](https://github.com/catchorg/Catch2) and [GitLab's Unit test report examples documentation](https://docs.gitlab.com/ee/ci/testing/unit_test_report_examples.html). 
\n\nFor an advanced async exercise, you could build upon this project by using GitLab Duo to implement a feature that retrieves and analyzes historical air quality data and add code quality checks into the CI/CD pipeline. Happy coding! \n","devsecops",[724,932,1035,9,703],{"slug":1159,"featured":91,"template":680},"develop-c-unit-testing-with-catch2-junit-and-gitlab-ci","content:en-us:blog:develop-c-unit-testing-with-catch2-junit-and-gitlab-ci.yml","Develop C Unit Testing With Catch2 Junit And Gitlab Ci","en-us/blog/develop-c-unit-testing-with-catch2-junit-and-gitlab-ci.yml","en-us/blog/develop-c-unit-testing-with-catch2-junit-and-gitlab-ci",{"_path":1165,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":1166,"content":1172,"config":1179,"_id":1181,"_type":14,"title":1182,"_source":16,"_file":1183,"_stem":1184,"_extension":19},"/en-us/blog/developing-gitlab-duo-a-roundup-of-recent-chat-enhancements",{"title":1167,"description":1168,"ogTitle":1167,"ogDescription":1168,"noIndex":6,"ogImage":1169,"ogUrl":1170,"ogSiteName":693,"ogType":694,"canonicalUrls":1170,"schema":1171},"Developing GitLab Duo: A roundup of recent Chat enhancements","Discover the latest improvements to GitLab Duo Chat, including a new integration, prompt cancellation, and architectural upgrades.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098374/Blog/Hero%20Images/Blog/Hero%20Images/blog-hero-banner-1-0178-820x470-fy25_7JlF3WlEkswGQbcTe8DOTB_1750098374059.png","https://about.gitlab.com/blog/developing-gitlab-duo-a-roundup-of-recent-chat-enhancements","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Developing GitLab Duo: A roundup of recent Chat enhancements\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Jannik Lehmann\"},{\"@type\":\"Person\",\"name\":\"David O'Regan\"}],\n        \"datePublished\": \"2024-07-10\",\n      
}",{"title":1167,"description":1168,"authors":1173,"heroImage":1169,"date":1176,"body":1177,"category":10,"tags":1178},[1174,1175],"Jannik Lehmann","David O'Regan","2024-07-10","GitLab is committed to [continuously improving GitLab Duo Chat](https://gitlab.com/gitlab-org/gitlab/-/issues/430124), our AI assistant, to meet the evolving needs of our users. Here are some recent enhancements that will streamline your workflow and boost productivity.\n\n> Discover the future of AI-driven software development with our GitLab 17 virtual launch event. [Watch today](https://about.gitlab.com/seventeen/)!\n\n## Vulnerability Explanation: A new integration\n\nWe've reached a significant milestone in the evolution of Chat: the integration of [GitLab Duo Vulnerability Explanation](https://about.gitlab.com/the-source/ai/understand-and-resolve-vulnerabilities-with-ai-powered-gitlab-duo/). This marks the first feature from our [GitLab Duo](https://about.gitlab.com/gitlab-duo/) platform to be integrated into Chat by a team outside of the AI group, showcasing the collaborative spirit and cross-functional capabilities at GitLab.\n\n### Key highlights of this integration:\n\n- **Swift execution:** The team moved from a spike to implementation in just three weeks, demonstrating agility and execution.\n- **Cross-team collaboration:** This integration was led by teams outside the AI group, paving the way for more diverse feature integrations in the future.\n- **Enhanced security insights:** Users will soon be able to leverage Chat to gain deeper understanding of vulnerabilities detected in their projects.\n\nThis integration represents a significant step forward in making Chat an even more powerful and versatile tool for developers, particularly in the realm of security.\n\n## Enhanced context awareness\n\nWe've made significant strides in improving Chat's context awareness, making it more intelligent and helpful in various scenarios.\n\n### Always-available knowledge\n\nGitLab Duo Chat 
always has access to:\n- GitLab documentation\n- General programming and coding knowledge\n\nIt's crucial to understand that Chat does not have unrestricted access to your entire GitLab instance or codebase. It only processes the specific information you provide in your query or what's immediately relevant to your current view in the GitLab UI or IDE.\n\nWe're continuously working to expand Chat's contextual awareness to include more types of content, always with a focus on user privacy and data security. This gradual expansion aims to make Chat an even more powerful assistant for your development workflow while maintaining appropriate data access boundaries.\n\n### Expanded contextual knowledge\n\nGitLab Duo Chat now has [a better understanding of the context you're working in](https://docs.gitlab.com/ee/user/gitlab_duo_chat/#the-context-chat-is-aware-of), both in the GitLab UI and IDEs. Here's a breakdown of what Chat is aware of.\n\nIn the GitLab UI:\n- **Epics** - Chat understands when you refer to \"this epic\" or use the epic's URL.\n- **Issues** - Similar to epics, Chat recognizes \"this issue\" or the issue's URL.\n- **Code files** - When viewing a single file, Chat can interpret requests about \"this code\" or \"this file\".\n\nIn IDEs:\n- **Selected code** - Chat can analyze code you've selected when you ask about \"this code\" or \"this file\".\n- **Epics and issues** -  Chat can understand context when you provide the URL.\n\nAdditionally, when using slash commands like `/explain`, `/refactor`, or `/tests` in IDEs, Chat has access to the selected code.\n\n![Screenshot of GitLab Duo Chat window](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098382/Blog/Content%20Images/Blog/Content%20Images/image5_aHR0cHM6_1750098382107.png)\n\n### Chat history and caching\nGitLab Duo Chat retains the last 50 messages in the chat history. This history expires three days after last use. 
Closing your browser or IDE will not permanently delete your chat history within this timeframe, but it's important to note that long-term persistence of chat data is not currently supported.\n\n## Prompt cancellation: Stop responses on demand\n\nOne of the most anticipated features is now available: [prompt cancellation](https://gitlab.com/groups/gitlab-org/-/epics/13662). Users can now cancel ongoing prompts in Chat on GitLab.com, giving you [more control over your interactions](https://gitlab.com/gitlab-org/gitlab/-/issues/458397).\n- Available now: This feature has been rolled out on GitLab.com.\n- Coming soon: This feature will be available for self-managed instances in our next release. GitLab Dedicated users will receive it in the monthly upgrade.\n- Work in progress: [Integration for editor extensions](https://docs.gitlab.com/ee/editor_extensions/) - [follow along in the issue](https://gitlab.com/gitlab-org/editor-extensions/gitlab-jetbrains-plugin/-/issues/335).\n\nThis enhancement allows you to stop a response if you've sent a prompt too early or had a change of thought while waiting. It's a small but powerful feature that can save you time and frustration.\n\nTo cancel a prompt in GitLab Duo Chat, follow these steps:\n1. Open GitLab Duo Chat on GitLab.com.\n2. Start typing a prompt or question such as `What is this issue about?`.\n\n![Screen showing the start of how to cancel prompts in GitLab Duo Chat](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098382/Blog/Content%20Images/Blog/Content%20Images/image4_aHR0cHM6_1750098382108.png)\n\n3. After sending the prompt, if you wish to cancel the response, look for the new \"Cancel\" button that appears while Chat is generating a response.\n\n![Screenshot of Chat with Cancel button](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098382/Blog/Content%20Images/Blog/Content%20Images/image3_aHR0cHM6_1750098382110.png)\n\n4. 
Click the \"Cancel\" button to immediately stop the response generation.\n\n![Screenshot showing response stopped](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098382/Blog/Content%20Images/Blog/Content%20Images/image1_aHR0cHM6_1750098382112.png)\n\n## Architectural improvements\n\nBehind the scenes, we've been working on architectural improvements to make GitLab Duo Chat more robust and efficient:\n\n- Moving to Language Server Protocol ([LSP](https://gitlab.com/gitlab-org/editor-extensions/gitlab-lsp)): This effort improves the integration of Chat with various development environments. \n- GitLab Language Server is an experimental TypeScript project that provides a common interface for IDE extensions to build GitLab functionality. It currently supports GitLab Duo Code Suggestions and upcoming will support GitLab Duo Chat.\n\nWhile this change primarily affects the underlying architecture, users may notice:\n- Improved responsiveness and performance when using Chat across different IDEs and editors.\n- More consistent behavior of Chat features across various development environments.\n- Enhanced ability to add new features and improvements in the future.\n\nCheck out our introduction to how GitLab Language Server powers Code Suggestions in this video walkthrough:\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/VQlWz6GZhrs?si=_G5mOyYqEGAmnRv4\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\n## What's next?\n\nWe're continuously improving GitLab Duo Chat. Here are some highlights:\n\n- We're in the process of migrating our AI features to [Claude 3.5 Sonnet](https://gitlab.com/gitlab-org/gitlab/-/issues/468334). 
This upgrade will bring improved performance and capabilities to Chat and other AI-powered features.\n- We're actively working on [enabling Chat to work with custom, self-hosted models](https://gitlab.com/groups/gitlab-org/-/epics/13760). This will allow organizations to use their own AI models with Chat, providing more control over the AI's knowledgebase and potentially improving performance for domain-specific tasks.\n- We're currently finishing the [synchronization of messages across all clients](https://gitlab.com/gitlab-org/gitlab/-/issues/418760), including WebUI, to ensure seamless communication and keep all your clients in sync, enhancing your collaboration experience.\n- We’re [migrating the “Summarize Comments” feature to Chat](https://gitlab.com/gitlab-org/gitlab/-/merge_requests/156650). You'll be able to summarize multiple comments on an issue directly within Chat, helping you quickly understand the main points and key takeaways from discussions, thereby improving your collaborative experience.\n\nWe look forward to [hearing your feedback on these enhancements](https://gitlab.com/gitlab-org/gitlab/-/issues/430124). 
Stay tuned for more updates as we continue to evolve GitLab Duo Chat.\n\n> Find out even more about [how we are developing GitLab Duo](https://about.gitlab.com/blog/developing-gitlab-duo-series/) with our ongoing series.\n",[9,704,725],{"slug":1180,"featured":91,"template":680},"developing-gitlab-duo-a-roundup-of-recent-chat-enhancements","content:en-us:blog:developing-gitlab-duo-a-roundup-of-recent-chat-enhancements.yml","Developing Gitlab Duo A Roundup Of Recent Chat Enhancements","en-us/blog/developing-gitlab-duo-a-roundup-of-recent-chat-enhancements.yml","en-us/blog/developing-gitlab-duo-a-roundup-of-recent-chat-enhancements",{"_path":1186,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":1187,"content":1193,"config":1199,"_id":1201,"_type":14,"title":1202,"_source":16,"_file":1203,"_stem":1204,"_extension":19},"/en-us/blog/developing-gitlab-duo-ai-impact-analytics-dashboard-measures-the-roi-of-ai",{"title":1188,"description":1189,"ogTitle":1188,"ogDescription":1189,"noIndex":6,"ogImage":1190,"ogUrl":1191,"ogSiteName":693,"ogType":694,"canonicalUrls":1191,"schema":1192},"Developing GitLab Duo: AI Impact analytics dashboard measures the ROI of AI","Our blog series continues spotlighting a new feature that provides detailed metrics, such as the Code Suggestions Usage Rate, to help understand the effectiveness of AI investments.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098611/Blog/Hero%20Images/Blog/Hero%20Images/blog-hero-banner-1-0178-820x470-fy25_7JlF3WlEkswGQbcTe8DOTB_1750098611370.png","https://about.gitlab.com/blog/developing-gitlab-duo-ai-impact-analytics-dashboard-measures-the-roi-of-ai","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Developing GitLab Duo: AI Impact analytics dashboard measures the ROI of AI\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Haim Snir\"}],\n        \"datePublished\": \"2024-05-15\",\n      
}",{"title":1188,"description":1189,"authors":1194,"heroImage":1190,"date":1196,"body":1197,"category":10,"tags":1198},[1195],"Haim Snir","2024-05-15","***Generative AI marks a monumental shift in the software development industry, making it easier to develop, secure, and operate software. Our new blog series, written by our product and engineering teams, gives you an inside look at how we create, test, and deploy the AI features you need integrated throughout the enterprise. Get to know new capabilities within GitLab Duo and how they will help DevSecOps teams deliver better results for customers.***\n\nAs organizations adopt [GitLab Duo](https://about.gitlab.com/gitlab-duo/), our suite of AI features to power DevSecOps workflows, business and engineering leaders need real-time visibility into the technology's ROI. Granular usage data, performance improvements, the trade-off between speed, security, and quality, and other [productivity metrics](https://about.gitlab.com/blog/measuring-ai-effectiveness-beyond-developer-productivity-metrics/) are essential to evaluate the effectiveness of AI in software development. 
That's why we created the AI Impact analytics dashboard for GitLab Duo, available in GitLab 17.0, as a new way to measure the ROI of AI.\n\n> [Take an interactive tour of the AI Impact analytics dashboard](https://gitlab.navattic.com/ai-impact).\n\n## Understanding the ROI of GitLab Duo AI-powered capabilities\n\nTo properly evaluate AI's impact on the software development lifecycle, organizations have told us they want to:\n- visualize which metrics improved as a result of investments in AI\n- compare the performance of teams that are using AI against teams that are not using AI\n- track the progress of AI adoption\n- automate insights extraction from a large volume of performance data\n\nAI Impact analytics dashboard features these capabilities and more with customizable visualization, which enables teams to:\n- **Monitor AI adoption:** Observing AI adoption rates enables organizations to evaluate organizational strategies to maximize the ROI on their technology investments. \n- **Track performance improvements:** By tracking performance metrics and observing changes after the adoption of AI, leaders can quickly assess the benefits and business value of AI features.\n\n## What is the AI Impact analytics dashboard?\n\nIn this first release of the AI Impact analytics dashboard, we focus on providing insights and metrics about GitLab Duo Code Suggestions adoption, including:\n\n- **Detailed usage metrics:** Discover the ratio of monthly Code Suggestions usage compared to the total number of unique code contributors to know how deeply Code Suggestions is adopted within your teams.\n- **Correlation observations:** Examine how trends in AI usage within a project or across a group influence other crucial productivity metrics, displayed for the current month and the trailing six months. \n    - For this correlation analysis we added a new metric \"Code Suggestions Usage Rate\" as the Independent Variable (the cause). 
The monthly Code Suggestions Usage Rate is calculated as the number of monthly unique Code Suggestions users divided by total monthly unique [contributors](https://docs.gitlab.com/ee/user/profile/contributions_calendar.html#user-contribution-events). GitLab considers the total monthly unique code contributors, which means only users with pushed events are included in the calculation.\n    - As Dependent Variables (the effect), we added these [performance metrics](https://docs.gitlab.com/ee/user/analytics/value_streams_dashboard.html#dashboard-metrics-and-drill-down-reports): Cycle Time, Lead Time and Deployment Frequency. And as [Quality and Security Metrics](https://docs.gitlab.com/ee/user/analytics/value_streams_dashboard.html#dashboard-metrics-and-drill-down-reports), we added Change Failure Rate and Critical Vulnerabilities. \n- **Comparison view:**  Understand the difference in the performance of teams that are and are not using AI, and manage the trade-off between speed, quality, and security exposure.\n\n![Comparison of AI usage and SDLC performance](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098621/Blog/Content%20Images/Blog/Content%20Images/image4_aHR0cHM6_1750098620998.png)\n\n## What’s next for the AI Impact analytics dashboard?\n\nLooking ahead, we have exciting plans to expand the capabilities of the AI Impact analytics dashboard. Here are some of the highlights:\n\n1. New tile visualizations such as \"GitLab Duo Seats: Assigned and Used,\" \"Code Suggestions: Acceptance Rate %,\" and \"GitLab Duo Chat: Unique Users\"  to gain a deeper insight into usage patterns for GitLab Duo.\n\n![AI Impact - image 2](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098621/Blog/Content%20Images/Blog/Content%20Images/Screenshot_2024-07-17_at_12.50.31_aHR0cHM6_1750098620999.png)\n\n2. 
New comparison bar chart to help users observe how changes in one metric correlate with changes in others:\n\n![AI Impact comparison bar chart](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098621/Blog/Content%20Images/Blog/Content%20Images/image3_aHR0cHM6_1750098621000.png)\n\n3. AI statistics in the [Contribution analytics report](https://docs.gitlab.com/ee/user/group/contribution_analytics/index.html) to understand how users interact with AI features. See which users are leveraging AI features and whether their performance has changed over time:\n\n![Contribution analytics report](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098621/Blog/Content%20Images/Blog/Content%20Images/image1_aHR0cHM6_1750098621001.png)\n\n## Get started today\n\nWe're excited about the potential of the AI Impact analytics dashboard to not only demonstrate the real-world business outcomes of AI but also to drive more informed decisions regarding future AI as optimization for the DevSecOps lifecycle. 
For more information about what is coming next and to share feedback or questions, [please visit our AI Impact analytics dashboard epic](https://gitlab.com/groups/gitlab-org/-/epics/12978).\n\nStart your [free trial of GitLab Duo and the AI Impact analytics dashboard today](https://about.gitlab.com/gitlab-duo/#free-trial).\n\n## Read more of the \"Developing GitLab Duo\" series\n\n- [Developing GitLab Duo: How we validate and test AI models at scale](https://about.gitlab.com/blog/developing-gitlab-duo-how-we-validate-and-test-ai-models-at-scale/)\n- [Developing GitLab Duo: How we are dogfooding our AI features](https://about.gitlab.com/blog/developing-gitlab-duo-how-we-are-dogfooding-our-ai-features/)\n- [Developing GitLab Duo: Secure and thoroughly test AI-generated code](https://about.gitlab.com/blog/how-gitlab-duo-helps-secure-and-thoroughly-test-ai-generated-code/)\n- [Developing GitLab Duo: Blending AI and Root Cause Analysis to fix CI/CD pipelines](https://about.gitlab.com/blog/developing-gitlab-duo-blending-ai-and-root-cause-analysis-to-fix-ci-cd/)\n\n_Disclaimer: This blog contains information related to upcoming products, features, and functionality. It is important to note that the information in this blog post is for informational purposes only. Please do not rely on this information for purchasing or planning purposes. As with all projects, the items mentioned in this blog and linked pages are subject to change or delay. 
The development, release, and timing of any products, features, or functionality remain at the sole discretion of GitLab._",[9,1037,704],{"slug":1200,"featured":91,"template":680},"developing-gitlab-duo-ai-impact-analytics-dashboard-measures-the-roi-of-ai","content:en-us:blog:developing-gitlab-duo-ai-impact-analytics-dashboard-measures-the-roi-of-ai.yml","Developing Gitlab Duo Ai Impact Analytics Dashboard Measures The Roi Of Ai","en-us/blog/developing-gitlab-duo-ai-impact-analytics-dashboard-measures-the-roi-of-ai.yml","en-us/blog/developing-gitlab-duo-ai-impact-analytics-dashboard-measures-the-roi-of-ai",{"_path":1206,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":1207,"content":1213,"config":1221,"_id":1223,"_type":14,"title":1224,"_source":16,"_file":1225,"_stem":1226,"_extension":19},"/en-us/blog/developing-gitlab-duo-blending-ai-and-root-cause-analysis-to-fix-ci-cd",{"title":1208,"description":1209,"ogTitle":1208,"ogDescription":1209,"noIndex":6,"ogImage":1210,"ogUrl":1211,"ogSiteName":693,"ogType":694,"canonicalUrls":1211,"schema":1212},"GitLab Duo: AI-powered CI/CD pipeline root cause analysis","Discover how we've infused Root Cause Analysis with AI to help remedy broken CI/CD pipelines, including example scenarios and take-away exercises.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097321/Blog/Hero%20Images/Blog/Hero%20Images/blog-hero-banner-1-0178-820x470-fy25_7JlF3WlEkswGQbcTe8DOTB_1750097321081.png","https://about.gitlab.com/blog/developing-gitlab-duo-blending-ai-and-root-cause-analysis-to-fix-ci-cd","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Developing GitLab Duo: Blending AI and Root Cause Analysis to fix CI/CD pipelines\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Rutvik Shah\"},{\"@type\":\"Person\",\"name\":\"Michael Friedrich\"}],\n        \"datePublished\": \"2024-06-06\",\n      
}",{"title":1214,"description":1209,"authors":1215,"heroImage":1210,"date":1218,"body":1219,"category":10,"tags":1220},"Developing GitLab Duo: Blending AI and Root Cause Analysis to fix CI/CD pipelines",[1216,1217],"Rutvik Shah","Michael Friedrich","2024-06-06","___Generative AI marks a monumental shift in the software development industry, making it easier to develop, secure, and operate software. Our new blog series, written by our product and engineering teams, gives you an inside look at how we create, test, and deploy the AI features you need integrated throughout the enterprise. Get to know new capabilities within GitLab Duo and how they will help DevSecOps teams deliver better results for customers.___\n\nHave you ever encountered a broken [CI/CD](https://about.gitlab.com/topics/ci-cd/benefits-continuous-integration/) pipeline and had to halt your DevSecOps workflow, or even delay software deployment, as you try to figure out the root cause? Traditionally, when something goes wrong in the process of creating software, developers have to troubleshoot, dig through log files, and often do a lot of trial and error development. [GitLab Duo Root Cause Analysis](https://about.gitlab.com/gitlab-duo/), part of our suite of AI-powered features, removes the guesswork by determining the root cause for a failed CI/CD pipeline. In this article, you'll learn what Root Cause Analysis is and how to apply the AI-powered GitLab Duo feature to your DevSecOps workflow.\n\n> Discover the future of AI-driven software development with our GitLab 17 virtual launch event. [Watch today!](https://about.gitlab.com/seventeen/)\n\n### What is Root Cause Analysis?\n\nGitLab Duo Root Cause Analysis is an AI-powered feature that assists you in determining a root cause and suggesting a fix for a CI/CD job log failure by analyzing the logs.\n\nWhile Root Cause Analysis is often seen in product incident management, its workflows and debugging practices can be found in any DevSecOps workflow. 
Ops teams, administrators, and platform engineers are challenged by infrastructure-as-code (IaC) deployment errors, Kubernetes and GitOps problems, and long stack traces while investigating pipeline failures.\n\nGitLab Duo Root Cause Analysis keeps everyone in the same interface and uses AI-powered help to summarize, analyze, and propose fixes so that organizations can release secure software faster.\n\nA pipeline can encounter failures for a variety of reasons, including syntax errors in the code, missing dependencies that the pipeline relies on, test failures during the build process, Kubernetes and IaC deployment timeouts, and numerous other potential issues. When such failures occur, it becomes the responsibility of everyone to meticulously review the logs generated by the pipeline. This job log review process involves scrutinizing the detailed output to identify the specific errors and pinpoint the root cause of the pipeline failure. For example, the following pipeline has multiple job failures that need to be investigated and fixed.\n\n![Image depicting multiple job failures](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097332/Blog/Content%20Images/Blog/Content%20Images/image3_aHR0cHM6_1750097332601.png)\n\nThe duration required to fix these failures can vary significantly and is largely influenced by several factors such as:\n- the developer's familiarity with the project\n- their level of experience in dealing with similar issues\n- their overall skill level in troubleshooting and problem-solving within the context of the pipeline.\n\nManual analysis can be exceedingly challenging and time-consuming, given that log data consists of application logs and system messages with a wide variety of potential sources of failures. A typical pipeline fix can consist of several iterations and context switching. The complexity and the unstructured nature of the logs is a perfect fit for speeding up the task using generative AI.  
Using AI can reduce the time to identify and fix a pipeline error significantly and also lower the barrier of expertise that would be needed to fix a pipeline such as the above.\n\nWatch GitLab Duo Root Cause Analysis in action:\n\n\u003C!-- blank line -->\n\n\u003Cfigure class=\"video_container\">\n\n \u003Ciframe src=\"https://www.youtube.com/embed/sTpSLwX5DIs?si=J6-0Bf6PtYjrHX1K\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\n\u003C/figure>\n\n\u003C!-- blank line -->\n\n### How does Root Cause Analysis work?\n\n[Root Cause Analysis](https://docs.gitlab.com/ee/user/ai_experiments.html#root-cause-analysis) works by forwarding a portion of the CI/CD job log to the [GitLab AI Gateway](https://docs.gitlab.com/ee/architecture/blueprints/ai_gateway/). GitLab ensures that the portion sent will fit inside the large language model (LLM) token limits alongside a prompt that has been pre-crafted to provide insights into why the job might have failed. The prompt also instructs the LLM to provide an example of how a user might fix a broken job.\n\nHere are two example scenarios where Root Cause Analysis can provide assistance.\n\n#### 1. Analyze a Python dependency error\n\nA Python application can import package modules with functionality that is not provided in the standard library. The project [Challenge - Root Cause Analysis - Python Config](https://gitlab.com/gitlab-da/use-cases/ai/ai-workflows/gitlab-duo-challenges/root-cause-analysis/challenge-root-cause-analysis-python-config) implements an application that parses configuration and initializes an SQLite database, which both work well without any dependencies. It uses best practices in CI/CD with a Python environment and caching. The latest feature implementation adds a Redis caching client, and now the CI/CD build is failing for some reason. 
\n\nBy using Root Cause Analysis, you can immediately learn that the `ModuleNotFoundError` text means that the module is actually not installed in the Python environment. GitLab Duo also suggests an example fix: Installing the Redis module through the PIP package manager. \n\n![Image depicting 'modulenotfounderror' and GL Duo suggested resolution](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097332/Blog/Content%20Images/Blog/Content%20Images/image2_aHR0cHM6_1750097332602.png)\n\nThe failing pipeline can be viewed [here](https://gitlab.com/gitlab-da/use-cases/ai/ai-workflows/gitlab-duo-challenges/root-cause-analysis/challenge-root-cause-analysis-python-config/-/jobs/6992716398). \n\nThe Root Cause Analysis prompt provides a summary of the problem, which seems to be a problem with a missing `redis` module. Let's try to fix the problem by installing the `redis` module. You can either call `pip install redis` in the CI/CD job `script` section, or use a more sophisticated approach with the `requirements.txt` file. The latter is useful for a single source of truth for dependencies installed in the development environment and CI/CD pipelines.\n\n```yaml\ntest:\n  extends: [.python-req]\n  stage: test \n  before_script:\n    # [🦊] hint: Root cause analysis.\n    # Solution 1: Install redis using pip\n    - pip install redis\n    # Solution 2: Add redis to requirements.txt, use pip\n    - pip install -r requirements.txt \n\n  script:\n    - python src/main.py\n```\n\nAfter fixing the missing Python dependency, the CI/CD job fails again. Use Root Cause Analysis again to learn that no Redis service is running in the job. 
Switch to using GitLab Duo Chat and use the prompt `How to start a Redis service in CI/CD` to learn how to configure the `services` attribute in the CI/CD job.\n\n![Depicts the prompt for how to start a Redis service](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097333/Blog/Content%20Images/Blog/Content%20Images/image6_aHR0cHM6_1750097332602.png)\n\nModify the `.gitlab-ci.yml` with the `test` job, and specify the `redis` service.\n\n```yaml\ntest:\n  extends: [.python-req]\n  stage: test \n  before_script:\n    # [🦊] hint: Root cause analysis.\n    # Solution 1: Install redis using pip\n    - pip install redis\n    # Solution 2: Add redis to requirements.txt, use pip\n    - pip install -r requirements.txt \n\n  script:\n    - python src/main.py\n\n  # Solution 3 - Running Redis\n  services:\n    - redis\n```\n\nRunning the Redis server allows you to successfully execute the Python application, and print its output into the CI/CD job log.\n\n![output of Python application](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097332/Blog/Content%20Images/Blog/Content%20Images/image4_aHR0cHM6_1750097332603.png)\n\nThe solution is provided in the [solution/ directory](https://gitlab.com/gitlab-da/use-cases/ai/ai-workflows/gitlab-duo-challenges/root-cause-analysis/challenge-root-cause-analysis-python-config/-/tree/main/solution?ref_type=heads).\n\n**Tip:** You can also ask [GitLab Duo Chat](https://docs.gitlab.com/ee/user/gitlab_duo_chat.html) to follow up on potential future problems:\n\n```markdown\nHow to lint Python code? Which tools are recommended for CI/CD.\nHow to pin a package version in Python requirements file?\t\nWhat are possible ways that this exception stacktrace is triggered in the future?\nAre there ways to prevent the application from failing?\n``` \n\nThe next example is more advanced and includes multiple failures. \n\n#### 2. 
Analyze missing Go runtime\n\nCI/CD jobs can be executed in containers, spawned from the contributed `image` attribute. If the container does not provide a programming language runtime, the executed `script` sections referencing the `go` binary fail. For example, the error message `/bin/sh: eval: line 149: go: not found` needs to be understood and fixed. \n\nIf the `go` command is not found in the container's runtime context, this can have multiple reasons:\n\n1. The job uses a minimal container image, for example `alpine`, and the Go language runtime was not installed.\n1. The job uses the wrong default container image, for example, specified on top of the CI/CD configuration, or using the `default` keyword.\n1. The job does not use a container image but the shell executor. The host operating system does not have the Go language runtime installed, or it is otherwise broken/not configured.\n\nThe project [Challenge - Root Cause Analysis - Go GitLab Release Fetcher](https://gitlab.com/gitlab-da/use-cases/ai/ai-workflows/gitlab-duo-challenges/root-cause-analysis/challenge-root-cause-analysis-go-gitlab-release-fetcher) provides an exercise challenge to analyze and fix CI/CD problems with a GitLab release fetcher application, written in Go. The `build` and `docker-build` CI/CD jobs are failing. Fixing the problem requires different scopes: Understanding why the Go runtime is not installed, and learning about the `Dockerfile` syntax. \n\n![Screenshot showing Change Docker Label job failed](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097332/Blog/Content%20Images/Blog/Content%20Images/image5_aHR0cHM6_1750097332603.png)\n\nThe [`solution/` directory](https://gitlab.com/gitlab-da/use-cases/ai/ai-workflows/gitlab-duo-challenges/root-cause-analysis/challenge-root-cause-analysis-go-gitlab-release-fetcher) provides two possible solutions after Root Cause Analysis. 
\n\n## Practice using Root Cause Analysis\n\nHere are some scenarios to use to practice using Root Cause Analysis.\n\n- When you are running into Kubernetes deployment errors or timeouts. \n\n- With OpenTofu or Terraform IaC pipelines failing to provision your cloud resources.\n\n- When the Ansible playbook fails with a cryptic permission error in CI/CD.\n\n- When the Java stack trace is 10 pages long.\n\n- With a shell script highlighting an execution error.\n\n- When a Perl script fails in a single line, which is the only line in the script.\n\n- When the CI/CD job times out and it is unclear which section would cause this.\n\n- When a network connection timeout is reached, and you think it cannot be DNS.\n\n### What is next for GitLab Duo Root Cause Analysis?\n\nWe want to help our users to get their pipelines back to passing in fewer iterations. The Root Cause Analysis will open and show the response in GitLab Duo Chat, our AI assistant. Users can build on the recommendation to generate a more precise fix by asking specific questions (e.g., programming language-specific fixes) or asking for alternative fixes based on the root cause.\n\nFor example, here is the Root Cause Analysis for a failing job:\n\n![Root Cause Analysis response](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097332/Blog/Content%20Images/Blog/Content%20Images/image1_aHR0cHM6_1750097332603.png)\n\nUsers can ask follow-up questions that build upon the AI-generated response.\n\n- I do not want to create my own Docker image. Please explain different ways to fix the problem.\n\n- I don't have access to the Docker image creation. It seems that the Go binary is missing. Are there alternative images you can suggest?\n\nGitLab also will be running quality benchmarks for the generated responses and shipping usability improvements.\n\nPlease see our [Root Cause Analysis GA epic](https://gitlab.com/groups/gitlab-org/-/epics/13080) for more details. 
We would also love your feedback on the feature. Please leave a comment on our [Root Cause Analysis feedback issue](https://gitlab.com/groups/gitlab-org/-/epics/13872).\n\n## Get started with Root Cause Analysis\n\nPlease see our [documentation](https://docs.gitlab.com/ee/user/ai_experiments.html#root-cause-analysis) on how to enable the feature available to our GitLab Ultimate customers. Also, GitLab Duo Root Cause Analysis will soon be coming to GitLab self-managed and GitLab Dedicated.\n\nNot a GitLab Ultimate customer? Start [a 30-day free trial](https://gitlab.com/-/trial_registrations/new?glm_source=about.gitlab.com/blog&glm_content=default-saas-trial) today. \n\n## Read more of our \"Developing GitLab Duo\" series\n\n- [Developing GitLab Duo: How we validate and test AI models at scale](https://about.gitlab.com/blog/developing-gitlab-duo-ai-impact-analytics-dashboard-measures-the-roi-of-ai/)\n\n- [Developing GitLab Duo: AI Impact analytics dashboard measures the ROI of AI](https://about.gitlab.com/blog/developing-gitlab-duo-ai-impact-analytics-dashboard-measures-the-roi-of-ai/)\n\n- [Developing GitLab Duo: How we are dogfooding our AI features](https://about.gitlab.com/blog/developing-gitlab-duo-ai-impact-analytics-dashboard-measures-the-roi-of-ai/)\n\n- [Developing GitLab Duo: Secure and thoroughly test AI-generated code](https://about.gitlab.com/blog/how-gitlab-duo-helps-secure-and-thoroughly-test-ai-generated-code/)",[9,724,703,482,704],{"slug":1222,"featured":91,"template":680},"developing-gitlab-duo-blending-ai-and-root-cause-analysis-to-fix-ci-cd","content:en-us:blog:developing-gitlab-duo-blending-ai-and-root-cause-analysis-to-fix-ci-cd.yml","Developing Gitlab Duo Blending Ai And Root Cause Analysis To Fix Ci 
Cd","en-us/blog/developing-gitlab-duo-blending-ai-and-root-cause-analysis-to-fix-ci-cd.yml","en-us/blog/developing-gitlab-duo-blending-ai-and-root-cause-analysis-to-fix-ci-cd",{"_path":1228,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":1229,"content":1235,"config":1240,"_id":1242,"_type":14,"title":1243,"_source":16,"_file":1244,"_stem":1245,"_extension":19},"/en-us/blog/developing-gitlab-duo-how-we-are-dogfooding-our-ai-features",{"title":1230,"description":1231,"ogTitle":1230,"ogDescription":1231,"noIndex":6,"ogImage":1232,"ogUrl":1233,"ogSiteName":693,"ogType":694,"canonicalUrls":1233,"schema":1234},"Developing GitLab Duo: How we are dogfooding our AI features","As part of our blog series, we share real-world examples of how we integrate AI throughout our software development lifecycle and how we use metrics to gauge their success.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098360/Blog/Hero%20Images/Blog/Hero%20Images/blog-hero-banner-1-0178-820x470-fy25_7JlF3WlEkswGQbcTe8DOTB_1750098360821.png","https://about.gitlab.com/blog/developing-gitlab-duo-how-we-are-dogfooding-our-ai-features","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Developing GitLab Duo: How we are dogfooding our AI features\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"David O'Regan\"}],\n        \"datePublished\": \"2024-05-20\",\n      }",{"title":1230,"description":1231,"authors":1236,"heroImage":1232,"date":1237,"body":1238,"category":10,"tags":1239},[1175],"2024-05-20","***Generative AI marks a monumental shift in the software development industry, making it easier to develop, secure, and operate software. Our new blog series, written by our product and engineering teams, gives you an inside look at how we create, test, and deploy the AI features you need integrated throughout the enterprise. 
Get to know new capabilities within GitLab Duo and how they will help DevSecOps teams deliver better results for customers.***\n\n[GitLab Duo](https://about.gitlab.com/gitlab-duo/), our suite of AI-powered features, has transformed our internal engineering workflows, driving efficiency gains across our development process. As strong proponents of dogfooding and transparency, we wanted to showcase how our teams leverage AI, including standouts like GitLab Duo Code Suggestions and GitLab Duo Chat, daily to streamline development processes, reduce manual effort, and enhance productivity. You'll learn about the benefits we've experienced for highly technical teams like engineering to less technical teams such as technical writing and product management.\n\n> Discover the future of AI-driven software development with our GitLab 17 virtual launch event. [Watch today!](https://about.gitlab.com/seventeen/)\n\n## Real-world use cases\n\nOur teams have integrated [GitLab Duo's many features](https://about.gitlab.com/gitlab-duo/#features) into their daily routines. Here are some examples of how GitLab Duo is helping them carry out everyday activities.\n\n### Summarization and documentation\n- **Streamline the code review process:** Staff Backend Developer [Gosia Ksionek](https://about.gitlab.com/company/team/#mksionek) showcases the practical benefits of AI in her workflow by using GitLab Duo to streamline the code review process. She effectively utilizes GitLab Duo to [summarize merge requests](https://youtu.be/3SIhe8dgFEc), making it easier and faster to review code changes. In addition to summarizing merge requests, Gosia also leverages GitLab Duo to [answer coding questions](https://www.youtube.com/watch?v=6n0I53XsjTc) and [explain complex code snippets](https://www.youtube.com/watch?v=3m2YRxa1SCY). This enhances her productivity and helps her better understand and manage intricate codebases. 
Through these demonstrations, Gosia highlights how GitLab Duo can significantly improve efficiency and clarity in the development process, making it an invaluable tool for developers.\n\n\u003Ccenter>\n\nWatch Gosia use GitLab Duo Merge Request Summary:\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/3SIhe8dgFEc?si=Q8JG3Ix3K_THhbpv\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\nWatch Gosia use GitLab Duo to answer coding questions: \n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/6n0I53XsjTc?si=LA9VBHrgXpfJImSL\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\nWatch Gosia use GitLab Duo to explain complex code snippets:\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/3m2YRxa1SCY?si=oms3szKwZoz-4yeq\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\n\u003C/center>\n\n- **Condense comment threads:** [Bartek Marnane](https://about.gitlab.com/company/team/#bmarnane), Vice President of Expansion Software Development, uses GitLab Duo to condense lengthy comment threads into concise summaries, ensuring all relevant details are captured when updating issue descriptions.\n\n- **Create new documentation:** [Taylor McCaslin](https://about.gitlab.com/company/team/#tmccaslin), Group Manager, Product - Data Science Section, leveraged GitLab Duo to [create new documentation for GitLab Duo itself](https://docs.gitlab.com/ee/user/ai_features.html), exemplifying a meta use case that enhances clarity and consistency and greatly reduces the time to document new features.\n\n- **Craft release notes:** [Amanda Rueda](https://about.gitlab.com/company/team/#amandarueda), Senior Product Manager for Product 
Planning, uses GitLab Duo to [craft brief, impactful summaries for release notes](https://gitlab.com/groups/gitlab-org/-/epics/10267), highlighting changes and their value to users. By using well-crafted prompts like below, Amanda supercharges her workflow and ensures that each release note is clear, concise, and user-focused, enhancing the overall communication and user experience:\u003Cbr>\u003Cbr>\n*“Please create a two sentence summary of this change, which can be used for our release notes. The tone should be conversational and should be in second person. The summary should include a description of the problem or change and be tied to the value we are creating for you, the user.”*\n\u003Cbr>\u003Cbr>\n    - Here are some examples of release notes co-created with GitLab Duo:\n      - [Expanded options for sorting your Roadmap](https://gitlab.com/gitlab-org/gitlab/-/issues/460492)\n      - [Issue Board Clarity now with Milestone & Iteration](https://gitlab.com/gitlab-org/gitlab/-/issues/25758)\n      - [Design Management Features Extended to Product Teams](https://gitlab.com/gitlab-org/gitlab/-/issues/438829)\n\n- **Optimize docs site navigation:** [Suzanne Selhorn](https://about.gitlab.com/company/team/#sselhorn), Staff Technical Writer, tapped GitLab Duo to [optimize the left navigation of documentation](https://docs.gitlab.com/ee/user/get_started/get_started_projects.html) by providing a workflow-based order of pages. Suzanne provided a list of features to GitLab Duo, which generated the optimal order, updating the left navigation to match. 
GitLab Duo also drafted the [Getting Started](https://docs.gitlab.com/ee/user/get_started/get_started_planning_work.html) documentation much faster than were she to use traditional, manual approaches.\n\n### Goal setting and team alignment\n- **Draft and refine OKRs:** [François Rosé](https://about.gitlab.com/company/team/#francoisrose), Engineering manager, Create:Code Review Backend, finds [GitLab Duo Chat](https://about.gitlab.com/blog/gitlab-duo-chat-now-generally-available/) invaluable for drafting and refining OKRs. By articulating objectives more clearly and effectively, François enhances goal setting and team alignment. Using Chat, François ensures that each OKR is precise, actionable, and aligned with the team's goals, thereby improving overall team performance and cohesion. Here is an example prompt he uses:\u003Cbr>\u003Cbr>\n\n    *\"Here is an OKR I am thinking of creating:*\n\n    *Objective: Retrospect on retrospectives, to foster a thriving team*\n\n    *KR: Measure retrospective satisfaction from 100% of team members*\n\n    *KR: Identify 3 improvements to the async retrospectives*\n\n    *KR: Implement 1 improvement*\n\n    *Please provide direct feedback on how to improve the formulation of this objective and these key results.\"*\n\u003Cbr>\u003Cbr>\n\n- **Streamlined hiring and recruitment processes:** Chat helped [Denys Mishunov](https://about.gitlab.com/company/team/#dmishunov), Staff Frontend Engineer, formulate a clear and concise text for updating the email template for technical interview candidates. The team collaborated on refining the communication to ensure candidates receive all necessary information using a merge request. 
This example showcased the practical application of AI tools in enhancing communication processes within the hiring workflow.\n\n### Incident response and configuration\n- **Summarize production incidents:** [Steve Xuereb](https://about.gitlab.com/company/team/#sxuereb), Staff Site Reliability Engineer, employs GitLab Duo to summarize production incidents and create detailed incident reviews, streamlining the documentation process.\n\n- **Create boilerplate `.gitlab-ci.yml` files:**  Steve also uses Chat to create boilerplate `.gitlab-ci.yml` files, which significantly sped up his workflow. [Chat](https://docs.gitlab.com/ee/user/gitlab_duo_chat.html) serves as a valuable partner for suggesting ideas. Additionally, [Code Explanation](https://docs.gitlab.com/ee/user/ai_features.html#code-explanation) provides detailed answers that are helpful during incidents, enhancing his productivity and understanding of the codebase.\n\n### Code generation and testing\n- **Full-stack development:** [Peter Hegman](https://about.gitlab.com/company/team/#peterhegman), Senior Frontend Engineer, has been using [Code Suggestions for his JavaScript and Ruby development](https://gitlab.com/gitlab-org/gitlab/-/issues/435783#note_1731321963). This highlights that Code Suggestions has become a powerful tool for developers moving across a full technical stack. \n\n- **Generate Python scripts:** Denys conducted [an experiment using GitLab Duo for a non-GitLab task](https://gitlab.com/gitlab-org/ai-powered/ai-framework/ai-experimentation). 
This example highlights the flexibility and utility of our AI tools beyond typical software development tasks.\n\n\u003Ccenter>\nWatch how Denys uses GitLab Duo to generate Python scripts to fetch content data and store it locally:\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/30ZTtk4K5yU?si=p5ZcFLg6dTZL5gFE\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\n\u003C/center>\n\n### Research and support\n- **Generate test source code:**  [Michael Friedrich](https://about.gitlab.com/company/team/#dnsmichi), Senior Developer Advocate, uses GitLab Duo to generate test source code for CI/CD components. This approach has been shared in various talks and presentations, such as the recent Open Source @ Siemens event ([public slides](https://go.gitlab.com/duA2Fc)). Using GitLab Duo in this manner helps ensure that the code is consistent, well-documented, and aligned with our best practices. Check out his [Rust example](https://gitlab.com/components/rust#contributing).\n\n![Rust example](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098367/Blog/Content%20Images/Blog/Content%20Images/image2_aHR0cHM6_1750098367547.png)\n\n- **Streamline research tasks:** Our team members consistently turn to Chat when they have questions about GitLab features, streamlining their research and support tasks. Michael shared, \"When I have a question about GitLab features, I default to using Chat instead of opening 100 browser tabs. This workflow helps me assist users on our community forum efficiently. 
For instance, I recently [helped a user with SSH deployment](https://forum.gitlab.com/t/how-to-make-ssh-deployment-more-clear-in-gitlab/102051/4?u=dnsmichi) using this method.\" Using Chat not only saves time but also provides quick, accurate information, enhancing the support we offer to our community.\n\n### Feature testing\n- **Test new features:** Our engineers use GitLab Duo to test new features like [Markdown support in Code Suggestions](https://gitlab.com/gitlab-org/gitlab/-/issues/443365). One of our team members noted, \"I need to test Markdown support in Code Suggestions for writing blog posts and GitLab docs in VS Code. I saw it was merged for 17.0.\" By testing these features internally, we ensure they meet our quality standards before release.\n\n### Understanding external codebases\n- **Explain external projects:** GitLab Duo's `/explain` feature is particularly useful for understanding external projects imported into GitLab. This capability was highlighted in a recent livestream he did with open source expert Eddie Jaoude. Michael let us know, \"I use `/explain` on external projects to understand the source code. I pitched this idea for learning about open source projects, dependencies, etc. 
during the livestream.\" This feature is invaluable for developers who need to quickly grasp the functionality and dependencies of unfamiliar codebases, significantly improving their efficiency and understanding.\n\n\u003Ccenter>\nWatch Michael demo `/explain` during a livestream with Eddie Jaoude:\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/L2Mx8hOhkEE?si=R7W3v4EDqeJCaPOw\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\n\u003C/center>\n\n## GitLab Duo's benefits\n\nThe integration of GitLab Duo has brought about numerous positive impacts, significantly enhancing our engineering and product development workflows:\n\n- Many tasks that previously required manual intervention are now automated, freeing up valuable time for our engineers. For example, summarizing long threads and creating boilerplate code are now more efficient, allowing our team to focus on more complex issues.\n- The time taken to document and summarize issues has decreased, allowing for quicker information dissemination and decision-making.\n- With AI-assisted code suggestions and explanations, our teams produce higher quality code with fewer errors and faster debugging processes. The integration of GitLab Duo into incident reviews and coding assistance has led to more efficient and effective code reviews.\n- Administrative tasks, such as drafting OKRs and creating release notes, have been streamlined. \n\nGitLab Duo has helped to not only improve our efficiency but also to enhance the quality and speed of our development processes, illustrating the transformative power of AI in software development.\n\n## What's next?\n\nWe are committed to further integrating AI into our workflows and continuously improving GitLab Duo features based on internal feedback and evolving needs. 
The ongoing collection of use cases and metrics with the [AI Impact analytics dashboard](https://about.gitlab.com/blog/developing-gitlab-duo-ai-impact-analytics-dashboard-measures-the-roi-of-ai/) will guide enhancements and ensure that GitLab Duo remains at the forefront of AI-driven development tools.\n\n![Dogfooding Duo - AI analytics dashboard](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098367/Blog/Content%20Images/Blog/Content%20Images/image1_aHR0cHM6_1750098367547.png)\n\n> [Get started using GitLab Duo today with our free trial.](https://about.gitlab.com/gitlab-duo/#free-trial)\n\n## Read more \"Developing GitLab Duo\"\n\n- [Developing GitLab Duo: AI Impact analytics dashboard measures the ROI of AI](https://about.gitlab.com/blog/developing-gitlab-duo-ai-impact-analytics-dashboard-measures-the-roi-of-ai/)\n- [Developing GitLab Duo: How we validate and test AI models at scale](https://about.gitlab.com/blog/developing-gitlab-duo-how-we-validate-and-test-ai-models-at-scale/)\n- [Developing GitLab Duo: Secure and thoroughly test AI-generated code](https://about.gitlab.com/blog/how-gitlab-duo-helps-secure-and-thoroughly-test-ai-generated-code/)\n- [Developing GitLab Duo: Blending AI and Root Cause Analysis to fix CI/CD pipelines](https://about.gitlab.com/blog/developing-gitlab-duo-blending-ai-and-root-cause-analysis-to-fix-ci-cd/)",[9,786,704,482,746],{"slug":1241,"featured":91,"template":680},"developing-gitlab-duo-how-we-are-dogfooding-our-ai-features","content:en-us:blog:developing-gitlab-duo-how-we-are-dogfooding-our-ai-features.yml","Developing Gitlab Duo How We Are Dogfooding Our Ai 
Features","en-us/blog/developing-gitlab-duo-how-we-are-dogfooding-our-ai-features.yml","en-us/blog/developing-gitlab-duo-how-we-are-dogfooding-our-ai-features",{"_path":1247,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":1248,"content":1253,"config":1259,"_id":1261,"_type":14,"title":1262,"_source":16,"_file":1263,"_stem":1264,"_extension":19},"/en-us/blog/developing-gitlab-duo-how-we-validate-and-test-ai-models-at-scale",{"title":1249,"description":1250,"ogTitle":1249,"ogDescription":1250,"noIndex":6,"ogImage":691,"ogUrl":1251,"ogSiteName":693,"ogType":694,"canonicalUrls":1251,"schema":1252},"Developing GitLab Duo: How we validate and test AI models at scale","Our blog series debuts with a behind-the-scenes look at how we evaluate LLMs, match them to use cases, and fine-tune them to produce better responses for users.","https://about.gitlab.com/blog/developing-gitlab-duo-how-we-validate-and-test-ai-models-at-scale","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Developing GitLab Duo: How we validate and test AI models at scale\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Susie Bitters\"}],\n        \"datePublished\": \"2024-05-09\",\n      }",{"title":1249,"description":1250,"authors":1254,"heroImage":691,"date":1256,"body":1257,"category":10,"tags":1258},[1255],"Susie Bitters","2024-05-09","**_Generative AI marks a monumental shift in the software development industry, making it easier to develop, secure, and operate software. Our new blog series, written by our product and engineering teams, gives you an inside look at how we create, test, and deploy the AI features you need integrated throughout the enterprise. Get to know new capabilities within GitLab Duo and how they will help DevSecOps teams deliver better results for customers._**\n\nGitLab values the trust our customers place in us. 
Part of maintaining that trust is transparency in how we build, evaluate, and ensure the high-quality functionality of our [GitLab Duo](https://about.gitlab.com/gitlab-duo/) AI features. GitLab Duo features are powered by a diverse set of models, which allows us to support a broad set of use cases and gives our customers flexibility. GitLab is not tied to a single model provider by design. We currently use foundation models from [Google](https://gitlab.com/gitlab-org/modelops/applied-ml/code-suggestions/ai-assist/-/blob/main/ai_gateway/models/vertex_text.py?ref_type=heads#L86) and [Anthropic](https://gitlab.com/gitlab-org/modelops/applied-ml/code-suggestions/ai-assist/-/blob/main/ai_gateway/models/anthropic.py?ref_type=heads#L62). However, we continuously assess what models are the right matches for GitLab Duo’s use cases. In this article, we give you an inside look at our AI model validation process.\n\n> Discover the future of AI-driven software development with our GitLab 17 virtual launch event. [Watch today!](https://about.gitlab.com/seventeen/)\n\n## Understanding LLMs\n\nLarge language models (LLMs) are generative AI models that power many AI features across the platform. Trained on vast datasets, LLMs predict the next word in a sequence based on preceding context. Given an input prompt, they generate human-like text by sampling from the probability distribution of words conditioned on the prompt.\n\nLLMs enable intelligent code suggestions, conversational chatbots, code explanations, vulnerability analysis, and more. Their ability to produce diverse outputs for a given prompt makes standardized quality evaluation challenging. LLMs can be optimized for different characteristics, which is why there are so many AI models actively being developed.\n\n## Testing at scale\n\nUnlike traditional software systems where inputs and outputs can be more easily defined and tested, LLMs produce outputs that are often nuanced, diverse, and context-dependent. 
Testing these models requires comprehensive strategies that account for subjective and variable interpretations of quality, as well as the stochastic nature of their outputs. We, therefore, cannot judge the quality of an LLM’s output in an individual or anecdotal fashion; instead, we need to be able to examine the overall pattern of an LLM's behavior. To get a sense of those patterns, we need to test at scale. Testing at scale refers to the process of evaluating the performance, reliability, and robustness of a system or application across a large and diverse array of datasets and use cases. Our [Centralized Evaluation Framework (CEF)](https://about.gitlab.com/direction/ai-powered/ai_framework/ai_evaluation/) utilizes thousands of prompts tied to dozens of use cases to allow us to identify significant patterns and assess the overall behavior of our foundational LLMs and the GitLab Duo features in which they are integrated.\n\nTesting at scale helps us:\n\n- **Ensure quality:** Testing at scale enables us to assess the quality and reliability of these models across a wide range of scenarios and inputs. By validating the outputs of these models at scale, we can start to identify patterns and mitigate potential issues such as systematic biases, anomalies, and inaccuracies. \n- **Optimize performance:** Scaling up testing efforts allows GitLab to evaluate the performance and efficiency of LLMs under real-world conditions. This includes assessing factors such as output quality, latency, and cost to optimize the deployment and operation of these models in GitLab Duo features.\n- **Mitigate risk:** Testing LLMs at scale helps mitigate the risks associated with deploying LLMs in critical applications. 
By conducting thorough testing across diverse datasets and use cases, we can identify and address potential failure modes, security vulnerabilities, and ethical concerns before they impact our customers.\n\nTesting LLMs at scale is imperative for ensuring their reliability and robustness for deployment within the GitLab platform. By investing in comprehensive testing strategies that encompass diverse datasets, use cases, and scenarios, GitLab is working to unlock the full potential of AI-powered workflows while mitigating potential risks.\n\n### How we test at scale\n\nThese are the steps we take to test LLMs at scale.\n\n#### Step 1: Create a prompt library as a proxy for production\nWhile other companies view and use customer data to train their AI features, GitLab currently does not.  As a result, we needed to develop a comprehensive prompt library that is a proxy for both the scale and activity of production.\n\nThis prompt library is composed of questions and answers. The questions represent the kinds of queries or inputs that we would expect to see in production, while the answers represent a ground truth of what our ideal answer would be. This ground truth answer could also be mentally framed as a target answer. Both the question and the answer may be human generated, but are not necessarily so. These question/answer pairs give us a basis for comparison and a reference frame that allow us to tease out differences between models and features. When multiple models are asked the same question and generate different responses, we can use our ground truth answer to determine which model has provided an answer that is most closely aligned to our target and score them accordingly.\n\nAgain, a key element of a comprehensive prompt library is ensuring that it is representative of the inputs that we expect to see in production. We want to know how well foundational models fit to our specific use case, and how well our features are performing. 
There are numerous benchmark prompt datasets, but those datasets may not be reflective of the use cases that we see for features at GitLab. Our prompt library is designed to be specific to GitLab features and use cases.\n\n#### Step 2: Baseline model performance\n\nOnce we have crafted a prompt library that accurately reflects production activity, we feed those questions into [various models](https://about.gitlab.com/direction/ai-powered/ai_framework/ai_evaluation/foundation_models/) to test how well they serve our customer’s needs. We compare each response to our ground truth and provide it a ranking based on a series of metrics including: [Cosine Similarity Score](https://about.gitlab.com/direction/ai-powered/ai_framework/ai_evaluation/metrics/#similarity-scores), [Cross Similarity Score](https://about.gitlab.com/direction/ai-powered/ai_framework/ai_evaluation/metrics/#cross-similarity-score),  [LLM Judge](https://about.gitlab.com/direction/ai-powered/ai_framework/ai_evaluation/metrics/#llm-judge), and [Consensus Filtering with an LLM Judge](https://about.gitlab.com/direction/ai-powered/ai_framework/ai_evaluation/metrics/#consensus-filtering-with-llm-judge). This first iteration provides us a baseline for how well each model is performing, and guides our selection of a foundational model for our features. For brevity, we won’t go into the details here, but we encourage you to [learn more about more about the metrics here](https://about.gitlab.com/direction/ai-powered/ai_framework/ai_evaluation/metrics/). It is important to note this isn’t a solved problem; the wider AI industry is actively researching and developing new techniques. GitLab’s model validation team keeps a pulse on the industry and is continuously iterating on how we measure and score the LLMs GitLab Duo uses.  \n\n#### Step 3: Feature development\n\nNow that we have a baseline for our selected model's performance, we can start developing our features with confidence. 
While prompt engineering gets a lot of buzz, focusing entirely on changing the behavior of a model via prompting (or any other technique) without validation means that you are operating in the dark and very possibly overfitting your prompting. You may solve one problem, but be causing a dozen more. You would never know. Creating a baseline for a model's performance allows us to track how we are changing behavior over time for all our necessary use cases. At GitLab, we re-validate the performance of our features on a daily basis during active development to help ensure that all changes improve the overall functionality.\n\n#### Step 4: Iterate, iterate, iterate\n\nHere is how our experimental iterations work. Each cycle, we examine the scores from our tests at scale to identify patterns:\n\n- What are the commonalities across our weakest areas?\n- Is our feature performing poorly based on a specific metric or on a certain use case?\n- Do we see consistent errors popping up in response to a certain kind of question?\n\nOnly when we test at scale do these kinds of patterns begin to emerge and allow us to focus our experiments. Based on these patterns, we propose a variety of experiments or approaches to try to improve performance in a specific area and on a specific metric.\n\nHowever, testing at scale is both expensive and time-consuming. To enable faster and less expensive iteration, we craft a smaller scale dataset to act as a mini-proxy. The focused subset will be weighted to include question/answer pairs that we know we want to improve upon, and the broader subset will also include sampling of all the other use cases and scores to ensure that our changes aren't adversely affecting the feature broadly. Make your change and run it against the focused subset of data. How does the new response compare to the baseline? 
How does it compare to the ground truth?\n\nOnce we have found a prompt that addresses the specific use case we are working on with the focused subset, we validate that prompt against a broader subset of data to help ensure that it won’t adversely affect other areas of the feature. Only when we believe that the new prompt improves our performance in our target area through validation metrics AND doesn’t degrade performance elsewhere, do we push that change to production.\n\nThe entire Centralized Evaluation Framework is then run against the new prompt and we validate that it has increased the performance of the entire feature against the baseline from the day before. In this way, GitLab is constantly iterating to help ensure that you are getting the latest and greatest performance of AI-powered features across the GitLab ecosystem. This allows us to ensure that we keep working faster, together.\n\n### Making GitLab Duo even better\n\nHopefully this gives you insight into how we’re responsibly developing GitLab Duo features. This process has been developed as we’ve brought [GitLab Duo Code Suggestions](https://docs.gitlab.com/ee/user/project/repository/code_suggestions/) and [GitLab Duo Chat](https://docs.gitlab.com/ee/user/gitlab_duo_chat.html) to general availability. We’ve also integrated this validation process into our development process as we iterate on GitLab Duo features. It’s a lot of trial and error, and many times fixing one thing breaks three others. 
But we have data-driven insights into those impacts, which helps us ensure that GitLab Duo is always getting better.\n\n> Start a [free trial of GitLab Duo](https://about.gitlab.com/gitlab-duo/#free-trial) today!\n\n ## Resources\n - [GitLab AI Transparency Center](https://about.gitlab.com/ai-transparency-center/)\n - [GitLab's AI Ethics Principles for Product Development](https://handbook.gitlab.com/handbook/legal/ethics-compliance-program/ai-ethics-principles/)\n - [GitLab AI-powered Direction page](https://about.gitlab.com/direction/ai-powered/)\n\n\u003Cfigure class=video_container>\n\u003Ciframe width=560 height=315 src=\"https://www.youtube-nocookie.com/embed/LifJdU3Qagw?si=A4kl6d32wPYC4168\" title=\"YouTube video player\" frameborder=0 allow=\"accelerometer; autoplay; clipboard-write; encrypted-media; gyroscope; picture-in-picture; web-share\" allowfullscreen=\"\">\u003C/iframe>\n\u003C/figure>\n\n## Read more of the \"Developing GitLab Duo\" series\n\n- [Developing GitLab Duo: AI Impact analytics dashboard measures the ROI of AI](https://about.gitlab.com/blog/developing-gitlab-duo-ai-impact-analytics-dashboard-measures-the-roi-of-ai/)\n- [Developing GitLab Duo: How we are dogfooding our AI features](https://about.gitlab.com/blog/developing-gitlab-duo-how-we-are-dogfooding-our-ai-features/) \n- [Developing GitLab Duo: Secure and thoroughly test AI-generated code](https://about.gitlab.com/blog/how-gitlab-duo-helps-secure-and-thoroughly-test-ai-generated-code/)\n- [Developing GitLab Duo: Blending AI and Root Cause Analysis to fix CI/CD pipelines](https://about.gitlab.com/blog/developing-gitlab-duo-blending-ai-and-root-cause-analysis-to-fix-ci-cd/)",[9,703,482,704,1036],{"slug":1260,"featured":91,"template":680},"developing-gitlab-duo-how-we-validate-and-test-ai-models-at-scale","content:en-us:blog:developing-gitlab-duo-how-we-validate-and-test-ai-models-at-scale.yml","Developing Gitlab Duo How We Validate And Test Ai Models At 
Scale","en-us/blog/developing-gitlab-duo-how-we-validate-and-test-ai-models-at-scale.yml","en-us/blog/developing-gitlab-duo-how-we-validate-and-test-ai-models-at-scale",{"_path":1266,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":1267,"content":1272,"config":1278,"_id":1280,"_type":14,"title":1281,"_source":16,"_file":1282,"_stem":1283,"_extension":19},"/en-us/blog/developing-gitlab-duo-series",{"title":1268,"description":1269,"ogTitle":1268,"ogDescription":1269,"noIndex":6,"ogImage":691,"ogUrl":1270,"ogSiteName":693,"ogType":694,"canonicalUrls":1270,"schema":1271},"Developing GitLab Duo series","Our unique blog series, written by our Product and Engineering teams, takes you behind the scenes of our AI innovation and guides you through our newest AI features powering your DevSecOps workflow.","https://about.gitlab.com/blog/developing-gitlab-duo-series","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Developing GitLab Duo series\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"GitLab Team\"}],\n        \"datePublished\": \"2024-06-03\",\n      }",{"title":1268,"description":1269,"authors":1273,"heroImage":691,"date":1275,"body":1276,"category":10,"tags":1277},[1274],"GitLab Team","2024-06-03","Generative AI marks a monumental shift in the software development industry, making it easier to develop, secure, and operate software. Our blog series, written by our product and engineering teams, gives you an inside look at how we create, test, and deploy the AI features you need integrated throughout the enterprise. Get to know new capabilities within GitLab Duo and how they will help DevSecOps teams deliver better results for customers.\n\n> Live demo! Discover the future of AI-driven software development with our GitLab 17 virtual launch event. [Register today](https://about.gitlab.com/seventeen/)!\n\n## 1. 
[How we validate and test AI models at scale](https://about.gitlab.com/blog/developing-gitlab-duo-how-we-validate-and-test-ai-models-at-scale/)\n\n- Our blog series debuts with a behind-the-scenes look at how we evaluate LLMs, match them to use cases, and fine-tune them to produce better responses for users.\n\n## 2. [AI Impact analytics dashboard measures the ROI of AI](https://about.gitlab.com/blog/developing-gitlab-duo-ai-impact-analytics-dashboard-measures-the-roi-of-ai/)\n\n- We spotlight a new feature that provides detailed metrics, such as the Code Suggestions Usage Rate, to help understand the effectiveness of AI investments.\n\n## 3. [How we are dogfooding our AI features](https://about.gitlab.com/blog/developing-gitlab-duo-how-we-are-dogfooding-our-ai-features/)\n\n- We share real-world examples of how we integrate AI throughout our software development lifecycle and how we use metrics to gauge their success.\n\n## 4. [Secure and thoroughly test AI-generated code](https://about.gitlab.com/blog/how-gitlab-duo-helps-secure-and-thoroughly-test-ai-generated-code/)\n\n- Learn step-by-step how to enhance AI-generated code reliability and security using GitLab Duo and GitLab Pages (includes code samples and prompts).\n\n## 5. [Blending AI and Root Cause Analysis to fix CI/CD pipelines](https://about.gitlab.com/blog/developing-gitlab-duo-blending-ai-and-root-cause-analysis-to-fix-ci-cd/)\n\n- Discover how we've infused Root Cause Analysis with AI to help remedy broken CI/CD pipelines, including example scenarios and take-away exercises.\n\n## 6. [Developing GitLab Duo: A roundup of recent Chat enhancements](https://about.gitlab.com/blog/developing-gitlab-duo-a-roundup-of-recent-chat-enhancements)\n- Discover the latest improvements to GitLab Duo Chat, including prompt cancellation and architectural upgrades. 
Learn how these updates streamline workflows and boost productivity.\n\n> Learn more about [GitLab Duo](https://about.gitlab.com/gitlab-duo/), our AI-powered suite of features for your DevSecOps workflow. Then start [a free trial of GitLab Duo](https://about.gitlab.com/gitlab-duo/#free-trial) to get the incredible benefits in your own organization! \n\n##  7. [Developing GitLab Duo: Use AI to remediate security vulnerabilities](https://about.gitlab.com/blog/developing-gitlab-duo-use-ai-to-remediate-security-vulnerabilities)\nThis tutorial shows how GitLab Duo Vulnerability Explanation and GitLab Duo Vulnerability Resolution, along with our other AI-powered features, can help to address vulnerabilities quickly.",[9,704,703,482],{"slug":1279,"featured":6,"template":680},"developing-gitlab-duo-series","content:en-us:blog:developing-gitlab-duo-series.yml","Developing Gitlab Duo Series","en-us/blog/developing-gitlab-duo-series.yml","en-us/blog/developing-gitlab-duo-series",{"_path":1285,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":1286,"content":1292,"config":1298,"_id":1300,"_type":14,"title":1301,"_source":16,"_file":1302,"_stem":1303,"_extension":19},"/en-us/blog/developing-gitlab-duo-use-ai-to-remediate-security-vulnerabilities",{"title":1287,"description":1288,"ogTitle":1287,"ogDescription":1288,"noIndex":6,"ogImage":1289,"ogUrl":1290,"ogSiteName":693,"ogType":694,"canonicalUrls":1290,"schema":1291},"Developing GitLab Duo: Use AI to remediate security vulnerabilities ","This tutorial shows how GitLab Duo Vulnerability Explanation and GitLab Duo Vulnerability Resolution, along with our other AI-powered features, can help to address vulnerabilities quickly.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098106/Blog/Hero%20Images/Blog/Hero%20Images/blog-hero-banner-1-0178-820x470-fy25_7JlF3WlEkswGQbcTe8DOTB_1750098106040.png","https://about.gitlab.com/blog/developing-gitlab-duo-use-ai-to-remediate-security-vulnerabilities","\n                  
      {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Developing GitLab Duo: Use AI to remediate security vulnerabilities \",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Michael Friedrich\"},{\"@type\":\"Person\",\"name\":\"Alana Bellucci\"}],\n        \"datePublished\": \"2024-07-15\",\n      }",{"title":1287,"description":1288,"authors":1293,"heroImage":1289,"date":1295,"body":1296,"category":10,"tags":1297},[1217,1294],"Alana Bellucci","2024-07-15","You’ve just started into a new job, and on your first day, a large-scale production incident requires all hands on deck. There are a number of critical new vulnerabilities that require immediate attention, analysis, mitigation and remediation. Where do you start your investigation? \n\nLearn how GitLab Duo Vulnerability Explanation and GitLab Duo Vulnerability Resolution, along with our other AI-powered features, can help you begin addressing vulnerabilities in minutes. You will learn how to benefit from AI-powered assistance to analyze and explain vulnerabilities in a practical example. Additional remediation is highlighted with AI-generated code fixes in MRs to aid faster vulnerability resolution.\n\n> Start [a free trial of GitLab Duo](https://about.gitlab.com/gitlab-duo/#free-trial) to bring these powerful vulnerability remediation benefits to your own organization!\n\n## How to get started: Analyze\n\nThe first step is to analyze the impact and severity of the vulnerability. Open the GitLab UI and navigate into the [vulnerability report](https://docs.gitlab.com/ee/user/application_security/vulnerability_report/) in the `Secure > Vulnerability Report` menu. 
Filter the vulnerability list by `SAST`, and identify the most critical vulnerabilities to work on.\n\n![Vulnerability reports overview](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098116/Blog/Content%20Images/Blog/Content%20Images/vulnerability_reports_overview_aHR0cHM6_1750098116056.png)\n\nThe SAST scanner results are summarized in the detail view, linking to the source code. They provide details from publicly available security advisories. As a developer, it is often hard to start the analysis from the security report, unless you are fully aware of the attack scope, technical details, and vulnerable environments.\n\n## Understand and mitigate with Vulnerability Explanation \n\nUnderstanding the vulnerability and how to fix it in the best and most efficient way is crucial. Fixes must not break existing functionality. If they do, a discussion with maintainers and product owners will be necessary, and, as such, will require a high-level summary and potential mitigation alternatives. Code that someone who left the company wrote or code that has no tests can make the planning for a fix even more difficult. \n\nAI-powered Vulnerability Explanation helps with a summary of how an attacker can exploit the vulnerability, and provides more explanations about the impact and potential fixes. \n\nThe following example shows an OS Command Injection vulnerability, using this code snippet:\n\n```php\n\u003C?php \n\n// Read variable name from GET request\n$name = $_GET['name'];\n\n// Use the variable name to call eval and print its value \neval('echo $' . $name . ';');\n```\n\nThe vulnerability report does not go into much detail, and requires understanding of the full context and impact. Select `Explain vulnerability` from the upper right corner, which will open GitLab Duo Chat with a pre-defined prompt action. This will give an additional summary of the vulnerability, describe how the vulnerability can be exploited, and provide a suggested fix. 
\n\n![Improper Neutralization of\nSpecial Elements used in an OS Command\n('OS Command Injection') ](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098116/Blog/Content%20Images/Blog/Content%20Images/image9_aHR0cHM6_1750098116057.png)\n\n### Make Vulnerability Explanation a conversation with context \n\nYou’ll also recognize a change in UX: The previous vulnerability explanation overlay was replaced with a GitLab Duo Chat workflow. Sometimes, a complex vulnerability unfolds into multiple mitigation steps, or unclear source code paths.\n\nYou can navigate into the source code tree, and continue with the same Chat context to explain, fix, refactor, and test the code. \n\nLet’s try the full workflow with an example in C, where security scanning detected a buffer overflow.\n\n1. Open the security vulnerability detail view, and select \"Explain vulnerability\" on the button in the upper right. This will open up the Chat prompt, providing a summary of the problem, potential attack vectors, and a proposed fix.\n\n![AI for vulnerabilities - image 4](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098116/Blog/Content%20Images/Blog/Content%20Images/image11_aHR0cHM6_1750098116059.png)\n\n2. Review the proposed fix, and ask Chat in a follow-up prompt to share alternative paths, using `Can you show an alternative fix using a different function`. The idea is to learn about alternative functions to `strcpy()` that can be more safe to use. \n\n![AI for vulnerabilities - image 3](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098116/Blog/Content%20Images/Blog/Content%20Images/image4_aHR0cHM6_1750098116060.png)\n\n3. Chat comes up with an alternative fix using `strlcpy()` in the following example. The function only copies as many characters as allowed in the target string, and always terminates the string with null. It also returns the length of the source string to determine whether the string was truncated. 
\n\n![AI for vulnerabilities - image 5](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098116/Blog/Content%20Images/Blog/Content%20Images/image10_aHR0cHM6_1750098116062.png)\n\n4. Next, click on the `Location` file URL to jump into the source code view. Open Chat again, and verify that the previous vulnerability explanation context is still there. As a next step, we want to add tests before continuing with a proposed fix. This helps to avoid breaking functionality or introduce regressions. For example, use this Chat prompt: `Based on the vulnerability context and opened source code, how would you add tests for it?`.\n\n![AI for vulnerabilities - image 7 ](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098116/Blog/Content%20Images/Blog/Content%20Images/image8_aHR0cHM6_1750098116063.png)\n\n5. After generating tests (and assuming they were added now), you can also ask Chat to refactor the source code, using the prompt `Can you refactor the source code too?` in the same session.\n\n![AI for vulnerabilities - image 6](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098116/Blog/Content%20Images/Blog/Content%20Images/image2_aHR0cHM6_1750098116063.png)\n\nThe workflow shows how to analyze, understand, mitigate, get alternative approaches, add tests, and even refactor fixes for vulnerabilities. \n\nYou can continue this path using Chat, and then switch into the Web IDE to modify the source code after learning how to do it. Additional continued workflows include committing changes and triggering CI/CD and security scans for the full DevSecOps lifecycle loop. \n\n## Remediate with AI-assisted Vulnerability Resolution \n\nUnderstanding and mitigating a security vulnerability still requires engineering work to create a fix for the problem, run pipelines and security scanning in a new merge request again. 
It can also be necessary to deploy the fixes into a staging environment and test them for a longer period of time.\n\nAI can help here with generating a proposed fix based on the provided context of the vulnerability and source code.\n\nTip: Think of the most annoying vulnerability you had to fix in your career, and re-create the use case example for your GitLab Duo adoption. The [MITRE CWE Top 25 of the most dangerous software weaknesses](https://cwe.mitre.org/top25/archive/2023/2023_top25_list.html) also provides a good starting point.  \n\nThe following example implements [CWE-328: Use of a weak hash function](https://cwe.mitre.org/data/definitions/328.html) by using `md5`. It is correctly identified by [SAST scanning](https://docs.gitlab.com/ee/user/application_security/sast/). \n\n```python\nimport hashlib\n\nclass User:\n    def __init__(self, username, password):\n        self.username = username\n        self.password = password\n\n    def set_password(self, password):\n        self.password = hashlib.md5(password.encode()).hexdigest()\n```\n\n![AI for vulnerabilities - image 8](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098116/Blog/Content%20Images/Blog/Content%20Images/image7_aHR0cHM6_1750098116064.png)\n\nClick on the button in the upper right `Resolve with merge request`.  This will open an MR that uses AI to propose the fix. For this vulnerability, one possible fix could be using a different hash function. \n\n![AI for vulnerabilities - image 9](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098116/Blog/Content%20Images/Blog/Content%20Images/image1_aHR0cHM6_1750098116065.png)\n\nAnother common vulnerability example is not checking function error codes or potential exceptions. The following C code snippets implement an example for timing attacks against file operations with [CWE-362](https://cwe.mitre.org/data/definitions/362.html) for the `fopen()` and `chmod()` calls. 
\n\n```c\n#include \u003Cstdio.h>\n#include \u003Cstring.h>\n#include \u003Csys/mman.h>\n#include \u003Csys/stat.h>\n#include \u003Cunistd.h>\n\nint main(int argc, char **argv) {\n\n    // File operations\n    char *fname = \"gitlab.keksi\";\n\n    FILE *fp;\n    fp = fopen(fname, \"r\");\n    fprintf(fp, \"Hello from GitLab Duo Vulnerability Resolution Challenge\");\n    fclose(fp);\n\n    // Potential chmod() timing attacks    \n\n    // Make the file world readable\n    chmod(fname, S_IRWXU|S_IRWXG|S_IRWXO);\n\n    return 0;\n}\n```\n\nThe SAST report for `chmod()` can look like the following: \n\n![AI for vulnerabilities - image 10](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098116/Blog/Content%20Images/Blog/Content%20Images/image6_aHR0cHM6_1750098116065.png)\n\nThe proposed `chmod()` merge request includes error handling, and fixes another potential issue with world writable files, changing the permissions from `777` to `600`.\n\n![AI for vulnerabilities - image 11](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098116/Blog/Content%20Images/Blog/Content%20Images/image3_aHR0cHM6_1750098116066.png)\n\n> Try this async exercise: Find, analyze, and fix the vulnerability for the `fopen()` function.\n\n## More AI assistance required from GitLab Duo \n\nOften, a security problem can be resolved with a quick fix or a workaround that grants the development teams time to discuss and plan a more long-term solution. In other cases, the problem becomes more complex and requires feature APIs disabled, or firewall mitigation, until a proper fix can be rolled into production.\n\nGitLab Duo offers additional AI-powered features that can help resolve these issues. \n\n**Code Explanation:** As a developer or security engineer, it's crucial to feel confident in the changes you've made. 
Within the IDE, you can use the [Code Explanation feature](https://docs.gitlab.com/ee/user/gitlab_duo_chat/examples.html#explain-code-in-the-ide) to gain a deeper understanding of the AI-suggested fix for the vulnerability. This ensures you know exactly what adjustments have been made and why.\n\n**Root Cause Analysis:** If the fix breaks your pipeline, you can utilize the [Root Cause Analysis feature](https://about.gitlab.com/blog/developing-gitlab-duo-blending-ai-and-root-cause-analysis-to-fix-ci-cd/). This tool helps identify and explain the underlying problem, allowing you to address it effectively. After applying the necessary corrections, you can rerun the tests to ensure a successful resolution.\n\n**Refactor:** Even if the vulnerability has been fixed, it's worth considering if the code can be written in a safer manner. In the IDE, you can open GitLab Duo Chat and use the [refactor action](https://docs.gitlab.com/ee/user/gitlab_duo_chat/examples.html#refactor-code-in-the-ide) to explore alternative, more secure ways to write your code. This proactive approach helps maintain a robust and secure codebase.\n\nBy leveraging these GitLab Duo features, you can confidently navigate and resolve vulnerabilities, ensuring your code remains secure and efficient.\n\n## What’s next?\n\nWe plan to bring both Vulnerability Explanation and Vulnerability Resolution \"left\" by incorporating them directly into the MR process. This integration ensures that you can address and resolve vulnerabilities earlier in the development cycle, streamlining your workflow and enhancing code security from the outset.\n\n## Get started with GitLab Duo\n\nPlease see our [documentation](https://docs.gitlab.com/ee/user/gitlab_duo/turn_on_off.html) on how to enable the feature available to our GitLab Ultimate customers. 
Also, GitLab Duo [Vulnerability Explanation](https://docs.gitlab.com/ee/user/application_security/vulnerabilities/#explaining-a-vulnerability) and [Vulnerability Resolution](https://docs.gitlab.com/ee/user/application_security/vulnerabilities/#vulnerability-resolution) will soon be coming to GitLab self-managed and GitLab Dedicated.\n\nYou can keep up with what's new in GitLab Duo by [following the \"Developing GitLab Duo\" blog series](https://about.gitlab.com/blog/developing-gitlab-duo-series/).\n\n> Start [a free trial of GitLab Duo](https://about.gitlab.com/gitlab-duo/#free-trial) to bring these powerful vulnerability remediation benefits to your own organization!\n",[9,678,725,704,724],{"slug":1299,"featured":91,"template":680},"developing-gitlab-duo-use-ai-to-remediate-security-vulnerabilities","content:en-us:blog:developing-gitlab-duo-use-ai-to-remediate-security-vulnerabilities.yml","Developing Gitlab Duo Use Ai To Remediate Security Vulnerabilities","en-us/blog/developing-gitlab-duo-use-ai-to-remediate-security-vulnerabilities.yml","en-us/blog/developing-gitlab-duo-use-ai-to-remediate-security-vulnerabilities",{"_path":1305,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":1306,"content":1312,"config":1318,"_id":1320,"_type":14,"title":1321,"_source":16,"_file":1322,"_stem":1323,"_extension":19},"/en-us/blog/devsecops-agentic-ai-now-on-gitlab-self-managed-ultimate-on-aws",{"title":1307,"description":1308,"ogTitle":1307,"ogDescription":1308,"noIndex":6,"ogImage":1309,"ogUrl":1310,"ogSiteName":693,"ogType":694,"canonicalUrls":1310,"schema":1311},"DevSecOps + Agentic AI: Now on GitLab Self-Managed Ultimate on AWS","Start using AI-powered, DevSecOps-enhanced agents in your AWS GitLab Self-Managed Ultimate instance. 
Enjoy the benefits of GitLab Duo and Amazon Q in your organization.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749659604/Blog/Hero%20Images/Screenshot_2024-11-27_at_4.55.28_PM.png","https://about.gitlab.com/blog/devsecops-agentic-ai-now-on-gitlab-self-managed-ultimate-on-aws","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"DevSecOps + Agentic AI: Now on GitLab Self-Managed Ultimate on AWS\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Jackie Porter\"}],\n        \"datePublished\": \"2025-01-16\",\n      }",{"title":1307,"description":1308,"authors":1313,"heroImage":1309,"date":1315,"body":1316,"category":10,"tags":1317},[1314],"Jackie Porter","2025-01-16","We are thrilled to announce the GitLab Duo with Amazon Q offering, previously [shared at AWS 2024 re:Invent](https://about.gitlab.com/blog/gitlab-duo-with-amazon-q-devsecops-meets-agentic-ai/), is now available in Preview (Beta) for GitLab Self-Managed Ultimate users, at no additional cost. This milestone brings AI agentic experiences to organizations that maintain their own GitLab instance.\n\n### What does this mean for GitLab Self-Managed Ultimate customers? \n\nBeginning in our [17.8 release](https://about.gitlab.com/releases/2025/01/16/gitlab-17-8-released/), GitLab Self-Managed Ultimate customers can now take advantage of the GitLab Duo with Amazon Q Preview (Beta) [capabilities](https://docs.gitlab.com/ee/user/duo_amazon_q/). 
There are three key experiences you will be able to access: \n- AI-powered feature development: Use the `/q dev` quick action to transform requirements into merge-ready code.\n- Automated code reviews: Leverage `/q review` for instant, intelligent feedback on code quality and security.\n- Java modernization: Streamline Java application upgrades with `/q transform`.\n\n### Getting started with the Preview (Beta) \n\nTo use these capabilities in your GitLab Self-Managed Ultimate instance:\n\n- Ensure you meet the [prerequisites](https://docs.gitlab.com/ee/user/duo_amazon_q/setup.html#prerequisites), including upgrading to GitLab 17.8, have an Ultimate subscription (no trial access), and have the instance hosted on AWS. \n- Enable your GitLab Duo with Amazon Q integration settings.\n- Configure IAM identity and roles in AWS and the GitLab AI gateway.\n- Add the Amazon Q user to the project. \n\nFor more detailed setup information, see our [documentation](https://docs.gitlab.com/ee/user/duo_amazon_q/setup.html). \n\n### Looking ahead\n\nThis Preview release represents our commitment to bringing enterprise-grade AI capabilities to all GitLab Ultimate customers. We're excited to work closely with our customers during this Preview (Beta) period to ensure GitLab Duo with Amazon Q delivers a superior experience. \nWe encourage GitLab Self-Managed Ultimate customers to begin exploring these capabilities and provide feedback. Your input will be invaluable in shaping the future of AI-powered development in GitLab.\n\n### Get started today \n\nGitLab Self-Managed Ultimate customers can begin enabling and configuring GitLab Duo with Amazon Q as outlined in our [setup documentation](https://docs.gitlab.com/ee/user/duo_amazon_q/setup.html). To learn more about how the Preview (Beta) release can transform your software development, visit our [website](https://about.gitlab.com/partners/technology-partners/aws/#interest). 
\nStay tuned for regular updates as we continue to enhance and expand the capabilities of GitLab Duo with Amazon Q.\n",[9,482,725,787,1135],{"slug":1319,"featured":6,"template":680},"devsecops-agentic-ai-now-on-gitlab-self-managed-ultimate-on-aws","content:en-us:blog:devsecops-agentic-ai-now-on-gitlab-self-managed-ultimate-on-aws.yml","Devsecops Agentic Ai Now On Gitlab Self Managed Ultimate On Aws","en-us/blog/devsecops-agentic-ai-now-on-gitlab-self-managed-ultimate-on-aws.yml","en-us/blog/devsecops-agentic-ai-now-on-gitlab-self-managed-ultimate-on-aws",{"_path":1325,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":1326,"content":1332,"config":1338,"_id":1340,"_type":14,"title":1341,"_source":16,"_file":1342,"_stem":1343,"_extension":19},"/en-us/blog/devsecops-platforms-give-smbs-security-muscle",{"title":1327,"description":1328,"ogTitle":1327,"ogDescription":1328,"noIndex":6,"ogImage":1329,"ogUrl":1330,"ogSiteName":693,"ogType":694,"canonicalUrls":1330,"schema":1331},"DevSecOps platforms give SMBs security muscle","A single platform enables teams to build, test, and deploy secure software with fewer resources.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749667094/Blog/Hero%20Images/container-security.jpg","https://about.gitlab.com/blog/devsecops-platforms-give-smbs-security-muscle","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"DevSecOps platforms give SMBs security muscle\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Sharon Gaudin\"}],\n        \"datePublished\": \"2023-01-10\",\n      }",{"title":1327,"description":1328,"authors":1333,"heroImage":1329,"date":1335,"body":1336,"category":1156,"tags":1337},[1334],"Sharon Gaudin","2023-01-10","\nDevOps professionals with both security training and experience come at a high price and can be hard to find. 
That makes it especially difficult for startups and small and medium-sized businesses (SMBs), which generally don’t have deep pockets, to get the security professionals they need.\n\nSmaller businesses often end up with no security team, so they have to hire consultants. Even worse, they might end up having little to no security help at all, which will cause problems for their customers as well as their own business.\n\nOne efficient [way to deal with that](/blog/6-ways-smbs-can-leverage-the-power-of-a-devops-platform/) is to adopt a DevSecOps platform, which enables organizations to build, test, and deploy secure software with fewer hands and [expenses](/blog/how-smbs-can-save-with-gitlabs-devops-platform/).\n\n“Someone in an SMB likely doesn’t have enough expertise, or even enough people, in-house to handle every part of DevOps, so they end up having to hire a contractor or consultant to take on things like security and monitoring, and that strains their budget,” says [Fatima Sarah Khalid](https://gitlab.com/sugaroverflow), a developer evangelist at GitLab. “By adopting the GitLab DevSecOps Platform, they can more easily handle this work, despite limited resources.”\n \n## Four benefits for SMBs\n\nSo how does a complete DevSecOps platform add security muscle to a small business? \n\n### 1. Finding vulnerabilities early\n \nWith a single, end-to-end platform, [security is integrated throughout](/stages-devops-lifecycle/secure/), and not just bolted on as an afterthought. With capabilities like dynamic and static application security testing, vulnerability management, and dependency and container scanning, developers can find vulnerabilities earlier in the process when they often can be more easily and quickly fixed. By shifting security left this way, teams can perform threat and vulnerability analysis as developers create the code - not when it’s about to be deployed. 
Shifting security left also creates more secure software, and decreases the time it would have taken to track down a problem created much earlier in the process.\n \n### 2. Easing work with automation\n \nAutomation, which is built into a single DevSecOps platform, is critical because it brings consistency and repeatability to the entire software lifecycle, reducing the potential for human error and minimizing the introduction of bugs and risks. And that enables SMBs to produce more secure software for their own organizations, as well as for their customers.\n \nAnother major advantage of automation is that it minimizes the need for a lot of extra hands-on and time-consuming work, like code reviews and testing. Startups and small businesses, by nature, have smaller DevOps teams. They might even have an IT team of one or two people, who do everything from building software to serving as the help desk. Saving them from having to do repetitive manual work gives them back precious time they can spend on more innovative and productive jobs.\n \nAll of that automated testing is automatically logged and documented, helping organizations create easily searchable and useful best practices that will help speed future software builds.\n \n### 3. Ensuring compliance\n \nSMBs and companies just getting off the ground don’t want to get tripped up by tricky and costly compliance issues. Luckily, the same end-to-end platform enables teams to verify the compliance of their code without leaving their workflow. In GitLab, for example, compliance confirmation lives within the platform and is automated. Developers don't have to context-switch among different point solutions, boosting their productivity and efficiency. Automating compliance also removes one more task from [developers’ already busy schedules](/blog/ease-pressure-on-smb-developers-with-a-devops-platform/).\n \n### 4. 
Establishing security imperatives\n \nA DevSecOps platform gives SMBs speed and efficiency, without requiring them to string together various security tools or hire security consultants. With a platform, because security practices and automation are integrated from the very start, an SMB’s DevOps environment has a solid security foundation. One solution. One answer to security needs.\n\n## Meeting the security need\n \nIn today’s environment, security and compliance are business imperatives. There’s no getting around it.\n\nSo having a strategic, end-to-end platform approach, where security and compliance are embedded from planning to production, provides efficiency and value unmatched by traditional, third-party application security vendors. Companies that may be using DevOps but are only tacking together different tools simply aren’t getting the security advantages that come from a single DevSecOps application.\n\nStartups and SMBs have a steep hill to climb just to survive. Between March 2020 and March 2021, 1 million small businesses opened in the U.S., but 833,458 closed, according to the U.S. Small Business Administration. And in a volatile economic climate, survival gets even tougher. Today’s high inflation rates and market instability have small businesses bracing for economic uncertainties, according to the [MetLife & U.S. Chamber of Commerce Small Business Index](https://www.uschamber.com/sbindex/summary).\n\nAnd those numbers are just about sheer survival. That’s not to mention actually gaining a solid foothold in an organization’s industry, attracting loyal customers, and successfully taking on bigger competitors, which just makes the hill small businesses are climbing even steeper.\n\nPrepare to make that climb easier by migrating to a single, end-to-end platform. 
[Download our SMB-focused ebook](https://page.gitlab.com/resources-ebook-trading-diy-devops-for-a-single-platform-smb.html) learn the advantages to moving from a DIY DevOps toolchain to GitLab’s platform.\n",[849,703,678,9],{"slug":1339,"featured":6,"template":680},"devsecops-platforms-give-smbs-security-muscle","content:en-us:blog:devsecops-platforms-give-smbs-security-muscle.yml","Devsecops Platforms Give Smbs Security Muscle","en-us/blog/devsecops-platforms-give-smbs-security-muscle.yml","en-us/blog/devsecops-platforms-give-smbs-security-muscle",{"_path":1345,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":1346,"content":1352,"config":1359,"_id":1361,"_type":14,"title":1362,"_source":16,"_file":1363,"_stem":1364,"_extension":19},"/en-us/blog/empowering-modelops-and-hpc-workloads-with-gpu-enabled-runners",{"title":1347,"description":1348,"ogTitle":1347,"ogDescription":1348,"noIndex":6,"ogImage":1349,"ogUrl":1350,"ogSiteName":693,"ogType":694,"canonicalUrls":1350,"schema":1351},"GPU-enabled runners for ModelOps and HPC workloads in CI/CD","Learn how to leverage our GitLab-hosted GPU-enabled runners for ModelOps and high-performance computing workloads.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749682702/Blog/Hero%20Images/gitlab-data-science-icon.png","https://about.gitlab.com/blog/empowering-modelops-and-hpc-workloads-with-gpu-enabled-runners","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Empower ModelOps and HPC workloads with GPU-enabled runners integrated with CI/CD\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Gabriel Engel\"}],\n        \"datePublished\": \"2023-07-06\",\n      }",{"title":1353,"description":1348,"authors":1354,"heroImage":1349,"date":1356,"body":1357,"category":10,"tags":1358},"Empower ModelOps and HPC workloads with GPU-enabled runners integrated with CI/CD",[1355],"Gabriel Engel","2023-07-06","\n\n\u003Ci>This blog 
post is the latest in an ongoing series about GitLab's journey to [build and integrate AI/ML into our DevSecOps platform](/blog/ai-ml-in-devsecops-series/). Start with the first blog post: [What the ML is up with DevSecOps and AI?](/blog/what-the-ml-ai/). Throughout the series, we'll feature blogs from our product, engineering, and UX teams to showcase how we're infusing AI/ML into GitLab.\u003C/i>\n\nIn today's fast-paced world, organizations are constantly looking to improve their [ModelOps](/direction/modelops/) and high-performance computing (HPC) capabilities. Leveraging powerful graphical processing units ([GPUs](https://www.techtarget.com/searchvirtualdesktop/definition/GPU-graphics-processing-unit)) has become a game-changer for accelerating machine learning workflows and compute-intensive tasks. To help meet these evolving needs, we recently released our first GPU-enabled runners on GitLab.com.\n\nSecurely hosting a GitLab Runner environment for ModelOps and HPC is non-trivial and requires a lot of knowledge and time to set up and maintain. In this blog post, we'll look at some real-world examples of how you can harness the potential of GPU computing for ModelOps or HPC workloads while taking full advantage of a SaaS solution.\n\n## What are GPU-enabled runners?\nGPU-enabled runners are dedicated computing resources for the AI-powered DevSecOps platform. They provide accelerated processing power for ModelOps and HPC such as the training or deployment of large language models ([LLMs](https://www.techtarget.com/whatis/definition/large-language-model-LLM)) as part of ModelOps workloads. In the first iteration of releasing GPU-enabled runners, [GitLab.com SaaS offers](https://docs.gitlab.com/ee/ci/runners/saas/gpu_saas_runner.html) the GCP `n1-standard-4` machine type (4 vCPU, 15 GB memory) with 1 NVIDIA T4 (16 GB memory) attached. The runner behaves like a GitLab Runner on Linux, using the docker+machine [executor](https://docs.gitlab.com/runner/executors/). 
\n\n## Using GPU-enabled runners\nTo take advantage of GitLab GPU-enabled runners, follow these steps:\n\n### 1. Have a project on GitLab.com\nAll projects on GitLab.com SaaS with a `Premium` or `Ultimate` [subscription](https://about.gitlab.com/pricing/) have the GPU-enabled runners enabled by default - no additional configuration is required.\n\n### 2. Create a job running on GPU-enabled runners\nCreate a job in your `.gitlab-ci.yml` configuration file, and set the [runner `tag`](https://docs.gitlab.com/ee/ci/runners/configure_runners.html#use-tags-to-control-which-jobs-a-runner-can-run) to the `saas-linux-medium-amd64-gpu-standard` value. \n\n```yaml\ngpu-job:\n  stage: build\n  tags:\n    - saas-linux-medium-amd64-gpu-standard\n```\n\n### 3. Select a Docker image with the Nvidia CUDA driver\n\nThe CI/CD job runs in an isolated virtual machine (VM) with a bring-your-own-image policy as with [GitLab SaaS runners on Linux](https://docs.gitlab.com/ee/ci/runners/saas/linux_saas_runner.html). GitLab mounts the GPU from the host VM into your isolated environment. You must use a Docker image with the GPU driver installed to use the GPU. For Nvidia GPUs, you can use the [CUDA Toolkit](https://catalog.ngc.nvidia.com/orgs/nvidia/containers/cuda) directly, or third-party images with Nvidia drivers installed, such as the [TensorFlow GPU image](https://hub.docker.com/r/tensorflow/tensorflow/).\n\nThe CI/CD job configuration for the Nvidia CUDA base Ubuntu image looks like this:\n\n```yaml\n  image: nvcr.io/nvidia/cuda:12.1.1-base-ubuntu22.04\n```\n\n### 4. Verify that the GPU is working\nTo verify that the GPU drivers are working correctly, you can execute the `nvidia-smi` command in the CI/CD job `script` section. 
\n\n```yaml\n  script:\n    - nvidia-smi\n```\n\n## Basic usage examples\nLet's explore some basic scenarios where GPU-enabled runners can supercharge your ModelOps and HPC workloads:\n\n### Example 1: ModelOps with Python\nIn this example, we train a model on our GPU-enabled runner defined in the `train.py` file using the Nvidia CUDA base Ubuntu image mentioned earlier.\n\n`.gitlab-ci.yml` file:\n```yaml\nmodel-training:\n  stage: build\n  tags:\n    - saas-linux-medium-amd64-gpu-standard\n  image: nvcr.io/nvidia/cuda:12.1.1-base-ubuntu22.04\n  script:\n    - apt update\n    - apt install -y --no-install-recommends python3 python3-pip \n    - pip3 install -r requirements.txt\n    - python3 --version\n    - python3 train.py\n```\n\n### Example 2: Scientific simulations and HPC\nComplex scientific simulations require significant computing resources. GPU-enabled runners can accelerate these simulations, allowing you to get results in less time.\n\n`.gitlab-ci.yml` file:\n```yaml\nsimulation-run:\n  stage: build\n  tags:\n    - saas-linux-medium-amd64-gpu-standard\n  image: nvcr.io/nvidia/cuda:12.1.1-base-ubuntu22.04\n  script:\n    - ./run_simulation --input input_file.txt\n```\n\n## Advanced usage examples\nLet's go through some real-world scenarios of how we use GPU-enabled runners at GitLab.\n\n### Example 3: Python model training with a custom Docker image\nFor our third example, we will use this [handwritten digit recognition model](https://gitlab.com/gitlab-org/modelops/demos/handwritten-digit-recognition). We are using this project as a demo to showcase or try out new ModelOps features.\n\n[Open the project](https://gitlab.com/gitlab-org/modelops/demos/handwritten-digit-recognition) and fork it into your preferred namespace. You can follow the next steps using the [Web IDE](https://docs.gitlab.com/ee/user/project/web_ide/) in the browser, or clone the project locally to create and edit the files. 
Some of the next steps require you to override existing configuration in the `Dockerfile` and `.gitlab-ci.yml`. \n\nAs we need more pre-installed components and want to save installation time when training the model, we decided to create a custom Docker image with all dependencies pre-installed. This also gives us full control over the build environment we use and allows us to reuse it locally without relying on the `.gitlab-ci.yml` implementation.\n\nIn addition, we are using a more complete pipeline configuration with the following stages:\n\n```yaml\nstages:\n  - build\n  - test\n  - train\n  - publish\n```\n\n![GPU pipeline overview](https://about.gitlab.com/images/blogimages/2023-07-06-gpu-enabled-runners-for-modelops/pipeline-overview.png)\n\n#### Building a custom Docker image\nThe first step is to define a `Dockerfile`. In this example, we start with the Nvidia CUDA base Ubuntu image and then install `Python3.10`. Using `pip install`, we then add all the required libraries specified in a `requirements.txt` file.\n\n```docker\nFROM nvcr.io/nvidia/cuda:12.1.1-base-ubuntu22.04\n\n# Update and install required packages\nRUN apt-get update && apt-get install -y \\\n    python3.10 \\\n    python3.10-dev \\\n    python3-pip \\\n    && rm -rf /var/lib/apt/lists/*\n\n# Set Python 3.10 as the default Python version\nRUN ln -s /usr/bin/python3.10 /usr/bin/python\n\n# Copy the requirements.txt file\nCOPY requirements.txt /tmp/requirements.txt\n\n# 
Install Python dependencies\nRUN pip3 install --no-cache-dir -r /tmp/requirements.txt\n```\n\nIn the `.gitlab-ci.yml` file we use [Kaniko](https://docs.gitlab.com/ee/ci/docker/using_kaniko.html) to build the Docker image and push it into the [GitLab Container Registry](https://docs.gitlab.com/ee/user/packages/container_registry/).\n\n```yaml\nvariables:\n  IMAGE_PATH: \"${CI_REGISTRY_IMAGE}:latest\"\n  GIT_STRATEGY: fetch\n\ndocker-build:\n  stage: build\n  tags:\n    - saas-linux-medium-amd64\n  image:\n    name: gcr.io/kaniko-project/executor:v1.9.0-debug\n    entrypoint: [\"\"]\n  script:\n    - /kaniko/executor\n      --context \"${CI_PROJECT_DIR}\"\n      --dockerfile \"${CI_PROJECT_DIR}/Dockerfile\"\n      --destination \"${IMAGE_PATH}\"\n      --destination \"${CI_REGISTRY_IMAGE}:${CI_COMMIT_TAG}\"\n  rules:\n    - if: $CI_COMMIT_TAG\n```\n\nIn [rules](https://docs.gitlab.com/ee/ci/yaml/#rules) we specify that the Docker image build is only triggered for a new Git tag. The reason is simple - we don't want to run the image build process every time we train the model.\n\nTo start the image build job [create a new Git tag](https://docs.gitlab.com/ee/user/project/repository/tags/#create-a-tag). You can either do this by using the `git tag -a v0.0.1` command or via UI. Navigate into `Code > Tags` and click on `New Tag`. 
As Tag name type `v0.0.1` to create a new Git tag and trigger the job.\n\nNavigate to `Build > Pipelines` to verify the `docker-build` job status, and then locate the tagged image following [`Deploy > Container Registry`](https://docs.gitlab.com/ee/user/packages/container_registry/).\n\n![Docker image](https://about.gitlab.com/images/blogimages/2023-07-06-gpu-enabled-runners-for-modelops/gpu-docker-image.png)\n\n#### Testing the Docker image\nTo test the image, we will use the following `test-image` job and run `nvidia-smi` and check that the GPU drivers are working correctly.\n\nThe job configuration in `.gitlab-ci.yml` file looks as follows:\n\n```yaml\ntest-image:\n  stage: test\n  tags:\n    - saas-linux-medium-amd64-gpu-standard\n  image: $IMAGE_PATH\n  script:\n    - nvidia-smi\n  rules:\n    - if: $CI_COMMIT_TAG\n```\n\nWe also include container scanning and more [security scanning](https://docs.gitlab.com/ee/user/application_security/) templates in the `.gitlab-ci.yml` file.\n\n```yaml\ninclude:\n  - template: Security/Secret-Detection.gitlab-ci.yml\n  - template: Security/Container-Scanning.gitlab-ci.yml\n  - template: Jobs/Dependency-Scanning.gitlab-ci.yml\n  - template: Security/SAST.gitlab-ci.yml\n```\n\n#### Training the model with our custom Docker image\nNow that we have built our Custom docker image, we can train the model without installing any more dependencies in the job.\n\nThe train job in our `.gitlab-ci.yml` looks like this:\n\n```yaml\ntrain:\n  stage: train\n  tags:\n    - saas-linux-medium-amd64-gpu-standard\n  image: $IMAGE_PATH\n  script:\n    - python train_digit_recognizer.py\n  artifacts:\n    paths:\n      - mnist.h5\n    expose_as: 'trained model'\n```\n\nNavigate to `Build > Pipelines` to see the job logs.\n\n![Train job logs](https://about.gitlab.com/images/blogimages/2023-07-06-gpu-enabled-runners-for-modelops/train-job-log.png)\n\nFrom here, you can also inspect the `train` job artifacts.\n\n#### Publishing the model\nIn the 
last step of our `.gitlab-ci.yml` file, we are going to publish the trained model.\n\n```yaml\npublish:\n  stage: publish\n  when: manual\n  dependencies:\n    - train\n  image: curlimages/curl:latest\n  script:\n    - 'curl --header \"JOB-TOKEN: $CI_JOB_TOKEN\" --upload-file mnist.h5 \"${CI_API_V4_URL}/projects/${CI_PROJECT_ID}/packages/generic/MNIST-Model/${CI_COMMIT_TAG}/mnist.h5\"'\n```\n\nNavigate to `Build > Pipelines` and trigger the `publish` job manually. After that, navigate into `Deploy > Package Registry` to verify the uploaded trained model.\n\n![Package Registry](https://about.gitlab.com/images/blogimages/2023-07-06-gpu-enabled-runners-for-modelops/package-registry.png)\n\n### Example 4: Jupyter notebook model training for ML-powered GitLab Issue triage\n\nIn the last example, we are using our GPU-enabled runner to train the internal [GitLab model to triage issues](https://gitlab.com/gitlab-org/ml-ops/tanuki-stan/-/tree/using-gpu-enabled-runner). We use this model at GitLab to determine and assign issues to the right team from the context of the issue description.\n\nDifferent from the previous examples, we now use the [`tensorflow-gpu` container image](https://hub.docker.com/r/tensorflow/tensorflow) and install the [requirements](https://gitlab.com/gitlab-org/ml-ops/tanuki-stan/-/blob/using-gpu-enabled-runner/notebooks/requirements.tensorflow-gpu.txt) in the job itself.\n\n`.gitlab-ci.yml` configuration:\n\n```yaml\ntrain:\n  tags:\n    - saas-linux-medium-amd64-gpu-standard\n  image: tensorflow/tensorflow:2.4.1-gpu\n  script:\n    - nvidia-smi\n    - cd notebooks\n    - pip install -r requirements.tensorflow-gpu.txt\n    - jupyter nbconvert --to script classify_groups.ipynb\n    - apt-get install -y p7zip-full\n    - cd ../data\n    - 7z x -p${DATA_PASSWORD} gitlab-issues.7z\n    - cd ../notebooks\n    - python3 classify_groups.py\n  artifacts:\n    paths:\n      - models/\n  rules:\n    - if: $CI_PIPELINE_SOURCE == \"merge_request_event\" || 
$CI_COMMIT_BRANCH == $CI_DEFAULT_BRANCH  \n      when: manual\n      allow_failure: true\n```\n\n![TensorFlow train](https://about.gitlab.com/images/blogimages/2023-07-06-gpu-enabled-runners-for-modelops/tensorflow-train.png)\n\nIf you are interested in another Jupyter notebook example, check out our recently published video on [Training ML Models using GPU-enabled runner](https://youtu.be/tElegG4NCZ0).\n\n\u003Ciframe width=\"768\" height=\"432\" src=\"https://www.youtube.com/embed/tElegG4NCZ0\" title=\"YouTube video player\" frameborder=\"0\" allow=\"accelerometer; autoplay; clipboard-write; encrypted-media; gyroscope; picture-in-picture; web-share\" allowfullscreen>\u003C/iframe>\n\n## Results\nThe integration of GPU-enabled runners on GitLab.com SaaS opens up a new realm of possibilities for ModelOps and HPC workloads.\nBy harnessing the power of GPU-enabled runners, you can accelerate your machine learning workflows, enable faster data processing, and improve scientific simulations, all while taking full advantage of a SaaS solution and avoiding the hurdles of hosting and maintaining your own build hardware.\n\nWhen you try the GPU-enabled runners, please share your experience in our [feedback issue](https://gitlab.com/gitlab-org/gitlab/-/issues/403008).\n\nCompute-heavy workloads can take a long time. A known problem is timeouts after three hours because of the current [configuration of GitLab SaaS runners](https://docs.gitlab.com/ee/ci/runners/#how-saas-runners-work).\nWe plan to release more powerful compute for future iterations to handle heavier workloads faster. 
You can follow updates about GPU-enabled runners in the [GPU-enabled runners epic](https://gitlab.com/groups/gitlab-org/-/epics/8648) and learn more in the [GPU-enabled runners documentation](https://docs.gitlab.com/ee/ci/runners/saas/gpu_saas_runner.html).\n",[703,9,109,725,704],{"slug":1360,"featured":6,"template":680},"empowering-modelops-and-hpc-workloads-with-gpu-enabled-runners","content:en-us:blog:empowering-modelops-and-hpc-workloads-with-gpu-enabled-runners.yml","Empowering Modelops And Hpc Workloads With Gpu Enabled Runners","en-us/blog/empowering-modelops-and-hpc-workloads-with-gpu-enabled-runners.yml","en-us/blog/empowering-modelops-and-hpc-workloads-with-gpu-enabled-runners",{"_path":1366,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":1367,"content":1370,"config":1375,"_id":1377,"_type":14,"title":1378,"_source":16,"_file":1379,"_stem":1380,"_extension":19},"/en-us/blog/enhance-application-quality-with-ai-powered-test-generation",{"noIndex":6,"title":1368,"description":1369},"Enhance application quality with AI-powered test generation","Learn how GitLab Duo with Amazon Q improves the QA process by automatically generating comprehensive unit tests.",{"title":1368,"description":1369,"authors":1371,"heroImage":1309,"date":1372,"body":1373,"category":10,"tags":1374},[720],"2025-07-03","You know how critical application quality is to your customers and reputation. However, ensuring that quality through comprehensive testing can feel like an uphill battle. You're dealing with time-consuming manual processes, inconsistent test coverage across your team, and those pesky issues that somehow slip through the cracks. 
It's frustrating when your rating drops because quality assurance becomes a bottleneck rather than a safeguard.\n\nHere's where [GitLab Duo with Amazon Q ](https://about.gitlab.com/blog/gitlab-duo-with-amazon-q-agentic-ai-optimized-for-aws/), which delivers agentic AI throughout the software development lifecycle for AWS customers, can help transform your QA process. This AI-powered capability can automatically generate comprehensive unit tests for your code, dramatically accelerating your quality assurance workflow. Instead of spending hours writing tests manually, you can let AI analyze your code and create tests that ensure optimal coverage and consistent quality across your entire application.\n\n## How GitLab Duo with Amazon Q works\n\nSo how does this work? Let's walk through the process together.\nWhen you're working on a new feature, you start by selecting the Java class you've added to your project through a merge request. You simply navigate to your merge request and click on the \"Changes\" tab to see the new code you've added.\n\nNext, you invoke Amazon Q by entering a quick action command. All you need to do is type `/q test` in the issue comment box. It's that simple – just a forward slash, the letter \"q\", and the word \"test\".\n\nOnce you hit enter, Amazon Q springs into action. It analyzes your selected code, understanding its structure, logic, and purpose. The AI examines your class methods, dependencies, and potential edge cases to determine what tests are needed.\n\nWithin moments, Amazon Q generates comprehensive unit test coverage for your new class. It creates tests that cover not just the happy path, but also edge cases and error conditions you might have overlooked. 
The generated tests follow your project's existing patterns and conventions, ensuring they integrate seamlessly with your codebase.\n\n## Why use GitLab Duo with Amazon Q?\n\nHere's the bottom line: You started with a critical challenge – maintaining high-quality applications while dealing with time constraints and inconsistent testing practices. GitLab Duo with Amazon Q addresses this by automating the test generation process, ensuring optimal code coverage and consistent testing standards. The result? Issues are detected before deployment, your applications maintain their quality, and you can develop software faster without sacrificing reliability.\n\nKey benefits of this feature:\n\n* Significantly reduces time spent writing unit tests\n* Ensures comprehensive test coverage across your codebase\n* Maintains consistent testing quality across all team members\n* Catches issues before they reach production\n* Accelerates your overall development velocity\n\nReady to see this game-changing feature in action? Watch how GitLab Duo with Amazon Q can transform your quality assurance process:\n\n\u003C!-- blank line -->\n\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/pxlYJVcHY28?si=MhIz6lnHxc6kFhlL\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\n## Get started with GitLab Duo with Amazon Q today\n\nWant to learn more about GitLab Duo with Amazon Q? 
Visit the [GitLab and AWS partner page](https://about.gitlab.com/partners/technology-partners/aws/) for detailed information.\n\n## Agentic AI resources\n- [Agentic AI guides and resources](https://about.gitlab.com/blog/agentic-ai-guides-and-resources/)\n- [What is agentic AI?](https://about.gitlab.com/topics/agentic-ai/)\n- [GitLab Duo with Amazon Q: Agentic AI optimized for AWS generally available](https://about.gitlab.com/blog/gitlab-duo-with-amazon-q-agentic-ai-optimized-for-aws/)\n- [GitLab Duo with Amazon Q documentation](https://docs.gitlab.com/user/duo_amazon_q/)",[9,725,932,703,724,787],{"featured":91,"template":680,"slug":1376},"enhance-application-quality-with-ai-powered-test-generation","content:en-us:blog:enhance-application-quality-with-ai-powered-test-generation.yml","Enhance Application Quality With Ai Powered Test Generation","en-us/blog/enhance-application-quality-with-ai-powered-test-generation.yml","en-us/blog/enhance-application-quality-with-ai-powered-test-generation",{"_path":1382,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":1383,"content":1389,"config":1395,"_id":1397,"_type":14,"title":1398,"_source":16,"_file":1399,"_stem":1400,"_extension":19},"/en-us/blog/enhance-data-security-with-custom-pii-detection-rulesets",{"title":1384,"description":1385,"ogTitle":1384,"ogDescription":1385,"noIndex":6,"ogImage":1386,"ogUrl":1387,"ogSiteName":693,"ogType":694,"canonicalUrls":1387,"schema":1388},"Strengthen data security with custom PII detection rulesets","This tutorial explains how GitLab's customizable Secret Detection rulesets enhance data security by identifying PII patterns in code repositories. 
Learn how AI can help.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097701/Blog/Hero%20Images/Blog/Hero%20Images/blog-image-template-1800x945%20%285%29_1iy516k40hwBDChKcUJ2zb_1750097700983.png","https://about.gitlab.com/blog/enhance-data-security-with-custom-pii-detection-rulesets","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Strengthen data security with custom PII detection rulesets\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Fernando Diaz\"}],\n        \"datePublished\": \"2025-04-01\",\n      }",{"title":1384,"description":1385,"authors":1390,"heroImage":1386,"date":1392,"body":1393,"category":678,"tags":1394},[1391],"Fernando Diaz","2025-04-01","Protecting sensitive information is more critical than ever. GitLab's Secret Detection feature provides a powerful solution to identify and prevent the exposure of sensitive data. This tutorial explores how GitLab Secret Detection works, how to create custom rulesets for finding personally identifiable information, and how GitLab Duo Chat can streamline the creation of regex patterns for PII detection.\n\n## Understanding GitLab Secret Detection\n\n[GitLab Secret Detection](https://docs.gitlab.com/user/application_security/secret_detection/) is a security scanning feature integrated into the GitLab CI/CD pipeline. It automatically scans your codebase to identify hardcoded secrets, credentials, and other sensitive information that shouldn't be stored in your repository.\n\n### Key benefits\n\n* **Data breach prevention** detects secrets before they're committed to your repository.  \n* **Automated scanning** runs as part of your CI/CD pipeline without manual intervention.  \n* **Customizable rules** extend detection capabilities with custom patterns.  
\n* **Compliance support** helps meet regulatory requirements like GDPR, HIPAA, and the California Privacy Protection Act.\n\n## Create custom rulesets for PII detection\n\nWhile GitLab's default secret detection covers common secrets like API keys and passwords, you may need custom rules to identify specific types of PII relevant to your organization. \n\nTo get started, create a new GitLab project and follow the steps below. You can follow along and see usage examples in our [PII Demo Application](https://gitlab.com/gitlab-da/tutorials/security-and-governance/devsecops/secret-scanning/pii-data-ruleset).\n\n**Step 1: Set up Secret Detection**\n\nEnsure Secret Detection is enabled in your `.gitlab-ci.yml` file:\n\n```\ninclude:\n  - template: Security/Secret-Detection.gitlab-ci.yml\n\nsecret_detection:\n  variables:\n    SECRET_DETECTION_EXCLUDED_PATHS: \"rules,.gitlab,README.md,LICENSE\"\n    SECRET_DETECTION_HISTORIC_SCAN: \"true\"\n```\n\n**Step 2: Create a custom ruleset file**\n\nCreate the directory and file `rules/pii-data-extension.toml`, which contains the regex patterns for PII data along with an allowlist of patterns to ignore. 
Below are patterns to detect passport numbers (USA), phone numbers (USA), and email addresses:\n\n```toml\n[extend]\n# Extends default packaged ruleset, NOTE: do not change the path.\npath = \"/gitleaks.toml\"\n\n# Patterns to ignore (used for tests)\n[allowlist]\ndescription = \"allowlist of patterns and paths to ignore in detection\"\nregexTarget = \"match\"\nregexes = ['''555-555-5555''', '''user@example.com''']\npaths = ['''(.*?)(jpg|gif|doc|pdf|bin|svg|socket)''']\n\n# US Passport Number (USA)\n[[rules]]\nid = \"us_passport_detection\"\ntitle = \"US Passport Number\"\ndescription = \"Detects US passport numbers\"\nregex = '''\\b[A-Z]{1,2}[0-9]{6,9}\\b'''\nkeywords = [\"passport\"]\n\n# Phone Number (USA)\n[[rules]]\nid = \"us_phone_number_detection_basic\"\ntitle = \"US Phone Number\"\ndescription = \"Detects US phone numbers in basic format\"\nregex = '''\\b\\d{3}-\\d{3}-\\d{4}\\b'''\nkeywords = [\"phone\", \"mobile\"]\n\n# Email Address\n[[rules]]\nid = \"email_address\"\ntitle = \"Email Address\"\ndescription = \"Detects email addresses\"\nregex = '''[a-zA-Z0-9._%+-]+@[a-zA-Z0-9.-]+\\.[a-zA-Z]{2,}'''\nkeywords = [\"email\", \"e-mail\"]\n```\n**Step 3: Extend Secret Detection with the custom ruleset file**\n\nCreate a directory and file `.gitlab/secret-detection-ruleset.toml` in the root of your repository. 
This file allows you to extend the standard configuration with the PII rules file, and overwrite the severity of the detected vulnerabilities (default severity is `Critical`).\n\n```\n# Define the pii rules to add to default configuration\n[[secrets.passthrough]]\ntype = \"file\"\ntarget = \"gitleaks.toml\"\nvalue = \"rules/pii-data-extension.toml\"\n\n# Overwrite Phone Number (USA) PII Severity\n[[secrets.ruleset]]\n[secrets.ruleset.identifier]\ntype = \"gitleaks_rule_id\"\nvalue = \"us_phone_number_detection_basic\"\n[secrets.ruleset.override]\nseverity = \"Medium\"\n\n# Overwrite Email Address PII Severity\n[[secrets.ruleset]]\n[secrets.ruleset.identifier]\ntype = \"gitleaks_rule_id\"\nvalue = \"email_address\"\n[secrets.ruleset.override]\nseverity = \"Low\"\n```\n\n**Step 4: Commit your changes**\n\nNow add the changes in the above steps to your project.\n\n```\ncd /path/to/your/project\ngit add .\ngit commit -m \"Add PII data ruleset and Secret Scanning\"\ngit push\n```\n\nOnce the code is committed, Secret Detection will run within the default branch.\n\n**Step 5: Test detection of PII data**\n\nNow that we have configured the Secret Detection scanner, we should perform a test to see if the scanner is detecting the new custom patterns. 
This can be done by creating a merge request, which adds a new file named `customer-data.yaml` with the following:\n\n```yaml  \ncustomers:  \n  test_user:  \n    phone_number: 555-555-5555  \n    email: user@example.com  \n  justin_case:  \n    phone_number: 512-123-4567  \n    passport_number: A12345678  \n    email: justin_case@example.com  \n  chris_p_bacon: \n    phone_number: 305-123-4567  \n    passport_number: B09876543  \n    email: chris_p_bacon@example.com  \n```\n\nThe scanner should now perform the following:\n\n* Ignore the `phone_number` and `email` of `test_user` due to patterns being in allowlist  \n* Detect six potential vulnerabilities due to the information present for both `justin_case` and `chris_p_bacon`\n  * U.S. passport number severity is set to `Critical` (default)  \n  * U.S. phone number severity is set to `Medium` (override)  \n  * Email address severity is set to `Low` (override)\n  * Data from rules override is added to each vulnerability\n\nOnce the [merge request](https://gitlab.com/gitlab-da/tutorials/security-and-governance/devsecops/secret-scanning/pii-data-ruleset/-/merge_requests/4) is submitted, the Secret Detection scanner runs and provides the following results:\n\n![Secret Detection finding custom PII data MR](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097709/Blog/Content%20Images/Blog/Content%20Images/pii_vulns_aHR0cHM6_1750097709683.png)\n\nWhen clicking on a vulnerability, you are presented with detailed vulnerability data based on what was configured in your newly set up rules:\n\n![Expanded custom PII data vulnerability](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097710/Blog/Content%20Images/Blog/Content%20Images/pii_vulns_expanded_aHR0cHM6_1750097709684.png)\n\nThis data allows you to determine the validity of the data present and address it accordingly.\n\nThere are additional ways to configure custom rulesets. 
For example, rules can be applied remotely to several projects, avoiding the need to duplicate the rules file. See the [Secret Detection Configuration documentation](https://docs.gitlab.com/user/application_security/secret_detection/pipeline/configure/) for more information.\n\n## Common PII types to consider\n\nWhen building your custom ruleset, consider including patterns for:\n\n* Social Security Numbers  \n* Credit card numbers  \n* Driver's license numbers  \n* Passport numbers  \n* Email addresses  \n* Phone numbers  \n* IP addresses  \n* Physical addresses  \n* Medical record numbers\n\n## Leveraging GitLab Duo Chat for pattern creation\n\nCreating effective regex patterns can be challenging. This is where [GitLab Duo Chat](https://docs.gitlab.com/user/gitlab_duo_chat/) comes in handy. GitLab Duo Chat is an AI-powered assistant that accelerates development with contextual, conversational AI. Chat is available to any user with an assigned GitLab Duo seat.\n\n### Using GitLab Duo Chat to generate PII regex patterns\n\n1. **Access GitLab Duo Chat:** In the upper right corner, select __GitLab Duo Chat__. A drawer opens on the right side of your screen.\n\n2. **Prompt the chat:** Enter a prompt in the chat input box and press __Enter__ or select __Send__:\n\n```\nI need a regex pattern to detect Canadian Zip Codes in our codebase for secret detection\n```\n\n3. **Review and refine:** GitLab Duo Chat will suggest a regex pattern that you can review and refine:\n\n```\nCanadian postal codes follow the format \"A1A 1A1\" (where A is a letter and 1 is a digit). 
Here's a regex pattern that will detect Canadian postal codes in your codebase, accounting for both spaced and non-spaced formats:\n[A-Za-z]\\d[A-Za-z][ -]?\\d[A-Za-z]\\d\nThis pattern matches:\n* A letter, followed by a digit, followed by a letter\n* An optional space or hyphen\n* A digit, followed by a letter, followed by a digit\nIf you need a more strict pattern that only matches uppercase letters (as official Canadian postal codes use), you can use:\n[A-Z]\\d[A-Z][ -]?\\d[A-Z]\\d\nFor implementation in a secret detection tool, you might want to add word boundaries to avoid partial matches:\n\\b[A-Z]\\d[A-Z][ -]?\\d[A-Z]\\d\\b\nWould you like more details about implementing this in a specific secret detection system?\n```\n\n4. **Test the pattern:** Test the pattern against sample data to ensure it works correctly. You can use sites like [https://regex101.com/](https://regex101.com/) to validate patterns before you submit them to your ruleset.\n\n5. **Add to your ruleset:** Once satisfied, add the pattern to your `rules/pii-data-extension.toml` file and perform some tests to validate. 
This sample [merge request](https://gitlab.com/gitlab-da/tutorials/security-and-governance/devsecops/secret-scanning/pii-data-ruleset/-/merge_requests/5/diffs) contains a newly added rule for Canadian Zip Codes based on the above.\n\nAdditionally, you can use GitLab Duo Chat in:\n\n* [The GitLab Web IDE (VS Code in the cloud)](https://docs.gitlab.com/user/project/web_ide/)  \n* VS Code, with the [GitLab Workflow extension for VS Code](https://marketplace.visualstudio.com/items?itemName=GitLab.gitlab-workflow)  \n* JetBrains IDEs, with the [GitLab Duo Plugin for JetBrains](https://plugins.jetbrains.com/plugin/22325-gitlab-duo)  \n* Visual Studio for Windows, with the [GitLab Extension for Visual Studio](https://marketplace.visualstudio.com/items?itemName=GitLab.GitLabExtensionForVisualStudio)\n\nIn the future, you’ll be able to leverage [GitLab Duo Workflow](https://docs.gitlab.com/user/duo_workflow/) (currently in private beta) to automatically generate and add these patterns to your code base directly from your IDE. GitLab Duo Workflow is an AI agent, which transforms AI from reactive assistant to autonomous contributor, optimizing your software development lifecycle. Learn more about [GitLab Duo Workflow](https://about.gitlab.com/blog/meet-gitlab-duo-workflow-the-future-of-ai-driven-development/).\n\n## Best practices for PII detection\n\n1. **Start small:** Begin with a few critical PII types and expand gradually.  \n2. **Test thoroughly:** Test your patterns against sample data to avoid false positives.  \n3. **Update regularly:** Review and update your rulesets as new PII requirements emerge.  \n4. **Document patterns:** Maintain documentation for your custom regex patterns.  \n5. **Balance precision:** Make patterns specific enough to avoid false positives but flexible enough to catch variations.  \n6. 
**Implement [Secret Push Protection](https://about.gitlab.com/blog/prevent-secret-leaks-in-source-code-with-gitlab-secret-push-protection/):** Prevent PII data from making it into your repository.  \n7. **Set up [Merge Request Approval Policies](https://docs.gitlab.com/user/application_security/policies/merge_request_approval_policies/):** Require approval before merging any possible PII data to your repository.\n\nOnce you have set up a PII data ruleset to meet your organization's needs, remote rulesets can scan for PII data across multiple repositories without the need to duplicate the rules file. Watch this video to learn more:\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/vjJxQz918WE?si=CRdIEodo3ALxVWXO\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\n## Handling Secret Detection findings\n\nWhen GitLab Secret Detection identifies potential PII in your code:\n\n1. **Review the finding:** Assess whether it's a legitimate finding or a false positive.  \n2. **Remediate:** Remove the sensitive data and replace it with environment variables or secrets management.  \n3. **[Redact text from repository](https://docs.gitlab.com/user/project/merge_requests/revert_changes/#redact-text-from-repository):** Permanently delete sensitive or confidential information that was accidentally committed, ensuring it's no longer accessible in your repository's history. \n4. **Track progress:** Use GitLab's security dashboard to monitor ongoing compliance.\n\n## Get started today\n\nGitLab Secret Detection, combined with custom PII rulesets, provides a powerful defense against inadvertent exposure of sensitive information. 
By leveraging GitLab Duo Chat to create precise regex patterns, teams can efficiently implement comprehensive PII detection across their codebase, ensuring regulatory compliance and protecting user data.\n\nRemember that secret detection is just one component of a comprehensive security strategy. Combine it with other GitLab security features like static application security testing, dynamic application security testing, and dependency scanning for a more robust security posture.\n\nStart implementing these practices today to better protect your users' personal information and maintain the security integrity of your applications.\n\n> Start [a free, 60-day trial of GitLab Ultimate and GitLab Duo ](https://about.gitlab.com/free-trial/)today!\n\n## More resources\n\nTo learn more about GitLab security and compliance and how we can help enhance your AppSec workflows, follow the links below:\n\n* [GitLab Security and Compliance Solutions](https://about.gitlab.com/solutions/security-compliance/)  \n* [GitLab DevSecOps Platform](https://about.gitlab.com/platform/)  \n* [GitLab Duo (AI)](https://about.gitlab.com/gitlab-duo/)  \n* [GitLab Application Security documentation](https://docs.gitlab.com/user/application_security/) \n* [Secret Detection documentation](https://docs.gitlab.com/user/application_security/secret_detection/)\n",[678,725,724,482,9],{"slug":1396,"featured":91,"template":680},"enhance-data-security-with-custom-pii-detection-rulesets","content:en-us:blog:enhance-data-security-with-custom-pii-detection-rulesets.yml","Enhance Data Security With Custom Pii Detection 
Rulesets","en-us/blog/enhance-data-security-with-custom-pii-detection-rulesets.yml","en-us/blog/enhance-data-security-with-custom-pii-detection-rulesets",{"_path":1402,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":1403,"content":1408,"config":1414,"_id":1416,"_type":14,"title":1417,"_source":16,"_file":1418,"_stem":1419,"_extension":19},"/en-us/blog/explain-this-code",{"title":1404,"description":1405,"ogTitle":1404,"ogDescription":1405,"noIndex":6,"ogImage":820,"ogUrl":1406,"ogSiteName":693,"ogType":694,"canonicalUrls":1406,"schema":1407},"ML experiment: Explain this source code","Learn how GitLab is experimenting with ML-powered source code explanation features in this fourth installment of our ongoing AI/ML in DevSecOps series.","https://about.gitlab.com/blog/explain-this-code","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"ML experiment: Explain this source code\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Taylor McCaslin\"}],\n        \"datePublished\": \"2023-04-06\",\n      }",{"title":1404,"description":1405,"authors":1409,"heroImage":820,"date":1411,"body":1412,"category":10,"tags":1413},[1410],"Taylor McCaslin","2023-04-06","\n\n\u003Ci>This blog post is part of an ongoing series about GitLab's journey to [build and integrate AI/ML into our DevSecOps platform](/blog/ai-ml-in-devsecops-series/). The series starts here: [What the ML is up with DevSecOps and AI?](/blog/what-the-ml-ai/). Throughout the series, we'll feature blogs from our product, engineering, and UX teams to showcase how we're infusing AI/ML into GitLab.\u003C/i>\n\nDeciphering the source code of a new software project can be a daunting or at least time-consuming task. The code may be poorly documented, or it may be written in a programming language that is unfamiliar to the developer. 
Even if the developer is familiar with the programming language, the code may be complex and difficult to understand. But what if developers had a helpful tool to figure out very quickly what code was doing? With recent advancements in AI models, it's now possible to have code explained in natural language.\n\n## Explain this code with AI\nAt GitLab, we’re experimenting with AI-assisted code explanations. We want to enable software developers to quickly understand source code they encounter. Whether it's starting with a new project, contributing to a project in a language they're not fluent in, or just trying to understand historical code, we want to help developers get up to speed quickly.\n\nIn a rapid prototype, our own [Denys Mishunov](https://gitlab.com/mishunov), Staff Frontend Engineer, and [Michael Le](https://gitlab.com/mle), Senior Product Designer for our [Create::Source Code group](https://handbook.gitlab.com/handbook/product/categories/#source-code-group), leverage AI to power code explanations within [GitLab's repository source code file viewer](https://docs.gitlab.com/ee/user/project/repository/).\n\n\n![Prototype UX for Explain this Code](https://about.gitlab.com/images/blogimages/explain-this-code-hr.png){: .shadow}\n\nAbove, you can see an example of highlighting a selection of code and asking for a code explanation. Watch the full demo below.\n\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube-nocookie.com/embed/xzsFfFqvlnU\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\n## Iterating on AI/ML features\nWhile just an experiment today, we are iterating on how to effectively bring features like this to our customers. 
We're starting on the repository file viewer, and this prototype can be extended to anywhere you interact with code within GitLab, from [merge requests](https://docs.gitlab.com/ee/user/project/merge_requests/) to [snippets](https://docs.gitlab.com/ee/user/snippets.html), and beyond.\n\nThis experiment is just the start of the ways we’re looking to infuse GitLab with AI/ML capabilities to help GitLab users become more efficient and effective at their jobs. We are [looking across the software development lifecycle](/blog/what-the-ml-ai/) for painful and time-consuming tasks that are ideal for AI Assisted features. We’ll continue to share these demos throughout this blog series.\n\nInterested in using these AI-generated features? [Join our waitlist](https://forms.gle/9eeUkPJauKsbLaoz5) and share your ideas.\n\nContinue reading our ongoing series, \"[AI/ML in DevSecOps](/blog/ai-ml-in-devsecops-series/)\".\n\n_Disclaimer: This blog contains information related to upcoming products, features, and functionality. It is important to note that the information in this blog post is for informational purposes only. Please do not rely on this information for purchasing or planning purposes. As with all projects, the items mentioned in this blog and linked pages are subject to change or delay. 
The development, release, and timing of any products, features, or functionality remain at the sole discretion of GitLab._\n",[849,725,704,9],{"slug":1415,"featured":6,"template":680},"explain-this-code","content:en-us:blog:explain-this-code.yml","Explain This Code","en-us/blog/explain-this-code.yml","en-us/blog/explain-this-code",{"_path":1421,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":1422,"content":1428,"config":1433,"_id":1435,"_type":14,"title":1436,"_source":16,"_file":1437,"_stem":1438,"_extension":19},"/en-us/blog/explain-this-vulnerability",{"title":1423,"description":1424,"ogTitle":1423,"ogDescription":1424,"noIndex":6,"ogImage":1425,"ogUrl":1426,"ogSiteName":693,"ogType":694,"canonicalUrls":1426,"schema":1427},"ML experiment: Explain this vulnerability","Learn how GitLab is experimenting with vulnerability explanation and mitigation recommendations in this latest installment of our ongoing 'AI/ML in DevSecOps' series.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749664087/Blog/Hero%20Images/tanukicover.jpg","https://about.gitlab.com/blog/explain-this-vulnerability","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"ML experiment: Explain this vulnerability\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Alana Bellucci\"}],\n        \"datePublished\": \"2023-05-02\",\n      }",{"title":1423,"description":1424,"authors":1429,"heroImage":1425,"date":1430,"body":1431,"category":10,"tags":1432},[1294],"2023-05-02","\n\n\u003Ci>This blog is the latest post an ongoing series about GitLab’s journey to [build and integrate AI/ML into our DevSecOps platform](/blog/ai-ml-in-devsecops-series/). The first blog post can be found [here](/blog/what-the-ml-ai/). 
Throughout the series, we’ll feature blogs from our product, engineering, and UX teams to showcase how we’re infusing AI/ML into GitLab.\u003C/i>\n\nGitLab surfaces vulnerabilities that contain relevant information. However, more often users aren't sure where to start. \nIt takes time to research and synthesize information that is surfaced within the vulnerability record. Moreover, figuring out how to fix a given vulnerability can be difficult.\n\nTo help teams identify an effective way to fix a vulnerability within the context of their specific code base, we have released an [experimental](https://docs.gitlab.com/ee/policy/experiment-beta-support.html#experiment)\n feature that provides GitLab AI-assisted vulnerability recommendations leveraging the explanatory power of large language models. This capability combines basic vulnerability \n information with insights derived from the customer's code to explain the vulnerability in context, demonstrate how it can be exploited, and provide an example fix.\n\n[Isaac Dawson](https://gitlab.com/idawson) and [Dinesh Bolkensteyn](https://gitlab.com/dbolkensteyn), both [GitLab Vulnerability Research](/handbook/engineering/development/sec/secure/vulnerability-research/) \nengineers, tested prompts in a large language model to see if prompts could yield helpful results. After fine-tuning the prompts, they found that some prompts could provide better details\n and even suggest recommendations for a fix to vulnerabilities related to static application security testing ([SAST](https://docs.gitlab.com/ee/user/application_security/sast/)). 
\n In a week's time, Product Designer [Becka Lippert](https://gitlab.com/beckalippert) designed a prototype and [Daniel Tian](https://gitlab.com/dftian), \n [Mo Khan](https://gitlab.com/mokhax), and [Neil McCorrison](https://gitlab.com/nmccorrison) built this experimental feature in GitLab.\n\n![Explain and mitigate this vulnerability with AI](https://about.gitlab.com/images/blogimages/2023-04-27-explain-this-vulnerability.png){: .shadow}\n\n\nThis feature is powered by Google AI. Learn more about [our partnership with Google Cloud](https://about.gitlab.com/press/releases/2023-05-02-gitLab-and-google-cloud-partner-to-expand-ai-assisted-capabilities.html) to enrich GitLab features with generative AI.\n\nYou can explore the \"explain this vulnerability\" feature with a [click-through demo](https://go.gitlab.com/0qIe3O).\n\n## Iterating on AI/ML features\n\nThis [experimental](https://docs.gitlab.com/ee/policy/experiment-beta-support.html#experiment) feature is available in GitLab.com today. We are exploring what this feature could look like for \nother types of vulnerabilities beyond SAST and in a merge request. Have an idea that would make this feature better? Please share it with us, along with any feedback, in this \n[issue](https://gitlab.com/gitlab-org/gitlab/-/issues/407295).\n\nThis experiment is just the start of the ways we're looking to infuse GitLab with AI/ML capabilities to help GitLab users become more efficient and effective at their jobs. We are [looking across the software development lifecycle](/blog/what-the-ml-ai/) for painful and time-consuming tasks that are ideal for AI Assisted features. We'll continue to share these demos throughout this blog series.\n\nInterested in using these AI-generated features? 
[Join our waitlist](https://forms.gle/9eeUkPJauKsbLaoz5) and share your ideas.\n\nContinue reading our ongoing series, \"[AI/ML in DevSecOps](/blog/ai-ml-in-devsecops-series/)\".\n\n_Disclaimer: This blog contains information related to upcoming products, features, and functionality. It is important to note that the information in this blog post is for informational purposes only. Please do not rely on this information for purchasing or planning purposes. As with all projects, the items mentioned in this blog and linked pages are subject to change or delay. The development, release, and timing of any products, features, or functionality remain at the sole discretion of GitLab._\n",[1135,704,703,9],{"slug":1434,"featured":6,"template":680},"explain-this-vulnerability","content:en-us:blog:explain-this-vulnerability.yml","Explain This Vulnerability","en-us/blog/explain-this-vulnerability.yml","en-us/blog/explain-this-vulnerability",{"_path":1440,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":1441,"content":1446,"config":1452,"_id":1454,"_type":14,"title":1455,"_source":16,"_file":1456,"_stem":1457,"_extension":19},"/en-us/blog/extending-code-suggestions",{"title":1442,"description":1443,"ogTitle":1442,"ogDescription":1443,"noIndex":6,"ogImage":820,"ogUrl":1444,"ogSiteName":693,"ogType":694,"canonicalUrls":1444,"schema":1445},"ML experiment: Extending Code Suggestions to more development environments","Learn how GitLab is experimenting with extending Code Suggestions to Visual Studio, JetBrains IDE, Neovim, and other environments in our ongoing 'AI/ML in DevSecOps' series.","https://about.gitlab.com/blog/extending-code-suggestions","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"ML experiment: Extending Code Suggestions to more development environments\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Kai Armstrong\"}],\n        \"datePublished\": \"2023-06-01\",\n      
}",{"title":1442,"description":1443,"authors":1447,"heroImage":820,"date":1449,"body":1450,"category":10,"tags":1451},[1448],"Kai Armstrong","2023-06-01","\n\n\u003Ci>This blog is the latest post in an ongoing series about GitLab's journey to \u003Ca href=\"/blog/ai-ml-in-devsecops-series/\">build and integrate AI/ML into our DevSecOps platform\u003C/a>. The first blog post can be found \u003Ca href=\"/blog/what-the-ml-ai/\">here\u003C/a>. Throughout the series, we'll feature blogs from our product, engineering, and UX teams to showcase how we're infusing AI/ML into GitLab.\u003C/i>\n\nWe've been continuing to [extend the reach](/blog/code-suggestions-for-all-during-beta/) of GitLab Code Suggestions and make [improvements](/blog/code-suggestions-improves-developer-productivity/) to enhance developer productivity. Continuing with our theme of experimentation and iteration, we're now announcing experimental support for Code Suggestions in Visual Studio, JetBrains IDEs, Neovim, and other development environments.\n\n## Code Suggestions for Visual Studio\n\nIn this rapid prototype, [Michael Eddington](https://gitlab.com/mikeeddington), Staff Backend Engineer, built an extension to bring GitLab Code Suggestions to Visual Studio. With this experiment, you can begin writing code and have suggestions provided to help accelerate your development efforts while you type.\n\n![GitLab Code Suggestions in Visual Studio](https://about.gitlab.com/images/blogimages/code-suggestions-visual-studio-ide.gif){: .shadow}\n\nYou can follow our instructions for [getting started](https://gitlab.com/gitlab-org/editor-extensions/experiments/gitlab-code-suggestions-visual-studio-experiment#getting-started) to try this extension out today. 
Provide your feedback about this extension in [this issue](https://gitlab.com/gitlab-org/editor-extensions/experiments/gitlab-code-suggestions-visual-studio-experiment/-/issues/1).\n\n## Code Suggestions for JetBrains IDEs\n\nIn this rapid prototype, [Dinesh Bolkensteyn](https://gitlab.com/dbolkensteyn), Senior Vulnerability Researcher, built a plugin to bring GitLab Code Suggestions to JetBrains IDE. With this experiment, you can begin writing code and have suggestions provided to help accelerate your development efforts while you type.\n\n![GitLab Code Suggestions in JetBrains IDE](https://about.gitlab.com/images/blogimages/code-suggestions-jetbrains-ide.gif){: .shadow}\n\nYou can follow our instructions for [getting started](https://gitlab.com/gitlab-org/editor-extensions/experiments/gitlab-code-suggestions-jetbrains-experiment#getting-started) to try it out today. Provide your feedback about this extension in [this issue](https://gitlab.com/gitlab-org/editor-extensions/experiments/gitlab-code-suggestions-jetbrains-experiment/-/issues/2).\n\n## Code Suggestions Language Server for Neovim and more\n\nIn this rapid prototype, [Julian Thome](https://gitlab.com/julianthome), Staff Vulnerability Research Engineer, and [Michael Henriksen](https://gitlab.com/mhenriksen), Senior Vulnerability Research Engineeer, developed a language server that leverages the Language Server Protocol to provide GitLab Code Suggestions in Neovim or any other [editor with LSP support](https://microsoft.github.io/language-server-protocol/implementors/tools/). \n\n![GitLab Code Suggestions in Neovim](https://about.gitlab.com/images/blogimages/code-suggestions-neovim.gif){: .shadow}\n\nThis language server allows you to configure any supporting editor or IDE to start receiving GitLab Code Suggestions as you type. 
We've provided instructions for getting started with [Neovim](https://gitlab.com/gitlab-org/editor-extensions/experiments/gitlab-code-suggestions-language-server-experiment/-/blob/main/docs/nvim.md), [Sublime Text](https://gitlab.com/gitlab-org/editor-extensions/experiments/gitlab-code-suggestions-language-server-experiment/-/blob/main/docs/sublime.md), and [Emacs](https://gitlab.com/gitlab-org/editor-extensions/experiments/gitlab-code-suggestions-language-server-experiment/-/blob/main/docs/emacs.md) for your convenience. Provide your feedback about these integrations in [this issue](https://gitlab.com/gitlab-org/editor-extensions/experiments/gitlab-code-suggestions-language-server-experiment/-/issues/2).\n\n## Iterating on AI/ML features\n\nWhile these are just experiments today, we are iterating on how to effectively bring mature IDE integrations like these to our customers. We'll continue to refine these integrations and improve the experience to provide you with streamlined code suggestions while you work, wherever you choose to work. We're also working to expand our scope beyond these IDEs so if you have an interest in seeing an additional editor/IDE supported, [let us know](https://gitlab.com/groups/gitlab-org/-/epics/2431).\n\nThis experiment is just the start of the ways we're infusing GitLab with AI/ML capabilities to help GitLab users become more efficient and effective at their jobs. We are [looking across the software development lifecycle](/blog/what-the-ml-ai/) for painful and time-consuming tasks that are ideal for AI-assisted features. We'll continue to share these demos throughout this blog series.\n\nInterested in using these AI-generated features? [Join our waitlist](https://forms.gle/9eeUkPJauKsbLaoz5) and share your ideas.\n\nContinue reading our \"[AI/ML in DevSecOps](/blog/ai-ml-in-devsecops-series/)\" series.\n\n_Disclaimer: This blog contains information related to upcoming products, features, and functionality. 
It is important to note that the information in this blog post is for informational purposes only. Please do not rely on this information for purchasing or planning purposes. As with all projects, the items mentioned in this blog and linked pages are subject to change or delay. The development, release, and timing of any products, features, or functionality remain at the sole discretion of GitLab._\n",[703,725,704,9],{"slug":1453,"featured":6,"template":680},"extending-code-suggestions","content:en-us:blog:extending-code-suggestions.yml","Extending Code Suggestions","en-us/blog/extending-code-suggestions.yml","en-us/blog/extending-code-suggestions",{"_path":1459,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":1460,"content":1463,"config":1470,"_id":1472,"_type":14,"title":1473,"_source":16,"_file":1474,"_stem":1475,"_extension":19},"/en-us/blog/fast-and-secure-ai-agent-deployment-to-google-cloud-with-gitlab",{"noIndex":6,"title":1461,"description":1462},"Fast and secure AI agent deployment to Google Cloud with GitLab","Follow this step-by-step guide, complete with a demo application, to learn how to use agentic AI, along with GitLab's native integrations and CI/CD components.",{"title":1461,"description":1462,"authors":1464,"heroImage":1466,"date":1467,"body":1468,"category":10,"tags":1469},[1465],"Regnard Raquedan","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749670563/Blog/Hero%20Images/cloudcomputing.jpg","2025-07-07","[Agentic AI](https://about.gitlab.com/topics/agentic-ai/) is transforming\nhow we build intelligent applications, but deploying AI agents securely and\nefficiently can be challenging. 
In this tutorial, you'll learn how to deploy\nan AI agent built with Google's Agent Development Kit\n([ADK](https://cloud.google.com/vertex-ai/generative-ai/docs/agent-development-kit/quickstart))\nto Cloud Run using [GitLab's native\nintegrations](https://cloud.google.com/blog/topics/partners/understand-the-google-cloud-gitlab-integration)\nand [CI/CD components](https://docs.gitlab.com/ci/components/).\n\n\n## What are AI agents and why do they matter?\n\n\nAgentic AI represents a significant evolution in artificial intelligence. Unlike traditional generative AI tools that require constant human direction, AI agents leverage advanced language models and natural language processing to take independent action. These systems can understand requests, make decisions, and execute multistep plans to achieve goals autonomously.\n\n\nThis tutorial uses Google's ADK, a flexible and modular framework for developing and deploying AI agents. While optimized for Gemini and the Google ecosystem, ADK is model-agnostic, deployment-agnostic, and built for compatibility with other frameworks.\n\n\n## Our demo application: Canada City Advisor\n\n\nTo demonstrate the deployment process, we'll work with a practical example: the Canada City Advisor. This AI agent helps users find their ideal Canadian city based on their preferences and constraints.\n\n\nHere's how it works:\n\n\n* Users input their budget requirements and lifestyle preferences.  \n\n* The root agent coordinates two sub-agents:  \n\n  * A budget analyzer agent that evaluates financial constraints. This draws data obtained from the Canada Mortgage and Housing Corporation.  \n  * A lifestyle preferences agent that matches cities to user needs. This includes a weather service that uses [Open-Meteo](https://open-meteo.com/) to get the proper city information.  
\n* The system generates personalized city recommendations\n\n\nThis multi-agent architecture showcases the power of agentic AI - different specialized agents working together to solve a complex problem. The sub-agents are only invoked when the root agent determines that budget and lifestyle analysis are needed.\n\n\n![Multi-agent architecture to develop demo application with agentic AI](https://res.cloudinary.com/about-gitlab-com/image/upload/v1751576568/obgxpxvlnxtzifddrrz1.png)\n\n\n## Prerequisites\n\n\nBefore we begin, ensure you have:\n\n\n* A Google Cloud project with the following APIs enabled:  \n\n  * Cloud Run API  \n  * Artifact Registry API  \n  * Vertex AI API  \n* A GitLab project for your source code  \n\n* Appropriate permissions in both GitLab and Google Cloud\n\n\n**Step 1: Set up IAM integration with Workload Identity Federation**\n\n\nThe first step establishes secure, keyless authentication between GitLab and Google Cloud using [Workload Identity Federation](https://cloud.google.com/iam/docs/workload-identity-federation). This eliminates the need for service account keys and improves security.\n\n\nIn your GitLab project:\n\n\n1. Navigate to **Settings > Integrations > Google Cloud IAM.**  \n\n2. Provide the following information:  \n\n   * **Project ID**: Your Google Cloud project ID  \n   * **Project Number**: Found in your Google Cloud console  \n   * **Pool ID**: A unique identifier for your workload identity pool  \n   * **Provider ID**: A unique identifier for your identity provider\n\nGitLab will generate a script for you. Copy this script and run it in your Google Cloud Shell to create the Workload Identity Federation.\n\n\n**Step 2: Configure Google Artifact Registry integration**\n\n\nNext, we'll set up the connection to Google Artifact Registry where our container images will be stored.\n\n\n1. In GitLab, go to **Settings > Integrations > Google Artifact Registry.**  \n\n2. 
Enter:  \n\n   * **Google Cloud Project ID**: Same as in Step 1  \n   * **Repository Name**: Name of an existing Artifact Registry repository  \n   * **Location**: The region where your repository is located\n\n**Important**: The repository must already exist in Artifact Registry. GitLab won't create a new one for you in this context.\n\n\nGitLab will generate commands to set up the necessary permissions. Run these in Google Cloud Shell.\n\n\nAdditionally, add these roles to your service principal for Cloud Run deployment:\n\n\n* `roles/run.admin`  \n\n* `roles/iam.serviceAccountUser`  \n\n* `roles/cloudbuild.builds.editor`\n\n\nYou can add these roles using the following gcloud commands:\n\n\n```shell\n\nGCP_PROJECT_ID=\"\u003Cyour-project-id>\" #replace\n\nGCP_PROJECT_NUMBER=\"\u003Cyour-project-number>\" #replace\n\nGCP_WORKLOAD_IDENTITY_POOL=\"\u003Cyour-pool-id>\" #replace\n\n\ngcloud projects add-iam-policy-binding ${GCP_PROJECT_ID} \\\n  --member=\"principalSet://iam.googleapis.com/projects/${GCP_PROJECT_NUMBER}/locations/global/workloadIdentityPools/${GCP_WORKLOAD_IDENTITY_POOL}/attribute.developer_access/true\" \\\n  --role='roles/run.admin'\n\ngcloud projects add-iam-policy-binding ${GCP_PROJECT_ID} \\\n  --member=\"principalSet://iam.googleapis.com/projects/${GCP_PROJECT_NUMBER}/locations/global/workloadIdentityPools/${GCP_WORKLOAD_IDENTITY_POOL}/attribute.developer_access/true\" \\\n  --role='roles/iam.serviceAccountUser'\n\ngcloud projects add-iam-policy-binding ${GCP_PROJECT_ID} \\\n  --member=\"principalSet://iam.googleapis.com/projects/${GCP_PROJECT_NUMBER}/locations/global/workloadIdentityPools/${GCP_WORKLOAD_IDENTITY_POOL}/attribute.developer_access/true\" \\\n  --role='roles/cloudbuild.builds.editor'\n```\n\n\n**Step 3: Create the CI/CD pipeline**\n\n\nNow for the exciting part – let's build our deployment pipeline! 
GitLab's CI/CD components make this remarkably simple.\n\n\nCreate a `.gitlab-ci.yml` file in your project root:\n\n\n```unset\n\nstages:\n  - build\n  - test\n  - upload\n  - deploy\n\nvariables:\n  GITLAB_IMAGE: $CI_REGISTRY_IMAGE/main:$CI_COMMIT_SHORT_SHA\n  AR_IMAGE: $GOOGLE_ARTIFACT_REGISTRY_REPOSITORY_LOCATION-docker.pkg.dev/$GOOGLE_ARTIFACT_REGISTRY_PROJECT_ID/$GOOGLE_ARTIFACT_REGISTRY_REPOSITORY_NAME/main:$CI_COMMIT_SHORT_SHA\n\nbuild:\n  image: docker:24.0.5\n  stage: build\n  services:\n    - docker:24.0.5-dind\n  before_script:\n    - docker login -u $CI_REGISTRY_USER -p $CI_REGISTRY_PASSWORD $CI_REGISTRY\n  script:\n    - docker build -t $GITLAB_IMAGE .\n    - docker push $GITLAB_IMAGE\n\ninclude:\n  - template: Jobs/Dependency-Scanning.gitlab-ci.yml  # https://gitlab.com/gitlab-org/gitlab/blob/master/lib/gitlab/ci/templates/Jobs/Dependency-Scanning.gitlab-ci.yml\n  - template: Jobs/SAST.gitlab-ci.yml  # https://gitlab.com/gitlab-org/gitlab/blob/master/lib/gitlab/ci/templates/Jobs/SAST.gitlab-ci.yml\n  - template: Jobs/Secret-Detection.gitlab-ci.yml  # https://gitlab.com/gitlab-org/gitlab/blob/master/lib/gitlab/ci/templates/Jobs/Secret-Detection.gitlab-ci.yml\n  - component: gitlab.com/google-gitlab-components/artifact-registry/upload-artifact-registry@main\n    inputs:\n      stage: upload\n      source: $GITLAB_IMAGE\n      target: $AR_IMAGE\n  - component: gitlab.com/google-gitlab-components/cloud-run/deploy-cloud-run@main\n    inputs:\n      stage: deploy\n      project_id: \"\u003Cyour-project-id>\" #replace\n      service: \"canadian-city\"\n      region: \"us-central1\"\n      image: $AR_IMAGE\n```\n\n\nThe pipeline consists of four stages:\n\n\n1. **Build**: Creates the Docker container with your AI agent  \n\n2. **Test**: Runs security scans (container scanning, dependency scanning, SAST)  \n\n3. **Upload**: Pushes the container to Artifact Registry  \n\n4. 
**Deploy**: Deploys to Cloud Run\n\n\nThe great thing about using [GitLab's CI/CD components](https://docs.gitlab.com/ci/components/) is that you only need to provide a few parameters - the components handle all the complex authentication and deployment logic.\n\n\n**Step 4: Deploy and test**\n\n\nWith everything configured, it's time to deploy:\n\n\n1. Commit your code and `.gitlab-ci.yml` to your GitLab repository.  \n\n2. The pipeline will automatically trigger.  \n\n3. Monitor the pipeline progress in GitLab's CI/CD interface.  \n\n4. Once complete, find your Cloud Run URL in the Google Cloud Console.\n\n\nYou'll see each stage execute:\n\n\n* Build stage creates your container.  \n\n* Test stage runs comprehensive security scans.  \n\n* Upload stage pushes to Artifact Registry.  \n\n* Deploy stage creates or updates your Cloud Run service.\n\n\n## Security benefits\n\n\nThis approach provides several security advantages:\n\n\n* **No long-lived credentials:** Workload Identity Federation eliminates service account keys.  \n\n* **Automated security scanning:** Every deployment is scanned for vulnerabilities.  \n\n* **Audit trail:** Complete visibility of who deployed what and when.  \n\n* **Principle of least privilege:** Fine-grained IAM roles limit access.\n\n\n## Summary\n\nBy combining GitLab's security features with Google Cloud's powerful AI and serverless platforms, you can deploy AI agents that are both secure and scalable. The integration between GitLab and Google Cloud eliminates much of the complexity traditionally associated with such deployments.\n\n> Use this tutorial's [complete code\nexample](https://gitlab.com/gitlab-partners-public/google-cloud/demos/ai-agent-deployment)\nto get started now. Not a GitLab customer yet? 
Explore the DevSecOps platform with [a free trial](https://about.gitlab.com/free-trial/).\n",[9,1038,724],{"featured":6,"template":680,"slug":1471},"fast-and-secure-ai-agent-deployment-to-google-cloud-with-gitlab","content:en-us:blog:fast-and-secure-ai-agent-deployment-to-google-cloud-with-gitlab.yml","Fast And Secure Ai Agent Deployment To Google Cloud With Gitlab","en-us/blog/fast-and-secure-ai-agent-deployment-to-google-cloud-with-gitlab.yml","en-us/blog/fast-and-secure-ai-agent-deployment-to-google-cloud-with-gitlab",{"_path":1477,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":1478,"content":1484,"config":1491,"_id":1493,"_type":14,"title":1494,"_source":16,"_file":1495,"_stem":1496,"_extension":19},"/en-us/blog/gitlab-16-ai-and-security-take-center-stage",{"title":1479,"description":1480,"ogTitle":1479,"ogDescription":1480,"noIndex":6,"ogImage":1481,"ogUrl":1482,"ogSiteName":693,"ogType":694,"canonicalUrls":1482,"schema":1483},"GitLab 16: AI and security take center stage","Our GitLab 16 launch event showcased our AI-powered workflows that drive usability improvements, security enhancements, and observability advancements.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749671793/Blog/Hero%20Images/16_0-cover-image.png","https://about.gitlab.com/blog/gitlab-16-ai-and-security-take-center-stage","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"GitLab 16: AI and security take center stage\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"David DeSanto, Chief Product Officer, GitLab\"}],\n        \"datePublished\": \"2023-06-30\",\n      }",{"title":1479,"description":1480,"authors":1485,"heroImage":1481,"date":1487,"body":1488,"category":10,"tags":1489},[1486],"David DeSanto, Chief Product Officer, GitLab","2023-06-30","\nThe new era of DevSecOps is here – and its focus is on improving everyone’s experience through AI-powered workflows that drive 
usability improvements, security enhancements, and observability advancements.\n\nAt our recent GitLab 16 launch event, we highlighted how our platform has evolved to fuel productivity and efficiency, which are top of mind in 2023, according to GitLab’s [2023 Global DevSecOps Report: Productivity & Efficiency Within Reach](https://about.gitlab.com/developer-survey/).\n\n> If you missed the GitLab 16 event, it’s [available on demand](https://about.gitlab.com/sixteen/), so it’s not too late.\n\nWhat I find most exciting about the launch of [GitLab 16](https://about.gitlab.com/releases/2023/05/22/gitlab-16-0-released/) is that it marks a significant milestone for our customers, as well as for GitLab, and heralds the era of AI-powered DevSecOps. We’ve built upon the nearly 500 new capabilities we introduced with GitLab 15 and have continued that upward trajectory. \n\nThe GitLab 16 event showcased amazing new capabilities across our entire DevSecOps platform, which is reinforced by our significant investments in critical areas: \n\n1. Building a world-class DevSecOps experience that includes significant usability improvements, additional collaboration capabilities, and [AI-assisted workflows](https://about.gitlab.com/blog/ai-ml-in-devsecops-series/). \n2. Providing advanced security and compliance, deepening our capabilities, and bringing [software supply chain security](https://about.gitlab.com/blog/the-ultimate-guide-to-software-supply-chain-security/) to the forefront of software development.\n3. Bringing observability, analytics, and feedback into our DevSecOps platform, [empowering organizations to close the SDLC loop](https://docs.gitlab.com/ee/user/analytics/value_streams_dashboard.html) with user data.\n4. 
Offering GitLab for data science workloads to enable data scientists and data engineers to benefit from all the value our DevSecOps Platform provides, including collaboration, reproducibility, and streamlined deployment into production.\n\n## Introducing GitLab Duo\nAs part of GitLab 16, we [unveiled GitLab Duo](https://about.gitlab.com/blog/meet-gitlab-duo-the-suite-of-ai-capabilities/), our suite of enterprise-grade AI capabilities powering DevSecOps workflows. GitLab Duo is integrated directly into the DevSecOps platform, enabling you to write better code faster and more efficiently. And GitLab Duo goes well beyond code creation to deliver AI-assisted workflows at all stages of the software development lifecycle, such as security testing and analysis, observability, and proactive vulnerability detection. Our goal is to help you achieve a 10x improvement in workflow efficiency by tapping into all of the DevSecOps platform’s AI capabilities. \n\nGitLab Duo includes Code Suggestions, Explain this Code, Explain this Vulnerability, Summarize Issue Comments, Chat, and more.\n\nLearn about the [powerful features of GitLab Duo](https://about.gitlab.com/gitlab-duo/).\n\n## Joining forces with Google Cloud\nAt the launch, June Yang, VP of Cloud AI and Industry Solutions at Google Cloud, discussed our partnership with [Google Cloud](https://about.gitlab.com/partners/technology-partners/google-cloud-platform/) (as recently [announced in May](https://about.gitlab.com/press/releases/2023-05-02-gitlab-and-google-cloud-partner-to-expand-ai-assisted-capabilities/)) in which we’re building several joint solutions that will help enterprise customers to improve the efficiency, effectiveness, and quality of their software development processes.\n\nBoth GitLab and Google Cloud are committed to developing new AI-powered solutions that help businesses improve their software development processes and protect their data.\n\nGitLab's vision for generative AI is grounded in privacy, 
security, and transparency. The partnership with Google Cloud enables GitLab to offer private and secure AI-powered features, while ensuring customer intellectual property (i.e., their source code) stays theirs and will not be used for training and fine-tuning of AI models. \n\n## CARFAX’s DevSecOps results\nMark Portofe, Director of Platform Engineering at CARFAX, also joined us for our GitLab 16 launch. It was enlightening to hear how CARFAX has been using GitLab since 2017 to make them more productive and more secure. Mark shared how CARFAX can now [create CI/CD pipelines](https://about.gitlab.com/solutions/continuous-integration/) in hours instead of the days or even weeks it took before – freeing up their developers’ time to focus on getting code to production. As a result, their number of production deployments has increased by 20% year over year.\n\n## GitLab Dedicated\nAs part of our GitLab 16 event, we also shared that [GitLab Dedicated](https://about.gitlab.com/dedicated/), our single-tenant SaaS offering of GitLab’s DevSecOps platform designed to address the needs of customers with stringent compliance requirements, is now generally available. \n\nWith GitLab Dedicated, organizations can access all of the benefits of the DevSecOps platform delivered as a SaaS offering – including faster releases, better security, and more productive developers – while satisfying compliance requirements such as data residency, isolation, and private networking.\n\n## Value Stream Analytics and Dashboards\nWhen it comes to observability, analytics, and feedback, our single application shines by providing end-to-end metrics and insights. We see this being very much native to our DevSecOps platform.\n\nWe’ve made great strides with [Value Streams Dashboards](https://www.youtube.com/watch?v=EA9Sbks27g4), a popular feature with our customers. 
These dashboards combine DORA 4 metrics with GitLab-specific metrics to give organizations insights into the health of their software delivery, identifying areas of efficiency and areas for improvement. \n\nWe are also introducing [Product Analytics](https://about.gitlab.com/blog/introducing-product-analytics-in-gitlab/) in GitLab 16 to close the DevSecOps loop with user metrics and feedback from the applications that organizations are building with GitLab, which they can incorporate into their planning efforts. \n\n## Watch the GitLab 16 launch event\nI’m really proud of the work our teams put into GitLab 16 to make it a reality. To hear more and dig deeper into the amazing capabilities of GitLab 16, check out the [launch event](https://about.gitlab.com/sixteen/).\n",[9,482,1490,678],"releases",{"slug":1492,"featured":6,"template":680},"gitlab-16-ai-and-security-take-center-stage","content:en-us:blog:gitlab-16-ai-and-security-take-center-stage.yml","Gitlab 16 Ai And Security Take Center Stage","en-us/blog/gitlab-16-ai-and-security-take-center-stage.yml","en-us/blog/gitlab-16-ai-and-security-take-center-stage",{"_path":1498,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":1499,"content":1505,"config":1511,"_id":1513,"_type":14,"title":1514,"_source":16,"_file":1515,"_stem":1516,"_extension":19},"/en-us/blog/gitlab-ai-assisted-features",{"title":1500,"description":1501,"ogTitle":1500,"ogDescription":1501,"noIndex":6,"ogImage":1502,"ogUrl":1503,"ogSiteName":693,"ogType":694,"canonicalUrls":1503,"schema":1504},"GitLab details AI-assisted features in the DevSecOps platform","In a fireside chat, CEO and co-founder Sid Sijbrandij shared demos of AI-assisted features available today in gitlab.com.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749669077/Blog/Hero%20Images/ai-fireside-chat.png","https://about.gitlab.com/blog/gitlab-ai-assisted-features","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": 
\"Article\",\n        \"headline\": \"GitLab details AI-assisted features in the DevSecOps platform\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Sid Sijbrandij\"}],\n        \"datePublished\": \"2023-05-03\",\n      }",{"title":1500,"description":1501,"authors":1506,"heroImage":1502,"date":1508,"body":1509,"category":10,"tags":1510},[1507],"Sid Sijbrandij","2023-05-03","\nThis morning, GitLab’s Chief Financial Officer Brian Robins and I led a fireside chat focused on [GitLab’s AI strategy](https://ir.gitlab.com/news-releases/news-release-details/gitlab-hold-ai-fireside-chat-sid-sijbrandij), AI’s role in solving customer pain points, and our AI product roadmap.\n\nAI marks a big industry shift that will make it easier to develop, secure, and operate software. We plan to infuse AI throughout the software development lifecycle by incorporating it into our comprehensive enterprise DevSecOps platform.\n\nWe will lead with a customer-centric approach focused on privacy first, where customers know their intellectual property is secured. One way we are accomplishing this is with [our recently announced generative AI partnership with Google](https://about.gitlab.com/press/releases/2023-05-02-gitLab-and-google-cloud-partner-to-expand-ai-assisted-capabilities.html). This will allow GitLab to use Google's generative AI foundation models to provide customers with AI-powered offerings within our cloud infrastructure. 
We’ll maintain our commitment to protecting user privacy by containing customer intellectual property and source code within GitLab's cloud infrastructure.\n\nWatch the AI fireside chat:\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe width=\"560\" height=\"315\" src=\"https://www.youtube.com/embed/ejWeMdVz8Nk\" title=\"YouTube video player\" frameborder=\"0\" allow=\"accelerometer; autoplay; clipboard-write; encrypted-media; gyroscope; picture-in-picture; web-share\" allowfullscreen>\u003C/iframe>\n\n\u003C/figure>\n\u003C!-- blank line -->\n\nDuring the fireside chat, we introduced AI-assisted features available to GitLab customers today on gitlab.com. We provided a live demo of these capabilities that can be utilized by everyone throughout the software development lifecycle. \n\n![List of AI-assisted capabilities](https://about.gitlab.com/images/blogimages/ai-assisted-capabilities-detailed.png){: .shadow}\n\nWe also discussed how these capabilities are focused on three personas: development, security and operations teams, and have features available for all users. Watch the demos for these capabilities available on gitlab.com today:\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe width=\"560\" height=\"315\" src=\"https://www.youtube-nocookie.com/embed/ILJeqWoVswM\" title=\"YouTube video player\" frameborder=\"0\" allow=\"accelerometer; autoplay; clipboard-write; encrypted-media; gyroscope; picture-in-picture; web-share\" allowfullscreen>\u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\n## AI for Developer Teams \n\n### Code Suggestions\n- Enables developers to write code more efficiently by viewing code suggestions as they type. 
\nLearn more about [Code Suggestions](/blog/ai-assisted-code-suggestions/).\n\n### Suggested Reviewers\n- Helps customers receive faster and higher quality reviews by automatically finding the right people to review a merge request.\nLearn more about [Suggested Reviewers](https://docs.gitlab.com/ee/user/project/merge_requests/reviews/data_usage.html).\n\n### Summarize MR Changes\n- Helps merge request authors to drive alignment and action by efficiently communicating the impact of their changes.\nLearn more about [Summarize MR Changes](/blog/merge-request-changes-summary-ai/).\n\n### Summarize My MR Review\n- Enables better handoffs between authors and reviewers and helps reviewers efficiently understand many merge request suggestions. \nLearn more about [Summarize My MR Review](/blog/summarize-my-merge-request-review/).\n\n## AI for Security and Operations\n\n### Explain This Vulnerability\n- Helps developers remediate vulnerabilities more efficiently and uplevel their skills, enabling them to write more secure code.\nLearn more about [Explain This Vulnerability](/blog/explain-this-vulnerability/).\n\n### Generate Tests in MRs\n- Automates repetitive tasks for developers and helps them catch bugs early.\nLearn more about [Generate Tests in MRs](/blog/merge-request-suggest-a-test/).\n\n### Explain This Code\n- Allows DevSecOps teams to get up to speed quickly on code.\nLearn more about [Explain This Code](/blog/explain-this-code/).\n\n## AI for everyone\n\n### Issue Comment Summaries\n- Quickly gets everyone up to speed on lengthy conversations to ensure they are all on the same page.\nLearn more about [Issue Comment Summaries](/blog/summarize-issues/).\n\n### GitLab Chat\n- Helps quickly identify useful information in large volumes like documentation.\nLearn more about [GitLab Chat](https://gitlab.com/groups/gitlab-org/-/epics/10220).\n\n### Value Stream Forecasting\n- Predicts productivity metrics and identifies anomalies across your software development 
lifecycle.\nLearn more about [Value Stream Analytics](https://docs.gitlab.com/ee/user/group/value_stream_analytics/).\n\nThese are just the beginning of many features we have in the works leveraging generative AI to provide our customers [AI-assisted features](/topics/devops/the-role-of-ai-in-devops/) across our DevSecOps platform. With our value of iteration at the heart of our work, we are actively improving all the capabilities we announced today as well as introducing new capabilities. [AI is in all we do](/company/yearlies/#fy24-yearlies) and we intend to ship many capabilities throughout the year as they become ready.  \n\n> Discover the future of AI-driven software development with our GitLab 17 virtual launch event. [Watch today!](https://about.gitlab.com/seventeen/)",[1135,704,9,703],{"slug":1512,"featured":6,"template":680},"gitlab-ai-assisted-features","content:en-us:blog:gitlab-ai-assisted-features.yml","Gitlab Ai Assisted Features","en-us/blog/gitlab-ai-assisted-features.yml","en-us/blog/gitlab-ai-assisted-features",{"_path":1518,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":1519,"content":1525,"config":1532,"_id":1534,"_type":14,"title":1535,"_source":16,"_file":1536,"_stem":1537,"_extension":19},"/en-us/blog/gitlab-ai-cicd-customization-toolkit",{"title":1520,"description":1521,"ogTitle":1520,"ogDescription":1521,"noIndex":6,"ogImage":1522,"ogUrl":1523,"ogSiteName":693,"ogType":694,"canonicalUrls":1523,"schema":1524},"GitLab AI, CI/CD and customization for secure scaled growth","Find out how the latest developments for the GitLab AI-powered DevSecOps Platform help organizations scale to enterprise levels.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749679194/Blog/Hero%20Images/duo-blog-post.png","https://about.gitlab.com/blog/gitlab-ai-cicd-customization-toolkit","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Drive secure growth at scale: 
Your GitLab AI, CI/CD, and customization toolkit\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Mike Flouton\"}],\n        \"datePublished\": \"2023-10-31\",\n      }",{"title":1526,"description":1521,"authors":1527,"heroImage":1522,"date":1529,"body":1530,"category":10,"tags":1531},"Drive secure growth at scale: Your GitLab AI, CI/CD, and customization toolkit",[1528],"Mike Flouton","2023-10-31","\nScaling up to enterprise-level intensifies the demand for rapid, secure software delivery. Large organizations can easily fall into the trap of single-function silos, making collaboration tricky and slowing development. Over the past few months, we've introduced new capabilities for the GitLab AI-powered DevSecOps Platform to help teams address these hurdles, accelerate innovation, ensure compliance, and fortify their digital defenses.\n- [AI capabilities that reshape speed and security](#ai-capabilities-that-reshape-speed-and-security)\n- [A single, enterprise-ready DevSecOps platform](#a-single-enterprise-ready-devsecops-platform) \n- [A customizable solution that fits the way you work](#a-customizable-solution-that-fits-the-way-you-work)\n\nLet’s take a closer look at what we've been working on and how these advancements benefit growing organizations.\n\n> Bring the best practices of industry leaders to your team. Join GitLab and Nasdaq for an exciting discussion about AI, DevSecOps, and developer productivity. [Register for this webinar today!](https://page.gitlab.com/webcast-fy24q3-devsecops-ai-developer-productivity.html)\n\n## AI capabilities that reshape speed and security\nAI will transform the way organizations develop software. Our [State of AI in Software Development](https://about.gitlab.com/developer-survey/#ai) report, released earlier this year, demonstrates this: 83% of DevSecOps professionals surveyed said implementing AI in their software development processes is essential to avoid falling behind competitors. 
\n\n[GitLab Duo](https://about.gitlab.com/gitlab-duo/) is a powerful set of AI capabilities within GitLab’s DevSecOps Platform that helps to speed up development of code, improve operations, and secure software. Since its debut in June, we’ve been steadily expanding the suite of AI capabilities. These now extend across the entire software development lifecycle – from suggesting code, to finding and explaining vulnerabilities in code, to identifying appropriate code reviewers. As enterprises increase code generation, they can avoid potential bottlenecks, such as security checks, further downstream.\n\nFor example, we recently released our [GitLab Duo Vulnerability Explanation feature into Beta](https://about.gitlab.com/blog/remediating-vulnerabilities-with-insights-and-ai/). Typically, vulnerability discovery and mitigation would require a significant amount of back-and-forth between development and application security teams to agree on severity levels and approaches to fix the vulnerability. Vulnerability Explanation alleviates this inefficiency by summarizing detected vulnerabilities and their implications as well as providing in-depth solutions and suggested mitigation within the developer’s workflow, enabling faster resolution and creation of safer code within the development workflow. \n\n![GitLab Duo Vulnerability Explanation](https://about.gitlab.com/images/blogimages/2023-08-31-solving-vulnerabilities-with-insights-and-ai/ai_explain_this_vulnerability_results.png)\n\n\nFor even more efficiency, [GitLab Duo Code Suggestions](https://about.gitlab.com/solutions/code-suggestions/) (Beta) helps developers create new code and update existing code faster. 
[GitLab Duo Suggested Reviewers](https://about.gitlab.com/blog/gitlab-suggested-reviewers/) (generally available to all users) helps teams make an informed decision when choosing reviewers that can meet their review criteria.\n\nLearn about [all GitLab Duo capabilities](https://about.gitlab.com/gitlab-duo/).\n\nWatch GitLab Duo capabilities in action.\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/LifJdU3Qagw\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\n\n## A single, enterprise-ready DevSecOps platform \nEnterprise needs from a software delivery platform are unique. A DevSecOps platform must support the ability to:\n- build for speed with adequate security guardrails right from the start\n- consolidate to a single platform, but still integrate with your existing solution\n- simply adopt and onboard developers, but handle the complexity of scale\n\nGitLab CI/CD is a core way for organizations to meet these requirements. As customers scale their adoption of GitLab, they run millions of CI/CD jobs on a monthly basis. With the efficiency improvements further driven by GitLab Duo, these numbers will likely increase. However, organizations will need to find efficiency opportunities throughout their development and deployment workflows to be able to handle this growth, ensuring that whatever is deploying into production meets their quality, security, and reliability standards.\n\nThe [GitLab CI/CD Component Catalog](https://about.gitlab.com/blog/introducing-ci-components/), which will soon be released into Beta, solves these problems by enabling organizations to standardize their pipelines and create building blocks in a centralized repository that can be easily discovered, reused, and shared across teams. 
Enterprises can develop base pipeline configurations with the proper compliance, quality, and security checks already built-in for use across their organization. \n\nHere are some more capabilities aimed at improving the enterprise platform experience:\n- The GitLab Runner ecosystem continues to expand as we've recently introduced [GitLab SaaS runners on MacOS](https://about.gitlab.com/releases/2023/09/22/gitlab-16-4-released/#macos-13-ventura-image-for-saas-runners-on-macos), [xlarge and 2xlarge SaaS Runners on Linux](https://about.gitlab.com/releases/2023/08/22/gitlab-16-3-released/#more-powerful-gitlab-saas-runners-on-linux), [increased storage on medium and large SaaS Runners on Linux](https://about.gitlab.com/releases/2023/06/22/gitlab-16-1-released/#increased-storage-for-gitlab-saas-runners-on-linux), and [GPU-enabled SaaS Runners on Linux](https://about.gitlab.com/releases/2023/05/22/gitlab-16-0-released/#gpu-enabled-saas-runners-on-linux) for supporting data science workloads.\n- GitLab Duo, which was previously only available for GitLab SaaS, is now extended to GitLab self-hosted. Enterprises that prefer to self-host or must self-host due to compliance and regulatory restrictions can now take advantage of our AI features, starting with [Code Suggestions](https://about.gitlab.com/blog/self-managed-support-for-code-suggestions/).\n- Organizations looking at using GitLab Packages as their consolidated package registry can now [import packages](https://docs.gitlab.com/ee/user/packages/package_registry/supported_functionality.html#importing-packages-from-other-repositories) from their current package registries like Maven Central or Artifactory. GitLab [supports importing](https://docs.gitlab.com/ee/user/packages/package_registry/supported_functionality.html#importing-packages-from-other-repositories) Maven, npm, NuGet, and PyPI package types into GitLab, with many more package formats to follow. 
\n\n## A customizable solution that fits the way you work\nAs companies grow, there is an increasing need to personalize development and deployment settings and provide distinct visibility into the DevSecOps lifecycle to users beyond the immediate DevSecOps teams. GitLab is designed to function effectively with minimal adjustments, yet it offers the flexibility to be tailored to the requirements of expanding organizations. \n\nOur recent developments, including [changes to product navigation](https://about.gitlab.com/blog/navigation-research-blog-post/), are driven by comprehensive user research. We recognize that each organization and its individual users have unique, preferred workflows. Our updated navigation features, such as pinning frequently accessed items, visualizing work, and simplifying navigation through fewer top-level items, empower DevSecOps teams to align the platform with their optimal environment and workflow.\n\nWatch the new and simplified navigation in action.\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/rGTl9_HIpbY\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\n\nHere are some other highlights:\n- In addition to overhauling the navigation, we [introduced the rich text editor](https://about.gitlab.com/releases/2023/07/22/gitlab-16-2-released/#all-new-rich-text-editor-experience) by providing a “what you see is what you get” editing experience. The rich text editor is now available in all issues, epics, and merge requests.\n- GitLab offers [six out-of-the-box roles](https://docs.gitlab.com/ee/user/permissions.html#roles), but for many enterprises this was not enough. Some roles gave too much permission, while others didn’t grant enough permissions to complete a task. 
Enterprises needed a way to define their own roles – leading to [customizable roles](https://docs.gitlab.com/ee/user/custom_roles.html), which gives GitLab administrators the ability to define roles with granular permissions suited for their needs.\n- GitLab Value Streams Dashboard ensures that all stakeholders have visibility into the progress and value delivery metrics associated with software development and delivery. To align with customers’ needs to customize the data viewed and the appearance, we introduced [new velocity metrics](https://about.gitlab.com/releases/2023/08/22/gitlab-16-3-released/#new-velocity-metrics-in-the-value-streams-dashboard) and the ability to [customize the appearance and data](https://about.gitlab.com/releases/2023/07/22/gitlab-16-2-released/#new-customization-layer-for-the-value-streams-dashboard) to adjust metrics based on their areas of interest, filter out irrelevant information, and focus on the data that is most relevant to their analysis or decision-making process.\n\n![New velocity metrics in the Value Streams Dashboard](https://about.gitlab.com/images/16_3/16.3_vsd.mr_iss.png)\n\n\n## The enterprise awaits — get growing today\t\nOrganizations on a growth trajectory need a way to sustain that growth. They'll need to leverage the capabilities of AI to generate code faster — but they can't sacrifice quality or security. Organizations will also need to set standards for development and deployment that extend across the enterprise, and every user will need a clear and customizable view of the DevSecOps lifecycle. As we bring new capabilities into the GitLab DevSecOps Platform, we will continue to support these enterprise-class needs.\n\n> Bring the best practices of industry leaders to your team. Join GitLab and Nasdaq for an exciting discussion about AI, DevSecOps, and developer productivity. 
[Register for this webinar today!](https://page.gitlab.com/webcast-fy24q3-devsecops-ai-developer-productivity.html)\n\n\n**Disclaimer:** This blog contains information related to upcoming products, features, and functionality. It is important to note that the information in this blog post is for informational purposes only. Please do not rely on this information for purchasing or planning purposes. As with all projects, the items mentioned in this blog and linked pages are subject to change or delay. The development, release, and timing of any products, features, or functionality remain at the sole discretion of GitLab.\n",[725,109,9,704],{"slug":1533,"featured":6,"template":680},"gitlab-ai-cicd-customization-toolkit","content:en-us:blog:gitlab-ai-cicd-customization-toolkit.yml","Gitlab Ai Cicd Customization Toolkit","en-us/blog/gitlab-ai-cicd-customization-toolkit.yml","en-us/blog/gitlab-ai-cicd-customization-toolkit",{"_path":1539,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":1540,"content":1545,"config":1551,"_id":1553,"_type":14,"title":1554,"_source":16,"_file":1555,"_stem":1556,"_extension":19},"/en-us/blog/gitlab-chat-ai",{"title":1541,"description":1542,"ogTitle":1541,"ogDescription":1542,"noIndex":6,"ogImage":820,"ogUrl":1543,"ogSiteName":693,"ogType":694,"canonicalUrls":1543,"schema":1544},"ML experiment: Use a chatbot to answer how-to questions","Learn how GitLab is experimenting with a docs chatbot that you can ask product questions in this latest installment of our ongoing 'AI/ML in DevSecOps' series.","https://about.gitlab.com/blog/gitlab-chat-ai","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"ML experiment: Use a chatbot to answer how-to questions\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Sarah Waldner\"}],\n        \"datePublished\": \"2023-05-04\",\n      
}",{"title":1541,"description":1542,"authors":1546,"heroImage":820,"date":1548,"body":1549,"category":10,"tags":1550},[1547],"Sarah Waldner","2023-05-04","\n\n\u003Ci>This blog post is part of an ongoing series about GitLab's journey to [build and integrate AI/ML into our DevSecOps platform](/blog/ai-ml-in-devsecops-series/). The series starts here: [What the ML is up with DevSecOps and AI?](/blog/what-the-ml-ai/). Throughout the series, we'll feature blogs from our product, engineering, and UX teams to showcase how we're infusing AI/ML into GitLab.\u003C/i>\n\nAt GitLab, [everyone can contribute](/company/mission/). As a platform, GitLab offers a wide variety of features, but it is hard to know everything GitLab is capable of without diligently combing through our documentation, testing out our features, or talking to someone at GitLab who can answer your questions. Reading documentation can send users down a rabbit hole and they may not get their question answered. This can lead to time spent with support, meetings with the Customer Success team, going back and forth with a solutions architect, coordinating with technical account managers, and maybe opting for some professional services hours. What if users had access to all the knowledge they needed at their fingertips 24/7 and could get their complex questions answered immediately?\n        \nIn an experimental feature, the [Global Search team](/handbook/engineering/development/enablement/data_stores/search/) used AI to create a chatbot that answers how-to questions about the GitLab product. It will respond with an explanation and relevant links to our documentation.   \n\n![GitLab Chat answering a simple question](https://about.gitlab.com/images/blogimages/gitlab_chat.gif){: .shadow}\n\nGetting answers on how-to questions while in the product eliminates time lost to context switching. The chat interface overlays the GitLab UI, which enables you to interface with a virtual expert alongside your work. 
This is especially helpful when you are involved in a complex multi-step task like setting up a gitlab-ci.yml file, configuring security policies, or editing a CODEOWNERS file.\n\nGitLab chat will answer any question that you would utilize our product documentation to answer. It also provides helpful links if the bot's response is not as detailed as you needed - the full documentation is just one click away.\n\n## Be part of our AI-assisted features journey\n\nThis experiment is just the start of the ways we're looking to infuse GitLab with AI/ML capabilities to help GitLab users become more efficient and effective at their jobs. We are [looking across the software development lifecycle](/blog/what-the-ml-ai/) for painful and time-consuming tasks that are ideal for AI Assisted features. We'll continue to share these demos throughout this blog series.\n\nInterested in using these AI-generated features? [Join our waitlist](https://forms.gle/9eeUkPJauKsbLaoz5) and share your ideas.\n\nContinue reading our ongoing series, \"[AI/ML in DevSecOps](/blog/ai-ml-in-devsecops-series/)\".\n\n_Disclaimer: This blog contains information related to upcoming products, features, and functionality. It is important to note that the information in this blog post is for informational purposes only. Please do not rely on this information for purchasing or planning purposes. As with all projects, the items mentioned in this blog and linked pages are subject to change or delay. 
The development, release, and timing of any products, features, or functionality remain at the sole discretion of GitLab._\n",[703,725,704,9],{"slug":1552,"featured":6,"template":680},"gitlab-chat-ai","content:en-us:blog:gitlab-chat-ai.yml","Gitlab Chat Ai","en-us/blog/gitlab-chat-ai.yml","en-us/blog/gitlab-chat-ai",{"_path":1558,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":1559,"content":1563,"config":1570,"_id":1572,"_type":14,"title":1573,"_source":16,"_file":1574,"_stem":1575,"_extension":19},"/en-us/blog/gitlab-duo-agent-platform-public-beta",{"noIndex":6,"title":1560,"description":1561,"ogImage":1562},"GitLab Duo Agent Platform goes public beta","Introducing the DevSecOps orchestration platform designed to unlock asynchronous collaboration between developers and AI agents.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1752678395/impw8no5tbskr6k2afgu.jpg",{"tags":1564,"category":10,"date":1565,"heroImage":1562,"authors":1566,"description":1561,"title":1568,"body":1569},[9,725,704,1135],"2025-07-17",[1567],"Bill Staples","GitLab Duo Agent Platform Public Beta: Next-gen AI orchestration and more","**We're building the future of software development.**\n\nAt GitLab, we are [reimagining the future of software engineering](https://about.gitlab.com/blog/gitlab-duo-agent-platform-what-is-next-for-intelligent-devsecops/) as a human and AI collaboration. Where developers focus on solving technical, complex problems and driving innovation, while AI agents handle the routine, repetitive tasks that slow down progress. Where developers are free to explore new ideas in code at much lower cost, bug backlogs are a thing of the past, and users of the software you build enjoy a more usable, reliable, and secure experience. This isn't a distant dream. 
We're building this reality today, and it is called the GitLab Duo Agent Platform.\n\n## What is GitLab Duo Agent Platform?\n\nGitLab Duo Agent Platform is our next-generation DevSecOps orchestration platform designed to unlock asynchronous collaboration between developers and AI agents. It will transform your development workflow from isolated linear processes into dynamic collaboration where specialized AI agents work alongside you and your team on every stage of the software development lifecycle; it will be like having an unlimited team of colleagues at your disposal.\n\nImagine delegating a complex refactoring task to a Software Developer Agent while simultaneously having a Security Analyst Agent scan for vulnerabilities and a Deep Research Agent analyze progress across your repository history. This all happens in parallel, orchestrated seamlessly within GitLab.\n\nToday, we are announcing the launch of the [first public beta of the GitLab Duo Agent Platform](https://about.gitlab.com/gitlab-duo/agent-platform/) for GitLab.com and self-managed GitLab Premium and Ultimate customers. This is just the first in a series of updates that will improve how software gets planned, built, verified, and deployed as we amplify human ingenuity through intelligent automation.\n\nThis first beta focuses on unlocking the IDE experience through the GitLab VS Code extension and JetBrains IDEs plug-in; next month, we plan on bringing the Duo Agent Platform experience to the GitLab application and expand our IDE support. Let me share a bit more about our vision for the roadmap between now and general availability, planned for later this year. You can find details about the first beta down below.\n\nWatch this video or read on for what's available now and what's to come. 
Then, if you're ready to get started with Duo Agent Platform, [find out how with the public beta](#get-started-now).\n\n\u003Cdiv style=\"padding:56.25% 0 0 0;position:relative;\">\u003Ciframe src=\"https://player.vimeo.com/video/1101993507?title=0&amp;byline=0&amp;portrait=0&amp;badge=0&amp;autopause=0&amp;player_id=0&amp;app_id=58479\" frameborder=\"0\" allow=\"autoplay; fullscreen; picture-in-picture; clipboard-write; encrypted-media; web-share\" referrerpolicy=\"strict-origin-when-cross-origin\" style=\"position:absolute;top:0;left:0;width:100%;height:100%;\" title=\"GitLab Agent Platform Beta Launch_071625_MP_v2\">\u003C/iframe>\u003C/div>\u003Cscript src=\"https://player.vimeo.com/api/player.js\">\u003C/script>\n\n## GitLab's unique position as an orchestration platform\n\nGitLab sits at the heart of the development lifecycle as the system of record for engineering teams, orchestrating the entire journey from concept to production for over 50 million registered users, including half of the Fortune 500 across geographies. This includes over 10,000 paying customers across all segments and verticals, including public institutions.\n\nThis gives GitLab something no competitor can match: a comprehensive understanding of everything it takes to deliver software. We bring together your project plans, code, test runs, security scans, compliance checks, and CI/CD configurations to not only power your team but also orchestrate collaboration with AI agents you control.\n\nAs an intelligent, unified DevSecOps platform, GitLab stores all of the context about your software engineering practice in one place. We will expose this unified data to AI agents via our knowledge graph. 
Every agent we build has automatic access to this SDLC-connected data set, providing rich context so agents can make informed recommendations and take actions that adhere to your organizational standards.\n\n**Here's an example of this advantage in action.** Have you ever tried to figure out exactly how a project is going across dozens, if not hundreds, of stories and issues being worked on across all the developers involved? Our Deep Research Agent leverages the GitLab Knowledge Graph and semantic search capabilities to traverse your epic and all related issues, and explore the related codebase and surrounding context. It quickly correlates information across your repositories, merge requests, and deployment history. This delivers critical insights that standalone tools can't match and that would take human developers hours to uncover. \n\n\u003Cdiv style=\"padding:56.25% 0 0 0;position:relative;\">\u003Ciframe src=\"https://player.vimeo.com/video/1101998114?title=0&amp;byline=0&amp;portrait=0&amp;badge=0&amp;autopause=0&amp;player_id=0&amp;app_id=58479\" frameborder=\"0\" allow=\"autoplay; fullscreen; picture-in-picture; clipboard-write; encrypted-media; web-share\" referrerpolicy=\"strict-origin-when-cross-origin\" style=\"position:absolute;top:0;left:0;width:100%;height:100%;\" title=\"Deep Research Demo_071625_MP_v1\">\u003C/iframe>\u003C/div>\u003Cscript src=\"https://player.vimeo.com/api/player.js\">\u003C/script>\n\n## Our strategic evolution from AI features to agent orchestration\n\nGitLab Duo started as an add-on, bringing generative AI to developers through Duo Pro and Enterprise. With GitLab 18.0, it's now built into the platform. 
We've unlocked [Duo Agentic Chat](https://about.gitlab.com/blog/gitlab-duo-chat-gets-agentic-ai-makeover/) and Code Suggestions for all Premium and Ultimate users, and now we're providing immediate access to the Duo Agent Platform.\n\nWe've ramped up engineering investment and are accelerating delivery, with powerful new AI features landing every month. But we're not just building another coding assistant. GitLab Duo is becoming an agent orchestration platform, where you can create, customize, and deploy AI agents that work alongside you and interoperate easily with other systems, dramatically increasing productivity. \n\n> **“GitLab Duo Agent Platform enhances our development workflow with AI that truly understands our codebase and our organization. Having GitLab Duo AI agents embedded in our system of record for code, tests, CI/CD, and the entire software development lifecycle boosts productivity, velocity, and efficiency. The agents have become true collaborators to our teams, and their ability to understand intent, break down problems, and take action frees our developers to tackle the exciting, innovative work they love.”** - Bal Kang, Engineering Platform Lead at NatWest\n\n### Agents that work out of the box\n\nWe are introducing agents that mirror familiar team roles. These agents can search, read, create, and modify existing artifacts across GitLab. Think of these as agents you can interact with individually, that also act as building blocks that you can customize to create your own agents. Like your team members, agents have defined specializations, such as software development, testing, or technical writing. As specialists, they're tapping into the right context and tools to consistently accomplish the same types of tasks, wherever they're deployed.\n\nHere are some of the agents we're building today:\n\n- **Chat Agent (now in beta):** Takes natural language requests to provide information and context to the user. 
Can perform general development tasks, such as reading issues or code diffs. As an example, you can ask Chat to debug a failed job by providing the job URL.\n\n\u003Cdiv style=\"padding:56.25% 0 0 0;position:relative;\">\u003Ciframe src=\"https://player.vimeo.com/video/1102616311?badge=0&amp;autopause=0&amp;player_id=0&amp;app_id=58479\" frameborder=\"0\" allow=\"autoplay; fullscreen; picture-in-picture; clipboard-write; encrypted-media; web-share\" referrerpolicy=\"strict-origin-when-cross-origin\" style=\"position:absolute;top:0;left:0;width:100%;height:100%;\" title=\"agentic-chat-in-web-ui-demo_Update V2\">\u003C/iframe>\u003C/div>\u003Cscript src=\"https://player.vimeo.com/api/player.js\">\u003C/script>\u003Cp>\u003C/p>\n\n- **Software Developer Agent (now in beta):** Works on assigned items by creating code changes in virtual development environments and opening merge requests for review.\n\n- **Product Planning Agent:** Prioritizes product backlogs, assigns work items to human and agentic team members, and provides project updates over specified timelines.\n\n- **Software Test Engineer Agent:** Tests new code contributions for bugs and validates if reported issues have been resolved. 
\n\n- **Code Reviewer Agent:** Performs code reviews following team standards, identifies quality and security issues, and can merge code when ready.\n\n- **Platform Engineer Agent:** Monitors GitLab deployments, including GitLab Runners, tracks CI/CD pipeline health, and reports performance issues to human platform engineering teams.\n\n- **Security Analyst Agent:** Finds vulnerabilities within codebases and deployed applications, and implements code and configuration changes to help resolve security weaknesses.\n\n- **Deployment Engineer Agent:** Deploys updates to production, monitors for unusual behavior, and rolls back changes that impact application performance or security.\n\n- **Deep Research Agent:** Conducts comprehensive, multi-source analysis across your entire development ecosystem.\n\nWhat makes these agents powerful is their native access to GitLab's comprehensive toolkit. Today, we have over 25 tools, from issues and epics to merge requests and documentation, with more to come. Unlike external AI tools that operate with limited context, our agents work as true team members with full platform privileges under your supervision.\n\nIn the coming months, you'll also be able to modify these agents to meet the needs of your organization. For example, you'll be able to specify that a Software Test Engineer Agent follows best practices for a particular framework or methodology, deepening its specialization and turning it into an even more valuable team member.\n\n## Flows orchestrate complex agent tasks\n\nOn top of individual agents, we are introducing agent Flows. Think of these as more complex workflows that can include multiple agents with pre-built instructions, steps, and actions for a given task that can run autonomously. \n\nWhile you can create Flows for basic tasks common to individuals, they truly excel when applied to complex, specialized tasks that would normally take hours of coordination and effort to complete. 
Flows will help you finish complex tasks faster and, in many cases, asynchronously without human intervention.\n\nFlows have specific triggers for execution. Each Flow contains a series of steps, and each step has detailed instructions that tell a specialized agent what to do. This granular approach allows you to give precise instructions to agents in the Flow. By defining instructions in greater detail and establishing structured decision points, Flows can help solve for the inherent variability in AI responses while eliminating the need to repeatedly specify the same requirements, unlocking more consistent and predictable outcomes without user configuration.\n\nHere are some examples of out-of-the-box Flows that we are building:\n\n- **Software Development Flow (now in beta):** Orchestrates multiple agents to plan, implement, and test code changes end-to-end, helping transform how teams deliver features from concept to production.\n\n- **Issue-to-MR Flow:** Automatically converts issues into actionable merge requests by coordinating agents to analyze requirements, prepare comprehensive implementation plans, and generate code.\n\n- **Convert CI File Flow:** Streamlines migration workflows by having agents analyze existing CI/CD configurations and intelligently convert them to GitLab CI format with full pipeline compatibility.\n\n\u003Cdiv style=\"padding:56.25% 0 0 0;position:relative;\">\u003Ciframe src=\"https://player.vimeo.com/video/1101941425?title=0&amp;byline=0&amp;portrait=0&amp;badge=0&amp;autopause=0&amp;player_id=0&amp;app_id=58479\" frameborder=\"0\" allow=\"autoplay; fullscreen; picture-in-picture; clipboard-write; encrypted-media; web-share\" referrerpolicy=\"strict-origin-when-cross-origin\" style=\"position:absolute;top:0;left:0;width:100%;height:100%;\" title=\"jenkins-to-gitlab-cicd-for-blog\">\u003C/iframe>\u003C/div>\u003Cscript src=\"https://player.vimeo.com/api/player.js\">\u003C/script>\n\u003Cp>\u003C/p>\n\n- **Search and Replace Flow:** 
Discovers and transforms code patterns across codebases by systematically analyzing project structures, identifying optimization opportunities, and executing precise replacements.\n\n- **Incident Response & Root Cause Analysis Flow:** Orchestrates incident response by correlating system data, coordinating specialized agents for root cause analysis, and executing approved remediation steps while keeping human stakeholders informed throughout the resolution process.\n\nThis is where GitLab Duo Agent Platform is taking a truly unique approach versus other AI solutions. We won't just give you pre-built agents. We'll also give you the power to create, customize, and share agent Flows that perfectly match your individual and organization's unique needs. And with Flows, you will then be able to give agents a specific execution plan for common and complex tasks.\n\nWe believe this approach is more powerful than building purpose-built agents like our competitors do, because every organization has different workflows, coding standards, security requirements, and business logic. 
Generic AI tools can't understand your specific context, but GitLab Duo Agent Platform will be able to be tailored to work exactly how your team works.\n\n## Why build agents and agent Flows in the GitLab Duo Agent Platform?\n\n**Build fast.** You can build agents and complex agent Flows in the Duo Agent Platform quickly and easily using a fast, declarative extensibility model and UI assistance.\n\n**Built-in compute.** With Duo Agent Platform, you no longer have to worry about the hassle of standing up your own infrastructure for agents: compute, network, and storage are all built-in.\n\n**SDLC events.** Your agents can be invoked automatically on common events: broken pipeline, failed deployment, issue created, etc.\n\n**Instant access.** You can interact with your agents everywhere in GitLab or our IDE plug-in: assign them issues, @mention them in comments, and chat with them everywhere Duo Chat is available.\n\n\u003Cdiv style=\"padding:56.25% 0 0 0;position:relative;\">\u003Ciframe src=\"https://player.vimeo.com/video/1102029239?badge=0&amp;autopause=0&amp;player_id=0&amp;app_id=58479\" frameborder=\"0\" allow=\"autoplay; fullscreen; picture-in-picture; clipboard-write; encrypted-media; web-share\" referrerpolicy=\"strict-origin-when-cross-origin\" style=\"position:absolute;top:0;left:0;width:100%;height:100%;\" title=\"assigning an agent an issue\">\u003C/iframe>\u003C/div>\u003Cscript src=\"https://player.vimeo.com/api/player.js\">\u003C/script> \u003Cp>\u003C/p>\n\n**Built-in and custom models supported.** Your agents will have automatic access to all of the models we support, and users will be able to choose specific models for specific tasks. 
If you want to connect Duo Agent Platform to your own self-hosted model, you will be able to do that too!\n\n**Model Context Protocol (MCP) endpoints.** Every agent and Flow can be accessed or triggered via native MCP endpoints, allowing you to connect to and collaborate with your agents and Flows from anywhere, including popular tools like Claude Code, Cursor, Copilot, and Windsurf.\n\n**Observability and security.** Finally, we provide built-in observability and usage dashboards, so you can see exactly who, where, what, and when agents took actions on your behalf.\n\n## A community-driven future\n\nCommunity contributions have long fueled GitLab's innovation and software development. We're excited to partner with our community with the introduction of the AI Catalog. The AI Catalog will allow you to create and share agents and Flows within your organization and across the GitLab Ecosystem in our upcoming beta.\n\nWe believe that the most valuable AI applications are likely to emerge from you, our community, thanks to your daily application of GitLab Duo Agent Platform to solve numerous real-world use cases. By enabling seamless sharing of agents and Flows, we're creating a network effect where each contribution enhances the platform's collective intelligence and value. Over time, we believe that the most valuable use cases from Agent Platform will come from our thriving GitLab community. \n\n![AI Catalog](https://res.cloudinary.com/about-gitlab-com/image/upload/v1752685501/awdwx08udwrxgvcpmssb.png \"AI Catalog\")\n\n## Available today in the GitLab Duo Agent Platform in public beta\n\nThe GitLab Duo Agent Platform public beta is available now to Premium and Ultimate customers with these capabilities:\n\n**Software Development Flow:** Our first Flow orchestrates agents in gathering comprehensive context, clarifying ambiguities with human developers, and executing strategic plans to make precise changes to your codebase and repository. 
It leverages your entire project, including its structure, codebase, and history, along with additional context like GitLab issues or merge requests to amplify developer productivity.\n\n**New Agent tools available:** Agents now have access to multiple tools to do their work, including:\n\n  - File System (Read, Create, Edit, Find Files, List, Grep)\n  - Execute Command Line*\n  - Issues (List, Get, Get Comments, Edit*, Create*, Add/Update Comments*)\n  - Epics (Get, Get Comments)\n  - MR (Get, Get Comments, Get Diff, Create, Update)\n  - Pipeline (Job Logs, Pipeline Errors)\n  - Project (Get, Get File)\n  - Commits (Get, List, Get Comments, Get Diff)\n  - Search (Issue Search)\n  - Secure (List Vulnerabilities)\n  - Documentation Search\n  \n*=Requires user approval\n\n**GitLab Duo Agentic Chat in the IDE:** Duo Agentic Chat transforms the chat experience from a passive Q&A tool into an active development partner directly in your IDE.\n\n\u003Cdiv style=\"padding:56.25% 0 0 0;position:relative;\">\u003Ciframe src=\"https://player.vimeo.com/video/1103237126?badge=0&amp;autopause=0&amp;player_id=0&amp;app_id=58479\" frameborder=\"0\" allow=\"autoplay; fullscreen; picture-in-picture; clipboard-write; encrypted-media; web-share\" referrerpolicy=\"strict-origin-when-cross-origin\" style=\"position:absolute;top:0;left:0;width:100%;height:100%;\" title=\"agentic-ai-launch-video_NEW\">\u003C/iframe>\u003C/div>\u003Cscript src=\"https://player.vimeo.com/api/player.js\">\u003C/script>\u003Cp>\u003C/p>\n\n- **Iterative feedback and chat history:** Duo Agentic Chat now supports chat history and iterative feedback, transforming the agent into a stateful, conversational partner. 
This fosters trust, enabling developers to delegate more complex tasks and offer corrective guidance.\n\n\u003Cdiv style=\"padding:56.25% 0 0 0;position:relative;\">\u003Ciframe src=\"https://player.vimeo.com/video/1101743173?title=0&amp;byline=0&amp;portrait=0&amp;badge=0&amp;autopause=0&amp;player_id=0&amp;app_id=58479\" frameborder=\"0\" allow=\"autoplay; fullscreen; picture-in-picture; clipboard-write; encrypted-media; web-share\" style=\"position:absolute;top:0;left:0;width:100%;height:100%;\" title=\"agentic-chat-history\">\u003C/iframe>\u003C/div>\u003Cscript src=\"https://player.vimeo.com/api/player.js\">\u003C/script>\n\u003Cp>\u003C/p>\n\n- **Streamlined delegation with slash commands:** Expanded, more powerful slash commands, such as /explain, /tests, and /include, create a “delegation language” for quick and precise intent. The /include command allows the explicit injection of context from specific files, open issues, merge requests, or dependencies directly into the agent's working memory, making the agent more powerful and teaching users how to provide optimal context for high-quality responses.\n\n\u003Cdiv style=\"padding:56.25% 0 0 0;position:relative;\">\u003Ciframe src=\"https://player.vimeo.com/video/1101743187?title=0&amp;byline=0&amp;portrait=0&amp;badge=0&amp;autopause=0&amp;player_id=0&amp;app_id=58479\" frameborder=\"0\" allow=\"autoplay; fullscreen; picture-in-picture; clipboard-write; encrypted-media; web-share\" style=\"position:absolute;top:0;left:0;width:100%;height:100%;\" title=\"include-agentic-chat-jc-voiceover\">\u003C/iframe>\u003C/div>\u003Cscript src=\"https://player.vimeo.com/api/player.js\">\u003C/script>\n\u003Cp>\u003C/p>\n\n- **Personalization through custom rules:** New Custom Rules enables developers to tailor agent behavior to individual and team preferences using natural language, for example, development style guides. 
This foundational mechanism shapes the agent's persona into a personalized assistant, evolving toward specialized agents based on user-defined preferences and organizational policies.\n    \n\u003Cdiv style=\"padding:56.25% 0 0 0;position:relative;\">\u003Ciframe src=\"https://player.vimeo.com/video/1101743179?title=0&amp;byline=0&amp;portrait=0&amp;badge=0&amp;autopause=0&amp;player_id=0&amp;app_id=58479\" frameborder=\"0\" allow=\"autoplay; fullscreen; picture-in-picture; clipboard-write; encrypted-media; web-share\" style=\"position:absolute;top:0;left:0;width:100%;height:100%;\" title=\"custom-rules-with-jc-voiceover\">\u003C/iframe>\u003C/div>\u003Cscript src=\"https://player.vimeo.com/api/player.js\">\u003C/script>\n\u003Cp>\u003C/p>\n\n- **Support for GitLab Duo Agentic Chat in JetBrains IDE:** To help meet developers where they work, we have expanded Duo Agentic Chat support to the JetBrains family of IDEs, including IntelliJ, PyCharm, GoLand, and Webstorm. This adds to our existing support for VS Code. Existing users get agentic capabilities automatically, while new users can install the plugin from the JetBrains Marketplace.\n\n\u003Cdiv style=\"padding:56.25% 0 0 0;position:relative;\">\u003Ciframe src=\"https://player.vimeo.com/video/1101743193?title=0&amp;byline=0&amp;portrait=0&amp;badge=0&amp;autopause=0&amp;player_id=0&amp;app_id=58479\" frameborder=\"0\" allow=\"autoplay; fullscreen; picture-in-picture; clipboard-write; encrypted-media; web-share\" style=\"position:absolute;top:0;left:0;width:100%;height:100%;\" title=\"jetbrains-support-jc-voiceover\">\u003C/iframe>\u003C/div>\u003Cscript src=\"https://player.vimeo.com/api/player.js\">\u003C/script>\n\u003Cp>\u003C/p>\n    \n- **MCP client support:** Duo Agentic Chat can now act as an MCP client, connecting to remote and locally running MCP servers. This capability unlocks the agent's ability to connect to systems beyond GitLab like Jira, ServiceNow, and ZenDesk to gather context or take actions. 
Any service that exposes itself via MCP can now become part of the agent's skill set. The official GitLab MCP Server is coming soon!\n\n\u003Cdiv style=\"padding:56.25% 0 0 0;position:relative;\">\u003Ciframe src=\"https://player.vimeo.com/video/1101743202?title=0&amp;byline=0&amp;portrait=0&amp;badge=0&amp;autopause=0&amp;player_id=0&amp;app_id=58479\" frameborder=\"0\" allow=\"autoplay; fullscreen; picture-in-picture; clipboard-write; encrypted-media; web-share\" style=\"position:absolute;top:0;left:0;width:100%;height:100%;\" title=\"McpDemo\">\u003C/iframe>\u003C/div>\u003Cscript src=\"https://player.vimeo.com/api/player.js\">\u003C/script>\n\u003Cp>\u003C/p>\n    \n- **GitLab Duo Agentic Chat in GitLab Web UI.** Duo Agentic Chat is also now available directly within the GitLab Web UI. This pivotal step evolves the agent from a coding assistant to a true DevSecOps agent, as it gains access to rich non-code context, such as issues and merge request discussions, allowing it to understand the \"why\" behind the work. Beyond understanding context, the agent can make changes directly from the WebUI, such as automatically updating issue statuses or editing merge request descriptions.\n\n## Coming soon to GitLab Duo Agent Platform\n\nOver the coming weeks, we'll release new capabilities to Duo Agent Platform, including more out-of-the-box agents and Flows. These will bring the platform into the GitLab experience you love today and enable even greater customization and extensibility, amplifying productivity for our customers:\n\n![GitLab Duo Agent Platform public beta roadmap](https://res.cloudinary.com/about-gitlab-com/image/upload/v1752685275/hjbe9iiu2ydp9slibsc2.png \"GitLab Duo Agent Platform public beta roadmap\")\n\n\n- **Integrated GitLab experience:** Building on the IDE extensions available in 18.2, we're expanding agents and Flows within the GitLab platform. 
This deeper integration will expand the ways you can collaborate synchronously and asynchronously with agents. You will be able to assign issues directly to agents, @mention them within GitLab Duo Chat, and seamlessly invoke them from anywhere in the application while maintaining MCP connectivity from your developer tool of choice. This native integration transforms agents into true development team members, accessible across GitLab.\n\n- **Agent observability:** As agents become more autonomous, we're building comprehensive visibility into their activity as they progress through Flows, enabling you to monitor their decision-making processes, track execution steps, and understand how they're interpreting and acting on your development challenges. This transparency into agent behavior builds trust and confidence while allowing you to optimize workflows and identify bottlenecks, and helps ensure agents are performing exactly as intended.\n\n- **AI Catalog:** Recognizing that great solutions come from community innovation, we will soon introduce the public beta of our AI Catalog — a marketplace which will allow you to extend Duo Agent Platform with specialized Agents and Flows sourced from GitLab, and over time, the broader community. You'll be able to quickly deploy these solutions in GitLab, leveraging context across your projects and codebase.\n\n- **Knowledge Graph:** Leveraging GitLab's unique advantage as the system of record for source code and its surrounding context, we're building a comprehensive Knowledge Graph that not only maps files and dependencies across the codebase but also makes that map navigable for users while accelerating AI query times and helping increase accuracy. 
This foundation enables GitLab Duo agents to quickly understand relationships across your entire development environment, from code dependencies to deployment patterns, unlocking faster and more precise responses to complex questions.\n\n![GitLab Duo Agent Platform Knowledge Graph](https://res.cloudinary.com/about-gitlab-com/image/upload/v1752685367/n0tvfgorchuhrronic3j.png \"GitLab Duo Agent Platform Knowledge Graph\")\n\n- **Create and edit agents and Flows:** Understanding that every organization has unique workflows and requirements, we're developing powerful agent and Flow creation and editing capabilities that will be introduced as the AI Catalog matures. You'll be able to create and modify agents and Flows to operate precisely the way your organization works, delivering deep customization across the Duo Agent Platform that enables higher quality results and increased productivity. \n\n![AI Catalog](https://res.cloudinary.com/about-gitlab-com/image/upload/v1752684938/fruwqcqvvrx8gmkz5u0v.png \"AI Catalog\")\n\n- **Official GitLab MCP Server:** Recognizing that developers work across multiple tools and environments, we're building an official GitLab MCP server that will enable you to access all of your agents and Flows via MCP. You'll be able to connect to and collaborate with your agents and Flows from anywhere MCP is supported, including popular tools like Claude Code, Cursor, Copilot, and Windsurf, unlocking seamless AI collaboration regardless of your preferred development environment.\n\n- **GitLab Duo Agent Platform CLI:** Our upcoming CLI will allow you to invoke agents and trigger Flows on the command line, leveraging GitLab's rich context across the entire software development lifecycle—from code repositories and merge requests to CI/CD pipelines and issue tracking. 
\n\n## Get started now\n\n- **GitLab Premium and Ultimate customers** in GitLab.com and self-managed environments using GitLab 18.2 can use Duo Agent Platform immediately (beta and experimental features for GitLab Duo [must be enabled](https://docs.gitlab.com/user/gitlab_duo/turn_on_off/#turn-on-beta-and-experimental-features)). GitLab Dedicated customers will be able to use the Duo Agent Platform with the release of GitLab 18.2 for Dedicated next month.\n\n- Users should download the [VS Code extension](https://marketplace.visualstudio.com/items?itemName=GitLab.gitlab-workflow) or the [JetBrains IDEs plugin](https://plugins.jetbrains.com/plugin/22857-gitlab) and follow our [guide to using GitLab Duo Agentic Chat](https://docs.gitlab.com/user/gitlab_duo_chat/agentic_chat/#use-agentic-chat), including Duo Chat [slash commands](https://docs.gitlab.com/user/gitlab_duo_chat/examples/#gitlab-duo-chat-slash-commands). \n\n**New to GitLab?** See GitLab Duo Agent Platform in action at our Technical Demo, offered in two timezone-friendly sessions: [Americas and EMEA](https://page.gitlab.com/webcasts-jul16-gitlab-duo-agentic-ai-emea-amer.html) and [Asia-Pacific](https://page.gitlab.com/webcasts-jul24-gitlab-duo-agentic-ai-apac.html). To get hands-on with GitLab Duo Agent Platform yourself, sign up for a [free trial](https://gitlab.com/-/trials/new?glm_content=default-saas-trial&glm_source=about.gitlab.com%2Fsales%2F) today.\n\n\n\u003Csmall>*This blog post contains “forward-looking statements” within the meaning of Section 27A of the Securities Act of 1933, as amended, and Section 21E of the Securities Exchange Act of 1934. 
Although we believe that the expectations reflected in the forward-looking statements contained in this blog post are reasonable, they are subject to known and unknown risks, uncertainties, assumptions and other factors that may cause actual results or outcomes to be materially different from any future results or outcomes expressed or implied by the forward-looking statements.*\n\n*Further information on risks, uncertainties, and other factors that could cause actual outcomes and results to differ materially from those included in or contemplated by the forward-looking statements contained in this blog post are included under the caption “Risk Factors” and elsewhere in the filings and reports we make with the Securities and Exchange Commission. We do not undertake any obligation to update or release any revisions to any forward-looking statement or to report any events or circumstances after the date of this blog post or to reflect the occurrence of unanticipated events, except as required by law.*\u003C/small>\n",{"featured":91,"template":680,"slug":1571},"gitlab-duo-agent-platform-public-beta","content:en-us:blog:gitlab-duo-agent-platform-public-beta.yml","Gitlab Duo Agent Platform Public Beta","en-us/blog/gitlab-duo-agent-platform-public-beta.yml","en-us/blog/gitlab-duo-agent-platform-public-beta",{"_path":1577,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":1578,"content":1581,"config":1587,"_id":1589,"_type":14,"title":1590,"_source":16,"_file":1591,"_stem":1592,"_extension":19},"/en-us/blog/gitlab-duo-agent-platform-what-is-next-for-intelligent-devsecops",{"noIndex":6,"title":1579,"description":1580},"GitLab Duo Agent Platform: What’s next for intelligent DevSecOps","GitLab Duo Agent Platform, a DevSecOps orchestration platform for humans and AI agents, leverages agentic AI for collaboration across the software development 
lifecycle.",{"heroImage":1582,"title":1579,"description":1580,"authors":1583,"date":1584,"body":1585,"category":10,"tags":1586},"https://res.cloudinary.com/about-gitlab-com/image/upload/v1750687578/esmflevxk5bf3eezjhwk.png",[1567],"2025-06-24","I’m thrilled to introduce the next evolution of Duo Workflow: GitLab Duo Agent Platform. This innovative platform extends agentic capabilities across the software development lifecycle, enabling teams to work in parallel with multiple AI agents.\n\nImagine starting your day like this:\n\n* You assign one AI agent to conduct deep research on an epic your team is working on, provide the latest updates on all contributions from the past week, and suggest a release post based on recent feature additions.\n* In parallel, you delegate a handful of accessibility bugs to several agents for analysis and to make the necessary code changes to resolve them.\n* Meanwhile, you ask another agent to review your complicated code changes and provide feedback before sending them to your teammate for formal review.\n* Finally, when the security team pings you about a new vulnerability that needs investigation across your entire project, you hand that research task to your security agent.\n\nAll of this happens simultaneously, while you focus on architecture decisions, creative problem-solving, and strategic technical work. GitLab Duo Agent Platform will let you delegate tasks to five, 10, or even 100 specialized agents — all with full context of your project, not just your code, including CI job logs, planning work items, and so much more. You’re automating the tedious work you have to do, so you can focus on the work that inspires you.\n\n**This isn't about replacing developers. 
It's about amplifying human creativity and expertise by removing the friction from routine tasks.** That’s the future we’re building with GitLab Duo Agent Platform.\n\n## What is GitLab Duo Agent Platform?\n\nGitLab Duo Agent Platform will enable many-to-many collaboration between engineers and [AI agents](https://about.gitlab.com/topics/agentic-ai/) across the full software development lifecycle, designed to help teams dramatically improve productivity and cycle time.\n\nBuilt on GitLab’s secure foundation, GitLab Duo Agent Platform is customizable and extendable. It empowers developers to build agents to tackle all kinds of software engineering problems, leveraging context across your entire software development lifecycle.\n\nGitLab Duo Agent Platform will go beyond code creation with specialized agents and custom workflows that can help with a nearly unlimited list of activities, including:\n\n* Issue implementation\n* Large-scale migrations/dependency upgrades\n* Automated documentation building/release posts\n* Fixing broken pipelines\n* Incident research support\n* Deep research of status and information on topics\n* Backlog administration\n* Vulnerability resolution\n* Reviews for specific types of code (e.g. database)\n* Quick internal tool building based on existing build blocks\n* and many more!\n\nYou will be able to use our agents out of the box as well as customize and extend them. 
We’re currently beta testing GitLab Duo Agent Platform with dozens of customers and will open beta access to more teams soon.\n\nWatch GitLab Duo Agent Platform in action:\n\u003Cdiv style=\"padding:56.25% 0 0 0;position:relative;\">\u003Ciframe src=\"https://player.vimeo.com/video/1095679084?badge=0&amp;autopause=0&amp;player_id=0&amp;app_id=58479\" frameborder=\"0\" allow=\"autoplay; fullscreen; picture-in-picture; clipboard-write; encrypted-media; web-share\" style=\"position:absolute;top:0;left:0;width:100%;height:100%;\" title=\"Agent Platform Demo Clip\">\u003C/iframe>\u003C/div>\u003Cscript src=\"https://player.vimeo.com/api/player.js\">\u003C/script>\n\n## Choose your tools, your models, and your agents\n\nConsistent with GitLab’s commitment to being an open platform, GitLab agents will seamlessly interoperate with your choice of code-authoring developer tools via standard model context protocol (MCP) and the agent-to-agent (A2A) framework, whether you’re using Cursor, Claude Code, Windsurf, OpenAI Codex, or others.\n\nThe platform will accept code contributions from any development tool in your stack, whether that code was written by a human developer or generated by an AI agent. This means your existing workflows and preferred tools will continue to work seamlessly as you integrate agent capabilities.\n\nGitLab Duo Agent Platform will work with any approved language model that [meets our selection criteria](https://about.gitlab.com/ai-transparency-center/#ai-continuity-plan). For organizations with strict security requirements, it will support approved self-hosted models running in completely air-gapped environments. Your infrastructure requirements and security policies won’t limit your ability to benefit from agentic development.\n\n## Context is everything, and your GitLab Duo agents have it\n\nThe difference between a helpful AI tool and a truly intelligent agent comes down to context. 
With GitLab Duo Agent Platform, agents don't work in isolation — they're deeply integrated into the platform where development work happens.\n\nEvery agent will automatically understand the full picture of your projects, including your open issues and their history, the merge requests that resolved them, the structure and rationale behind your code, your CI/CD pipeline configurations, security findings, compliance requirements, and the intricate relationships between all these components.\n\nJust like your human team members, agents have all the context to help you ship secure software faster. Instead of just answering questions about code, they will be able to provide insights about how a proposed change might affect your deployment pipeline or suggest security improvements based on your existing compliance rules. We believe that the more your team works within GitLab’s DevSecOps platform, the smarter your agents will become.\n\n## Stay in control while agents scale your team\n\nBuilding trust with AI agents isn't fundamentally different from building trust with new team members. You need to see their work, understand their approach, and gradually increase their responsibilities as they prove their competence.\n\nThat's the philosophy behind our agent approval workflow. Before any agent makes changes to your code or environment, it will present you with a clear plan: what it understands about the issue, the approach it will take, and the specific actions it wants to perform. You’ll then get the opportunity to review, approve, or redirect as needed. Over time, as agents consistently deliver quality work, you will be able to grant them greater autonomy for routine tasks while maintaining oversight for complex or critical work.\n\n## Built for community and customization\n\nGitLab has always thrived on community contributions, and this year marked a milestone with record-breaking customer contributions to our platform. 
Now we're extending that same collaborative energy to AI agents through our open framework approach.\n\nGitLab Duo Agent Platform isn't just about the agents we build — it's about empowering you and the broader community to create specialized agents that solve your unique engineering challenges. Whether you need an agent that understands your specific coding standards, integrates with your custom toolchain, or handles domain-specific tasks, the platform will give you the building blocks to make it happen.\n\nThis community-driven model creates a virtuous cycle that leverages the strength of the GitLab community through global sharing, similar to our [CI/CD Catalog](https://about.gitlab.com/blog/ci-cd-catalog-goes-ga-no-more-building-pipelines-from-scratch/). Diverse real-world use cases drive innovation. Enterprise feedback ensures reliability and security. And shared solutions benefit everyone. It's the same collaborative approach that has made GitLab successful, now applied to the frontier of agentic development.\n\n## How to get started\n\nIf you've been experimenting with [GitLab Duo Agentic Chat](https://about.gitlab.com/blog/gitlab-duo-chat-gets-agentic-ai-makeover/), now included with every GitLab 18 Premium and Ultimate GitLab.com user license, you've already gotten a taste of what's possible with AI agents in your development workflow.\n\nTo see what GitLab Duo Agent Platform can do and what we’re working on, check out the [demos in the recording of our annual GitLab 18 release event](https://about.gitlab.com/eighteen/).\n\nWant to be among the first to experience it? Sign up for the GitLab [Duo Agent Platform beta waitlist](https://about.gitlab.com/gitlab-duo/agent-platform/). This summer, we'll be opening access to more teams, with new agent features coming out in GitLab 18's upcoming releases throughout the year. 
We expect general availability this winter.\n\n*Disclaimer: This presentation contains information related to upcoming products, features, and functionality. It is important to note that the information in this presentation is for informational purposes only. Please do not rely on this information for purchasing or planning purposes. As with all projects, the items mentioned in this presentation and linked pages are subject to change or delay. The development, release, and timing of any products, features, or functionality remain at the sole discretion of GitLab Inc.*\n\n## Learn more\n\n- [From vibe coding to agentic AI: A roadmap for technical leaders](https://about.gitlab.com/the-source/ai/from-vibe-coding-to-agentic-ai-a-roadmap-for-technical-leaders/)\n- [What is agentic AI?](https://about.gitlab.com/topics/agentic-ai/)\n- [DevOps automation and AI agents](https://about.gitlab.com/topics/agentic-ai/devops-automation-ai-agents/)\n- [AI-augmented software development: Agentic AI for DevOps](https://about.gitlab.com/topics/agentic-ai/ai-augmented-software-development/)\n- [AI-driven code analysis: The new frontier in code security](https://about.gitlab.com/topics/agentic-ai/ai-code-analysis/)",[9,703,1135,725],{"featured":91,"template":680,"slug":1588},"gitlab-duo-agent-platform-what-is-next-for-intelligent-devsecops","content:en-us:blog:gitlab-duo-agent-platform-what-is-next-for-intelligent-devsecops.yml","Gitlab Duo Agent Platform What Is Next For Intelligent 
Devsecops","en-us/blog/gitlab-duo-agent-platform-what-is-next-for-intelligent-devsecops.yml","en-us/blog/gitlab-duo-agent-platform-what-is-next-for-intelligent-devsecops",{"_path":1594,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":1595,"content":1601,"config":1606,"_id":1608,"_type":14,"title":1609,"_source":16,"_file":1610,"_stem":1611,"_extension":19},"/en-us/blog/gitlab-duo-amazon-q-transform-ideas-into-code-in-minutes",{"title":1596,"description":1597,"ogTitle":1596,"ogDescription":1597,"noIndex":6,"ogImage":1598,"ogUrl":1599,"ogSiteName":693,"ogType":694,"canonicalUrls":1599,"schema":1600},"GitLab Duo + Amazon Q: Transform ideas into code in minutes","The new GitLab Duo with Amazon Q integration analyzes your issue descriptions and automatically generates complete working code solutions, accelerating development workflows.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097127/Blog/Hero%20Images/Blog/Hero%20Images/Screenshot%202024-11-27%20at%204.55.28%E2%80%AFPM_4VVz6DgGBOvbGY8BUmd068_1750097126673.png","https://about.gitlab.com/blog/gitlab-duo-amazon-q-transform-ideas-into-code-in-minutes","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"GitLab Duo + Amazon Q: Transform ideas into code in minutes\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Cesar Saavedra\"}],\n        \"datePublished\": \"2025-04-28\",\n      }",{"title":1596,"description":1597,"authors":1602,"heroImage":1598,"date":1603,"body":1604,"category":10,"tags":1605},[720],"2025-04-28","Have you ever spent days or even weeks converting a complex issue into working code? We've all been there. You start with a solid idea and a clear set of requirements, but the path from that initial concept to deployable code can be frustratingly long. 
Your productivity gets bogged down in implementation details, and projects that should move quickly end up dragging on.\n\nThis is where the power of [agentic AI](https://about.gitlab.com/topics/agentic-ai/) capabilities comes in. [GitLab Duo with Amazon Q](https://about.gitlab.com/blog/gitlab-duo-with-amazon-q-agentic-ai-optimized-for-aws/), which combines the comprehensive AI-powered DevSecOps platform with the deepest set of cloud computing capabilities, is designed to dramatically accelerate your application development process, all within your familiar GitLab workflow. By streamlining your path from idea to deployment, this powerful integration can propose implementation solutions based on your issue descriptions alone – transforming what used to take days into something that happens in minutes.\n\n## How it works: From issue to working code\n\nLet's walk through how this agentic AI feature works in practice. Imagine you're a developer tasked with creating a mortgage calculator application. Here's how GitLab Duo with Amazon Q helps you get it done:\n\n1. **Create an issue with detailed requirements:** Start by creating a standard [GitLab issue](https://docs.gitlab.com/user/project/issues/). In the description, you'll provide a comprehensive list of requirements that your service needs to meet. This becomes the blueprint for your solution.\n\n2. **Invoke Amazon Q with a quick action:** Once your issue is created, simply add a comment with a quick action, “/q dev”, to invoke Amazon Q. This is where the magic begins. \n\n3. **Let AI generate your implementation:** GitLab Duo with Amazon Q analyzes the issue description you've provided and the context of your source code, then autonomously generates code that meets all your stated requirements. 
It doesn't stop there – it actually commits those changes in a merge request, ready for your review.\n\n![GitLab Duo  with Amazon Q activity pop-up screenshot](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097156/Blog/Content%20Images/Blog/Content%20Images/image1_aHR0cHM6_1750097156018.png)\n\n4. **Review the generated application**: Navigate into the merge request to review the generated code. You can verify that all your requirements have been met and make any necessary adjustments.\n\n5. **Test the proposed application**: Finally, check that the application runs successfully. With minimal effort on your part, you now have working code that implements your original requirements.\n\n## Improve your development process\n\nGitLab Duo with Amazon Q completely transforms this process, including dramatically decreasing the time it takes to carry out complex developer tasks, through intelligent automation. By leveraging an agentic AI approach, you can accelerate your path from idea to deployment, freeing development teams to focus on more strategic work.\n\nWith GitLab Duo and Amazon Q, you'll develop software faster, more efficiently, and with less manual coding effort. This integration helps you:\n\n* **Save valuable development time** by automating implementation based on requirements  \n* **Maintain consistency** in code generation across your projects  \n* **Reduce the cognitive load** of translating requirements into working code  \n* **Accelerate your release cycles** by removing implementation bottlenecks  \n* **Focus your expertise** on reviewing and optimizing, rather than writing boilerplate code\n\nReady to see GitLab Duo with Amazon Q in action? 
Watch our demo video to discover how you can transform your development workflow today.\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/jxxzNst3jpo?si=j_LQdZhUnwqoQEst\" title=\"GitLab Duo with Amazon Q demo video for dev workflow\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\n> To learn more about GitLab Duo with Amazon Q visit us at an upcoming [AWS Summit in a city near you](https://about.gitlab.com/events/aws-summits/) or [reach out to your GitLab representative](https://about.gitlab.com/partners/technology-partners/aws/#form).\n\n## GitLab Duo with Amazon Q resources\n\n- [GitLab Duo with Amazon Q: Agentic AI optimized for AWS generally available](https://about.gitlab.com/blog/gitlab-duo-with-amazon-q-agentic-ai-optimized-for-aws/)\n- [GitLab and AWS partner page](https://about.gitlab.com/partners/technology-partners/aws/)\n- [GitLab Duo with Amazon Q documentation](https://docs.gitlab.com/user/duo_amazon_q/)",[9,787,724,482,725,231],{"slug":1607,"featured":91,"template":680},"gitlab-duo-amazon-q-transform-ideas-into-code-in-minutes","content:en-us:blog:gitlab-duo-amazon-q-transform-ideas-into-code-in-minutes.yml","Gitlab Duo Amazon Q Transform Ideas Into Code In Minutes","en-us/blog/gitlab-duo-amazon-q-transform-ideas-into-code-in-minutes.yml","en-us/blog/gitlab-duo-amazon-q-transform-ideas-into-code-in-minutes",{"_path":1613,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":1614,"content":1620,"config":1626,"_id":1628,"_type":14,"title":1629,"_source":16,"_file":1630,"_stem":1631,"_extension":19},"/en-us/blog/gitlab-duo-chat-101-get-more-done-on-gitlab-with-our-ai-assistant",{"title":1615,"description":1616,"ogTitle":1615,"ogDescription":1616,"noIndex":6,"ogImage":1617,"ogUrl":1618,"ogSiteName":693,"ogType":694,"canonicalUrls":1618,"schema":1619},"GitLab Duo Chat 101: Get more done on GitLab with our AI assistant","In this 
first article in our series learn how Chat can improve developer productivity – for example, by summarizing issues – and how to improve prompts to get better answers faster.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1750099126/Blog/Hero%20Images/Blog/Hero%20Images/GitLab_Duo_Blog_Hero_1800x945_r2_B%20%281%29_6a2UB7TOQk3JKxyb5yqYtc_1750099126039.png","https://about.gitlab.com/blog/gitlab-duo-chat-101-get-more-done-on-gitlab-with-our-ai-assistant","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"GitLab Duo Chat 101: Get more done on GitLab with our AI assistant\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Abubakar Siddiq Ango\"}],\n        \"datePublished\": \"2024-05-29\",\n      }",{"title":1615,"description":1616,"authors":1621,"heroImage":1617,"date":1623,"body":1624,"category":10,"tags":1625},[1622],"Abubakar Siddiq Ango","2024-05-29","GitLab Duo Chat became [generally available](https://about.gitlab.com/blog/gitlab-duo-chat-now-generally-available/) in [GitLab 16.11](https://about.gitlab.com/releases/2024/04/18/gitlab-16-11-released/) and its power as a personal assistant can not be overstated. On a DevSecOps platform, more has to happen than just generating code; planning, discussions, security, compliance, and technical reviews are all critical to developing secure software faster. Issues, epics, merge requests, and other sections of GitLab are where this work happens, with knowledge often buried deep in comment threads. It can take a lot of time to get up to speed on these threads, especially when they've grown to hundreds of comments and interactions and when you've been away from them for a while. This is where GitLab Duo Chat can help.\n\nIn this first part of our GitLab Duo Chat 101 series, we'll introduce you to Chat's capabilities and then dig into how to use Chat to summarize comment threads.\n\n> Live demo! 
Discover the future of AI-driven software development with our GitLab 17 virtual launch event. [Register today](https://about.gitlab.com/seventeen/)!\n\n## GitLab Duo Chat's capabilities\n\nWith Chat, you can refactor [existing code](https://docs.gitlab.com/ee/user/gitlab_duo_chat.html#refactor-code-in-the-ide), learn how a [block of code works](https://docs.gitlab.com/ee/user/gitlab_duo_chat.html#explain-code-in-the-ide), and write [tests](https://docs.gitlab.com/ee/user/gitlab_duo_chat.html#write-tests-in-the-ide) for your code, learn about your issues and epics, and much more. Depending on your prompts, you can make Chat do impressive things that boost developer productivity. In the video below, I showcased how you can use GitLab Duo Chat to interact with GitLab and learn about your issues and epics.\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/RJezT5_V6dI?si=XlXGs2DHAYa8Awzs\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\n## Use cases   \n\nGitLab Duo Chat’s capabilities allow for productivity gains across multiple parts of the software development lifecycle:\n\n- Product and project managers can use Chat’s issues and epics capabilities to gain insights into discussions and plan faster.\n- Developers can create solutions faster with code suggestions and refactoring capabilities. 
When it comes to working with legacy code or code from other team members, less time is spent on research with the ` /explain` capability providing the necessary insights to understand the code.\n- Quality assurance and test engineers can generate tests and check for vulnerabilities\n- New employees can get a better understanding of their code base and get started solving problems.\n- Beginner programmers can understand and pick up a language or framework quickly and create solutions with Chat providing next steps and insights.\n\n> Check out \"[10 best practices for using GitLab Duo Chat](https://about.gitlab.com/blog/10-best-practices-for-using-ai-powered-gitlab-duo-chat/)\" for tips and tricks to craft AI prompts. \n\n## Summarizing issues\n\nWhen you encounter an issue, especially one with a lot of comments, you skim through the issue description, along with a couple of comments, but can't always get the complete picture of the conversations. GitLab Duo Chat can get you up to speed fast. In the image below, I asked Chat to summarize an issue along with a follow-up question. In two prompts, I got what I needed to understand what is going on in the issue without spending hours reading through the comments.\n\n![Chat summarizing an issue](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750099137/Blog/Content%20Images/Blog/Content%20Images/image1_aHR0cHM6_1750099137154.png)\n\nYou can use GitLab Duo Chat on the GitLab interface, as well as [the WebIDE, Visual Studio Code, and JetBrains interfaces](https://docs.gitlab.com/ee/user/gitlab_duo_chat.html#use-gitlab-duo-chat-in-the-web-ide).\n\n## Prompts and context\n\nGetting the best responses from any AI tool requires carefully crafting the questions asked. Sometimes, you need to give examples of the responses you expect to prime the large language models (LLMs) toward a desired response. 
Here are some areas to focus on to get desired responses.\n\n### Context\n\nHere are three prompts with similar objectives but worded differently:\n\n| Prompt 1     | Prompt 2     | Prompt 3    |\n| ---------- | ---------- | ---------- |\n| ![Prompt: Can you summarize this issue's description?](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750099137/Blog/Content%20Images/Blog/Content%20Images/image5_aHR0cHM6_1750099137154.png)       | ![Prompt: Can you provide a high-level summary of this issue?](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750099137/Blog/Content%20Images/Blog/Content%20Images/image4_aHR0cHM6_1750099137155.png)      | ![Prompt: Why is this issue popular?](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750099137/Blog/Content%20Images/Blog/Content%20Images/image2_aHR0cHM6_1750099137156.png)      |\n\nThe context, “this issue,” is common among the three prompts; this tells Chat what resource to use in looking for answers. Prompt 1 gives additional context on what to focus on: the description of the issue. Prompt 2 is not limited in its scope, which means the LLMs will spend more time going through the description and all the comments to provide a more detailed summary of the whole issue. (Note: As of the publication of this blog, there were more than 90 comments in that issue.) Prompt 3 got a poorer response because not much expectation was set for the type of response expected. \n\n[Low-context communication](https://handbook.gitlab.com/handbook/company/culture/all-remote/effective-communication/#understanding-low-context-communication) is critical in crafting your prompt for the best responses, as all information needed for the LLMs to provide an informed response is provided.\n\n### Simplicity\n\nThe wordiness of prompts can sometimes lead to incorrect or no responses. In the image below, you can see that rephrasing a prompt from “Customers have mentioned why this issue is important to them. 
Can you list the top 3 reasons they mentioned?” to “Why is this issue important to customers?” led to the expected response. When you don’t get the response you desire, simplifying or changing the words used in your prompt can improve the quality of responses.\n\n![Wordy Chat prompts](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750099137/Blog/Content%20Images/Blog/Content%20Images/image6_aHR0cHM6_1750099137158.png)\n\n### Follow-up questions\n\nGitLab Duo Chat can have follow-up conversations – an essential capability. In the image below, I continued asking how the issue in question can be solved in GitLab's code along with a follow-up question asking for code samples.\n\n![Streamlined Chat prompt shown](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750099137/Blog/Content%20Images/Blog/Content%20Images/image3_aHR0cHM6_1750099137158.png)\n\nFollow-up questions allow the application to maintain context and provide faster responses. A recommendation is to provide parts of Chat’s previous responses in the next prompt. In the example above, I mentioned “Rails App,” as previously suggested. \n\n## Get started with GitLab Duo Chat \n\nGitLab Duo Chat does more than help you write better code, it helps you navigate through problems and quickly find solutions. With the right prompts and context, you can build secure software faster.\n\n> Want to try GitLab Duo Chat? 
[Start your free trial](https://about.gitlab.com/gitlab-duo/#free-trial) today.",[9,482,703,724],{"slug":1627,"featured":91,"template":680},"gitlab-duo-chat-101-get-more-done-on-gitlab-with-our-ai-assistant","content:en-us:blog:gitlab-duo-chat-101-get-more-done-on-gitlab-with-our-ai-assistant.yml","Gitlab Duo Chat 101 Get More Done On Gitlab With Our Ai Assistant","en-us/blog/gitlab-duo-chat-101-get-more-done-on-gitlab-with-our-ai-assistant.yml","en-us/blog/gitlab-duo-chat-101-get-more-done-on-gitlab-with-our-ai-assistant",{"_path":1633,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":1634,"content":1639,"config":1644,"_id":1646,"_type":14,"title":1647,"_source":16,"_file":1648,"_stem":1649,"_extension":19},"/en-us/blog/gitlab-duo-chat-beta",{"title":1635,"description":1636,"ogTitle":1635,"ogDescription":1636,"noIndex":6,"ogImage":861,"ogUrl":1637,"ogSiteName":693,"ogType":694,"canonicalUrls":1637,"schema":1638},"Say hello to GitLab Duo Chat: A new level of AI-assisted productivity","Learn how GitLab Duo Chat, releasing Nov. 16 in Beta, can help elevate your coding skills, streamline onboarding, and supercharge team efficiency.","https://about.gitlab.com/blog/gitlab-duo-chat-beta","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Say hello to GitLab Duo Chat: A new level of AI-assisted productivity\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Torsten Linz\"}],\n        \"datePublished\": \"2023-11-09\",\n      }",{"title":1635,"description":1636,"authors":1640,"heroImage":861,"date":1641,"body":1642,"category":10,"tags":1643},[1076],"2023-11-09","\n[GitLab Duo Chat](https://docs.gitlab.com/ee/user/gitlab_duo_chat.html), the newest feature in the GitLab Duo suite of AI-assisted capabilities, helps teams write and understand code faster, get up to speed on the status of projects, and quickly learn GitLab. 
Chat will be available in Beta starting November 16.\n\nChat also will be included in our [Web IDE](https://docs.gitlab.com/ee/user/project/web_ide/) and [GitLab Workflow extension for VS Code](https://docs.gitlab.com/ee/editor_extensions/visual_studio_code/) as an experimental release. Get code explanations, generate tests, and create code right where your development work happens; no need for context switching. \n\nWe launched the [GitLab Duo suite](https://about.gitlab.com/gitlab-duo/) earlier this year to bolster software development teams' workflow, helping you deliver more secure software at an unprecedented pace and create more value for your customers. GitLab is the only platform that brings AI-powered planning tools, code creation, security scanning, and vulnerability remediation all into a single developer-friendly experience.\n\nChat will serve as the foundational technology driving our AI-powered GitLab Duo features, such as Vulnerability Summary and Root Cause Analysis. This will enable you to pose in-context follow-up questions for a more thorough investigation.\n\n> [Contact our sales team](https://about.gitlab.com/solutions/gitlab-duo-pro/sales/) to get started with GitLab Duo Chat.\n\nIn our [Global DevSecOps Report: The State of AI in Software Development](https://about.gitlab.com/developer-survey/#ai), 83% of respondents said that implementing AI in their software development processes is essential to avoid falling behind, and they ranked chatbots among their top three AI use cases. Chat, which will be released for Ultimate tier customers as part of GitLab 16.6, is the perfect feature to help you maintain your competitive advantage. Here's why.\n\n## Designed to support everyone across the software development process \nFrom coding assistance to productivity tips, Chat provides real-time support for technical and non-technical users across the entire software development lifecycle. 
\n\n- **Inspiration on demand:** Need help determining the next best step in your workflow? Chat is ever-ready to ignite your ideation process.\n\n- **Elevate productivity:** Chat shoulders the burden of routine tasks so you can channel your energy into delivering value to your customers. \n\n- **Guidance at every step:** Whether you're a GitLab expert or a newcomer, Chat is your go-to coach, helping you become an expert in any process or feature.\n\nSometimes, there isn't enough time in the day, and even though you want to focus on important tasks, there are many little things you need to do. \n\n- **Code assistance:** Chat can assist in decoding the mysteries of unfamiliar code. It can explain,  propose tests, or simplify the code. You can also use Chat to write code from scratch interactively.\n\n![Chat in Web IDE](https://about.gitlab.com/images/blogimages/2023-11-09-chat-beta/chatbetaissueepicmanagement.gif){: .shadow}\n\n- **Issue and epic management:** Summarize an issue in seconds, turn comments into an issue description, or distill specific information from large epics easily. You can ask any question about the content of an epic or an issue. Plan work with issues and epics faster with the help of Chat. \n\n![Chat issue and epic management example](https://about.gitlab.com/images/blogimages/2023-11-09-chat-beta/chatinideexperimentalrelease.gif){: .shadow}\n\n- **Onboarding and learning made simple:** Whether you are onboarding to GitLab or you are already an expert learning how to use GitLab is streamlined with Chat.\n\n![Chat learning example](https://about.gitlab.com/images/blogimages/2023-11-09-chat-beta/chatbetareleaselearninglong.gif){: .shadow}\n\n## How your data stays your data  \nChat does not use your proprietary code or inputs to Chat as training data. This privacy-first approach includes both the prompt and the output of Chat. GitLab Duo uses the right large language models (LLMs) for each use case. 
For instance, Anthropic Claude-2 and Vertex AI Codey with text embedding-gecko LLMs power Chat. Our [publicly available documentation](https://docs.gitlab.com/ee/user/ai_features.html) describes all AI models GitLab Duo uses and [how it uses your data](https://docs.gitlab.com/ee/user/ai_features.html#data-privacy). \n\n## The road ahead for GitLab Duo  \t\nAs we continue to innovate and improve GitLab Duo, we're excited to share that our [Code Suggestions](https://about.gitlab.com/solutions/code-suggestions/) capability will transition from Beta to general availability later this year. We look forward to seeing the transformative impact GitLab Duo will have on your software development efforts. Learn more about GitLab Duo Chat in [our documentation](https://docs.gitlab.com/ee/user/gitlab_duo_chat.html) and [share your feedback and ideas](https://gitlab.com/gitlab-org/gitlab/-/issues/430124). \n\nTo get started with GitLab Duo Chat, please [contact our sales team](https://about.gitlab.com/solutions/gitlab-duo-pro/sales/).\n\n_Disclaimer: This blog contains information related to upcoming products, features, and functionality. It is important to note that the information in this blog post is for informational purposes only. Please do not rely on this information for purchasing or planning purposes. As with all projects, the items mentioned in this blog and linked pages are subject to change or delay. 
The development, release, and timing of any products, features, or functionality remain at the sole discretion of GitLab._\n",[9,703,1135],{"slug":1645,"featured":6,"template":680},"gitlab-duo-chat-beta","content:en-us:blog:gitlab-duo-chat-beta.yml","Gitlab Duo Chat Beta","en-us/blog/gitlab-duo-chat-beta.yml","en-us/blog/gitlab-duo-chat-beta",{"_path":1651,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":1652,"content":1658,"config":1663,"_id":1665,"_type":14,"title":1666,"_source":16,"_file":1667,"_stem":1668,"_extension":19},"/en-us/blog/gitlab-duo-chat-get-to-know-productivity-boosting-ai-enhancements",{"title":1653,"description":1654,"ogTitle":1653,"ogDescription":1654,"noIndex":6,"ogImage":1655,"ogUrl":1656,"ogSiteName":693,"ogType":694,"canonicalUrls":1656,"schema":1657},"GitLab Duo Chat: Get to know productivity-boosting AI enhancements","Learn about Chat's new capabilities, including migration to Claude 3.5 Sonnet, new slash command helpers, and the integration of Root Cause Analysis and Explain Vulnerability features.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098629/Blog/Hero%20Images/Blog/Hero%20Images/blog-image-template-1800x945%20%281%29_77JeTV9gAmbXM0224acirV_1750098628882.png","https://about.gitlab.com/blog/gitlab-duo-chat-get-to-know-productivity-boosting-ai-enhancements","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"GitLab Duo Chat: Get to know productivity-boosting AI enhancements\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Jannik Lehmann\"},{\"@type\":\"Person\",\"name\":\"David O'Regan\"}],\n        \"datePublished\": \"2024-10-03\",\n      }",{"title":1653,"description":1654,"authors":1659,"heroImage":1655,"date":1660,"body":1661,"category":10,"tags":1662},[1174,1175],"2024-10-03","At GitLab, we [continuously strive to enhance your experience with GitLab Duo 
Chat]((https://gitlab.com/gitlab-org/gitlab/-/issues/430124)), our AI-powered assistant designed to streamline your development workflows. In this article, we share a series of significant updates that bring even more power, precision, and functionality to GitLab Duo Chat. \n\n## Migration to Claude 3.5 Sonnet\n\nWe are thrilled to announce a major upgrade for GitLab Duo Chat: [the migration of its underlying Large Language Model from Claude 3 Sonnet to the more advanced Claude 3.5 Sonnet](https://gitlab.com/gitlab-org/gitlab/-/issues/468334). This new model brings substantial performance enhancements, offering superior accuracy, context-awareness, and efficiency in AI-driven conversations.\n\nWith Claude 3.5 Sonnet powering GitLab Duo Chat, users can expect more precise and relevant responses. This upgrade ensures Chat remains at the forefront of AI technology, helping your team work more effectively in their daily workflows.\n\n![Screenshot of Chat in VS Code](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098636/Blog/Content%20Images/Blog/Content%20Images/image6_aHR0cHM6_1750098636137.png)\n\nNotice the [code block syntax highlighting](https://gitlab.com/gitlab-org/gitlab-vscode-extension/-/issues/1435) in Chat in VS Code.\n\n## Bringing the Slash Command Picker to Chat in the GitLab UI\n\nTo further improve the discovery of GitLab Duo Chat slash commands and make them more quickly accessible to our users, [we’ve introduced the Slash Command Picker UI](https://gitlab.com/gitlab-org/gitlab/-/issues/470703). Now, when you start typing a prompt with `/` in Chat in the GitLab UI, the available slash commands relevant to your current context will be automatically displayed. 
\n\nThis feature enhances your workflow and acts as the foundation for a growing platform of AI-powered capabilities that we plan to expand in the near future.\n\n![GitLab Duo Chat Slash Command Picker](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098636/Blog/Content%20Images/Blog/Content%20Images/image1_aHR0cHM6_1750098636138.png)\n\n## Root Cause Analysis integration\n\nGitLab Duo Chat is gaining another powerful feature: [Root Cause Analysis](https://docs.gitlab.com/ee/user/gitlab_duo_chat/examples.html#troubleshoot-failed-cicd-jobs-with-root-cause-analysis). \n\nThis integration allows you to maintain context within Chat while investigating failed pipeline jobs, making it easier to ask follow-up questions and explore the root causes of problems.\n\nYou can access Root Cause Analysis by clicking the \"Troubleshoot\" button at the end of the job log, or you can select the job log portions and then ask Chat with the `/troubleshoot` slash command. With this seamless integration, you have the tools you need to resolve issues more efficiently.\n\n![Root Cause Analysis example in Chat](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098636/Blog/Content%20Images/Blog/Content%20Images/image3_aHR0cHM6_1750098636140.png)\n\n## Fix code in the IDE\n\nOne of the latest enhancements to GitLab Duo Chat is the ability to ask it to [fix selected code](https://docs.gitlab.com/ee/user/gitlab_duo_chat/examples.html#fix-code-in-the-ide) within your IDE. This feature, introduced in GitLab 17.3, is available in the Web IDE, VS Code, and JetBrains IDE. 
It allows you to make specific code fixes by selecting portions of code and using the /fix slash command.\n\nFor example, you can instruct Chat to:\n- Fix grammar mistakes and typos with `/fix grammar mistakes and typos`.\n- Address performance issues using `/fix performance problems`.\n- Solve specific bugs or algorithm-related issues with commands like `/fix duplicate database inserts` or `/fix race conditions`.\n- Resolve code compilation errors with `/fix the build`.\n\nThis feature is designed to help developers quickly resolve common coding issues and improve the quality of their code, all while staying within their familiar IDE environment.\n\nHere is an example for fixing grammar mistakes and improving the language of (code) comments.\n\n![An example for fixing grammar mistakes](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098636/Blog/Content%20Images/Blog/Content%20Images/image5_aHR0cHM6_1750098636142.png)\n\nHere is an example for fixing C code to print the disk usage. Chat correctly suggests missing header includes and provides more help to avoid additional bugs. The source code is available in [the GitLab Duo challenge project](https://gitlab.com/gitlab-da/use-cases/ai/ai-workflows/gitlab-duo-challenges/code-challenges/fix-c-cli-perf).\n\n![Chat enhancements - image 5](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098636/Blog/Content%20Images/Blog/Content%20Images/image2_aHR0cHM6_1750098636144.png)\n\n## Explain Vulnerability now in GitLab Duo Chat\n\nAnother highly popular AI-powered feature, [Explain Vulnerability, has been integrated into GitLab Duo Chat](https://gitlab.com/groups/gitlab-org/-/epics/13309). This addition allows you to explore vulnerability details in depth while keeping your Chat context intact. You can ask follow-up questions and engage in more comprehensive discussions directly within the chat environment. 
You can access this feature by viewing a SAST vulnerability in your project’s Vulnerability Report.\n\nCurrently, this feature supports results from SAST scanners, including [Advanced SAST](https://docs.gitlab.com/ee/user/application_security/sast/gitlab_advanced_sast.html), with plans to extend support to additional scanners soon.\n\n![Sample Vulnerability Report](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098636/Blog/Content%20Images/Blog/Content%20Images/image4_aHR0cHM6_1750098636148.png)\n\n## What's next?\n\nWe're continuously improving GitLab Duo Chat. Some areas we're exploring include:\n- Context is important. We’re prioritizing the integration of [commits](https://gitlab.com/gitlab-org/gitlab/-/issues/468460), [pipeline jobs](https://gitlab.com/gitlab-org/gitlab/-/issues/468461), and [merge requests](https://gitlab.com/gitlab-org/gitlab/-/issues/464587) into Chat’s contextual scope. Additionally, we are looking into [terminal assistance with Chat](https://gitlab.com/gitlab-org/gitlab-vscode-extension/-/issues/1423). This expansion will allow Chat to provide more informed and relevant responses based on a broader range of data.\n- Introduce the `/help` slash command. To make navigating Chat’s AI-powered features even more intuitive, we started development on [a /help slash command](https://gitlab.com/gitlab-org/gitlab/-/issues/462122). This new feature will guide users through the available commands and capabilities for easier and faster access to the tools you need.\n- Make Chat available in [supported IDEs](https://docs.gitlab.com/ee/user/gitlab_duo_chat/#supported-editor-extensions). You can follow the development work for Visual Studio in [this epic](https://gitlab.com/groups/gitlab-org/editor-extensions/-/epics/22). \n\nWe look forward to [hearing your feedback on these enhancements](https://gitlab.com/gitlab-org/gitlab/-/issues/430124). 
Stay tuned for more updates as we continue to evolve [GitLab Duo Chat](https://about.gitlab.com/gitlab-duo/).\n\n> Get started with GitLab Duo Chat today by [signing up for a free 60-day trial](https://about.gitlab.com/solutions/gitlab-duo-pro/sales/?toggle=gitlab-duo-pro).",[9,703,704,725],{"slug":1664,"featured":6,"template":680},"gitlab-duo-chat-get-to-know-productivity-boosting-ai-enhancements","content:en-us:blog:gitlab-duo-chat-get-to-know-productivity-boosting-ai-enhancements.yml","Gitlab Duo Chat Get To Know Productivity Boosting Ai Enhancements","en-us/blog/gitlab-duo-chat-get-to-know-productivity-boosting-ai-enhancements.yml","en-us/blog/gitlab-duo-chat-get-to-know-productivity-boosting-ai-enhancements",{"_path":1670,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":1671,"content":1677,"config":1682,"_id":1684,"_type":14,"title":1685,"_source":16,"_file":1686,"_stem":1687,"_extension":19},"/en-us/blog/gitlab-duo-chat-gets-agentic-ai-makeover",{"title":1672,"description":1673,"ogTitle":1672,"ogDescription":1673,"noIndex":6,"ogImage":1674,"ogUrl":1675,"ogSiteName":693,"ogType":694,"canonicalUrls":1675,"schema":1676},"GitLab Duo Chat gets agentic AI makeover  ","Our new Duo Chat experience, currently an experimental release, helps developers onboard to projects, understand assignments, implement changes, and more.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1750099203/Blog/Hero%20Images/Blog/Hero%20Images/blog-image-template-1800x945%20%2820%29_2bJGC5ZP3WheoqzlLT05C5_1750099203484.png","https://about.gitlab.com/blog/gitlab-duo-chat-gets-agentic-ai-makeover","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"GitLab Duo Chat gets agentic AI makeover  \",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Torsten Linz\"}],\n        \"datePublished\": \"2025-05-29\",\n      
}",{"title":1672,"description":1673,"authors":1678,"heroImage":1674,"date":1679,"body":1680,"category":10,"tags":1681},[1076],"2025-05-29","Generative AI chat assistants have become standard in software development, helping create and fix code just to start. But what if your chat assistant could understand the artifacts of your entire development process, not just your code? What if that chat assistant could help you work through issues and project documentation before it helps you write code, and could access CI/CD pipelines and merge requests to help you finish coding tasks properly? \n\n**Meet the next generation of GitLab Duo Chat – GitLab Duo Agentic Chat, a significant evolution in AI-native development assistance and the newest addition to our platform, now in [experimental release](https://docs.gitlab.com/policy/development_stages_support/#experiment).** GitLab Duo Agentic Chat is currently available as an experimental feature in VS Code to all users on GitLab.com that have any one of these add-ons: Duo Core, Duo Pro, or Duo Enterprise.\n\nAgentic Chat transforms chat from traditional conversational AI to a chat experience that takes action on your behalf, breaking down complex problems into discrete tasks that it can complete. Instead of simply responding to questions with the context you provide, Agentic Chat can:\n\n* **Autonomously determine** what information it needs to answer your questions  \n* **Execute a sequence of operations** to gather that information from multiple sources  \n* **Formulate comprehensive responses** by combining insights from across your project  \n* **Create and modify files** to help you implement solutions\n\nAnd all of this is done while keeping the human developer within the loop.\n\nAgentic Chat is built on the Duo Workflow architecture, which is [currently in private beta](https://about.gitlab.com/blog/gitlab-duo-workflow-enterprise-visibility-and-control-for-agentic-ai/). 
The architecture comprises agents and tools that take on specific tasks like finding the right context for a given question or editing files. \n\n**Use cases for GitLab Duo Agentic Chat**\n\nHere are some real-world and common use cases for Agentic Chat:\n\n* Onboard to new projects faster by having AI help you familiarize yourself with a new codebase.\n\n* Jump into assigned work immediately, even when issue descriptions are unclear, because Agentic Chat can help you connect the dots between requirements and existing implementations.\n\n* When it's time to make changes, Agentic Chat can handle the implementation work by creating and editing multiple files across your project.\n\n* At release time, Agentic Chat can help you verify that your solution actually addresses the original requirements by analyzing your merge requests against the initial issue or task.\n\n![agentic chat - example](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750099210/Blog/Content%20Images/Blog/Content%20Images/image4_aHR0cHM6_1750099210429.png)\n\n\u003Ccenter>\u003Ci>Agentic Chat making code edits\u003C/i>\u003C/center>\n\n## From learning to shipping: A complete workflow demonstration in four steps\n\nTo show how Agentic Chat transforms the development experience, let's walk through a real scenario from our engineering teams. Imagine you're a new team member who's been assigned an issue but knows nothing about the codebase. You can follow along with this video demonstration:\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/uG9-QLAJrrg?si=kaOhYylMIaWkIuG8j\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\n**Step 1: Understand the project**\n\nInstead of manually exploring files and documentation, you can prompt Agentic Chat:\n\n```unset\nI am new to this project. 
Could you read the project structure and explain it to me?\n```\n\nAgentic Chat provides a comprehensive project overview by:  \n- Exploring the directory structure  \n- Reading README files and documentation  \n- Identifying key components and applications\n\n**Step 2: Understand your assigned task**\n\nNext, you need to understand your specific assignment, so you can enter this prompt:\n\n```unset\nI have been assigned Issue 1119. Could you help me understand this task, specifically where do I need to apply the refactoring?\n```\n\nAgentic Chat explains the task and proposes a refactoring approach by:\n- Retrieving and analyzing the issue details from the remote GitLab server  \n- Examining relevant project files  \n- Identifying the specific locations requiring changes\n\n**Step 3: Implement the solution**\n\nRather than doing the work manually, you can request:\n\n```unset\nCould you make the edits for me? Please start with steps one, two, three.\n```\n\nAgentic Chat then:  \n- Creates new directories and files as needed \n- Extracts and refactors code across multiple locations  \n- Ensures consistency across all modified files  \n- Provides a summary of all changes made\n\n**Step 4: Verify completion**\n\nFinally, after creating your merge request, you can verify your work:\n\n```unset\nDoes my MR fully address Issue 1119? \n```\n\nAgentic Chat confirms whether all requirements have been met by analyzing both your merge request and the original issue.\n\n## Try it today and share your feedback\n\nGitLab Duo Agentic Chat is currently available as an experimental feature in VS Code to all users on GitLab.com that have any one of these add-ons: Duo Core, Duo Pro, or Duo Enterprise. 
See our [setup documentation](https://docs.gitlab.com/user/gitlab_duo_chat/agentic_chat/) for prerequisites and configuration steps.\n\nAs an experimental feature, Agentic Chat has some known limitations we're actively addressing, including slower response times due to multiple API calls, keyword-based rather than semantic search, and limited support for new local folders or non-GitLab projects. **Your feedback is crucial in helping us prioritize improvements and bring Agentic Chat to general availability so please share your experience in [this issue](https://gitlab.com/gitlab-org/gitlab/-/issues/542198).**\n\n## What's next?\n\nWe are fully focused on improving Agentic Chat, including bringing it to general availability. In the meantime, we are aiming to improve response times and are adding capabilities that GitLab Duo Chat currently has, such as using self-hosted models or supporting JetBrains and Visual Studio in addition to VS Code. Once we have switched Duo Chat to this new architecture we plan to also bring Agentic Chat to the chat in the GitLab web application. We also plan to add a lot more functionality, such as editing GitLab artifacts, supporting context from custom Model Context Protocol, or MCP, servers, and offering commands to run in the terminal.\n\n> Ready to experience autonomous development assistance but not yet a GitLab customer? Try Agentic Chat today as part of [a free, 60-day trial of GitLab Ultimate with Duo Enterprise](https://about.gitlab.com/free-trial/) and help shape the future of AI-powered development. Follow these [setup steps for VS Code](https://docs.gitlab.com/user/gitlab_duo_chat/agentic_chat/#use-agentic-chat-in-vs-code).\n>\n> And make sure to join the GitLab 18 virtual launch event to learn about our agentic AI plans and more. [Register today!](https://about.gitlab.com/eighteen/)\n\n***Disclaimer: This blog contains information related to upcoming products, features, and functionality. 
It is important to note that the information in this blog post is for informational purposes only. Please do not rely on this information for purchasing or planning purposes. As with all projects, the items mentioned in this blog and linked pages are subject to change or delay. The development, release, and timing of any products, features, or functionality remain at the sole discretion of GitLab.***\n\n## Learn more\n\n- [GitLab Duo Workflow: Enterprise visibility and control for agentic AI](https://about.gitlab.com/blog/gitlab-duo-workflow-enterprise-visibility-and-control-for-agentic-ai/)\n- [What is agentic AI?](https://about.gitlab.com/topics/agentic-ai/)\n- [Agentic AI guides and resources](https://about.gitlab.com/blog/agentic-ai-guides-and-resources/)\n",[9,1135,704,482,725,724],{"slug":1683,"featured":91,"template":680},"gitlab-duo-chat-gets-agentic-ai-makeover","content:en-us:blog:gitlab-duo-chat-gets-agentic-ai-makeover.yml","Gitlab Duo Chat Gets Agentic Ai Makeover","en-us/blog/gitlab-duo-chat-gets-agentic-ai-makeover.yml","en-us/blog/gitlab-duo-chat-gets-agentic-ai-makeover",{"_path":1689,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":1690,"content":1696,"config":1701,"_id":1703,"_type":14,"title":1704,"_source":16,"_file":1705,"_stem":1706,"_extension":19},"/en-us/blog/gitlab-duo-chat-now-generally-available",{"title":1691,"description":1692,"ogTitle":1691,"ogDescription":1692,"noIndex":6,"ogImage":1693,"ogUrl":1694,"ogSiteName":693,"ogType":694,"canonicalUrls":1694,"schema":1695},"GitLab Duo Chat, your at-the-ready AI assistant, is now generally available","Explore a real-world example of how our versatile conversational interface for GitLab Duo helps developers onboard faster and write secure code more efficiently.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749671806/Blog/Hero%20Images/GitLab-Duo-Chat-Thumbnail.png","https://about.gitlab.com/blog/gitlab-duo-chat-now-generally-available","\n                        {\n        
\"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"GitLab Duo Chat, your at-the-ready AI assistant, is now generally available\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Torsten Linz\"},{\"@type\":\"Person\",\"name\":\"Laurena Alves\"}],\n        \"datePublished\": \"2024-04-18\",\n      }",{"title":1691,"description":1692,"authors":1697,"heroImage":1693,"date":1698,"body":1699,"category":10,"tags":1700},[1076,866],"2024-04-18","GitLab Duo Chat is now generally available in GitLab 16.11, bringing a broad range of AI features together into a single easy-to-use, natural language chat experience. Chat provides real-time guidance for engineering and non-engineering users across the entire software development lifecycle — supporting teams in a wide range of tasks, from understanding code faster and boosting collaboration to quickly learning how to use GitLab.\n\nToday, we are also making the following GitLab Duo capabilities, which are accessible in Chat, generally available:\n- **Code explanation** helps developers understand unfamiliar code.\n- **Code refactoring** enables developers to improve and modernize existing code.\n- **Test generation** automates the writing of tests for functions and methods and helps teams catch bugs sooner.\n\nGitLab Duo Chat is available within the GitLab user interface, including GitLab’s Web IDE, as well as within popular IDEs, including VS Code and the JetBrains suite of IDEs. 
Chat can also be extended by adding custom tools to help users more accurately complete tasks unique to their organization.\n\nGitLab's [2023 State of AI in Software Development Report](https://about.gitlab.com/developer-survey/#ai) found that developers spend 75% of their time on tasks other than writing code, including understanding and improving existing code, maintaining code, and testing — and AI, when leveraged across the entire software development lifecycle, will drive efficiencies across all these areas. The report also found that over three-quarters of DevSecOps professionals said they are concerned about AI tools having access to private information or intellectual property. By providing teams with a single entry point into AI for the entire software development process, Chat helps teams boost efficiency without sacrificing security, privacy, or code quality. \n\nIn this blog, we’ll explore an example of how you can use Chat to jump into a new project and immediately start adding value.\n\n## Get up to speed, fast\nAs a developer during onboarding, you can ask Chat for help with general knowledge such as understanding CI/CD, the difference between an issue and an epic, how to reset your GitLab password, or how to get started with popular frameworks. 
With Chat, you have an assistant ready to answer all of your onboarding questions, and soon you’re ready to dig into your first project.\n\n\u003Ccenter>\n\u003Cfigure>\n   \u003Cimg src=\"https://res.cloudinary.com/about-gitlab-com/image/upload/v1752175955/Blog/tottp2iwt85d7grkytpc.png\" alt=\"Chat GA blog CI/CD explanation\" width=\"300\">\n\u003C/figure>\n\u003C/center>\n\u003Cp>\u003C/p>\n\n## Accelerate development\nYour first task is to help the Product team update the product UI to include totals for the prices of all products in the inventory.\n\nTo better understand where to add the new functionality, you can ask Chat to explain a portion of the application and get a quick, easy-to-understand explanation of how the code works — without context-switching. Then, as you start developing the new feature, you can use GitLab Duo Code Suggestions to write code more efficiently. GitLab Duo lets you stay in flow by predictively completing code blocks and proposing common code, all in the same environment where you’re already coding.\n\nFinally, you spot a few lines of code that can be simplified, so you ask Chat to refactor the code. After just a few minutes, you have a better understanding of how your application works — and you have new, high-quality code to implement the functionality the Product team is looking for.\n\n\u003Cdiv style=\"padding:56.25% 0 0 0;position:relative;\">\u003Ciframe src=\"https://player.vimeo.com/video/933806295?autoplay=1&loop=1&autopause=0&background=1&muted=1\" frameborder=\"0\" allow=\"autoplay; fullscreen; picture-in-picture; clipboard-write\" style=\"position:absolute;top:0;left:0;width:100%;height:100%;\" title=\"Help Center Video\">\u003C/iframe>\u003C/div>\u003Cscript src=\"https://player.vimeo.com/api/player.js\">\u003C/script>\n\u003Cp>\u003C/p>\n\n## Stay secure\nBefore you merge any of your new code, you’ll need to run some tests to ensure everything works as expected. 
You can ask Chat to create unit tests for a Java class you modified. You then paste the unit tests into a new file. You notice that the vulnerability scanner for infrastructure as code has not been enabled, so you ask Chat about it and quickly append the scanner to the project pipeline.\n\nNow you can merge your code with confidence.\n\n\u003Cdiv style=\"padding:56.25% 0 0 0;position:relative;\">\u003Ciframe src=\"https://player.vimeo.com/video/933806352?autoplay=1&loop=1&autopause=0&background=1&muted=1\" frameborder=\"0\" allow=\"autoplay; fullscreen; picture-in-picture; clipboard-write\" style=\"position:absolute;top:0;left:0;width:100%;height:100%;\" title=\"Help Center Video\">\u003C/iframe>\u003C/div>\u003Cscript src=\"https://player.vimeo.com/api/player.js\">\u003C/script>\n\u003Cp>\u003C/p>\n\n> Explore tips and tricks for [integrating GitLab Duo Chat into your AI-powered DevSecOps workflows](https://about.gitlab.com/blog/10-best-practices-for-using-ai-powered-gitlab-duo-chat/).\n\n## Adopt AI with guardrails\nLuckily, while you’ve been busy onboarding, getting up to speed on your first project, and merging your first lines of code, your organization can rest assured that Chat is built with privacy in mind and doesn't use customer data to train AI models. In addition, GitLab Duo AI access controls enable organizations to control sensitive data at the project, sub-group, and group levels by enabling or disabling AI read access.\n\n> Visit the [GitLab AI Transparency Center](https://about.gitlab.com/ai-transparency-center/) to explore how GitLab implements governance and transparency in GitLab Duo.\n\n## Get started with GitLab Duo Chat today\nWhether you’re a developer or you’re managing the entire team, GitLab Duo Chat can empower you to take advantage of AI exactly where you need it throughout the software development lifecycle — all while helping you maintain code quality and security guardrails. 
GitLab Duo Pro, including code explanation, code refactoring, and test generation, is now available to GitLab Premium and Ultimate customers for $19 USD per user per month.\n\n![GitLab Duo Pro price card](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749677968/Blog/Content%20Images/GitLab_Pricing_Guide_PRO_820px_B.png)\n\n> [Start a free trial of GitLab Duo and get started using Chat today.](https://about.gitlab.com/gitlab-duo/#free-trial)\n\n## What’s next for GitLab Duo \nWe are building [GitLab Duo](https://about.gitlab.com/gitlab-duo/) to empower your teams to develop software faster, secure applications more efficiently, better manage software pipelines, and enhance team collaboration. We are excited to share that several GitLab Duo features currently in Beta will soon be released into general availability. Key enhancements include improved AI-powered security capabilities, such as detailed vulnerability explanations and automated resolution processes, as well as summarization and templating tools to help teams improve collaboration in issues, merge requests, and code reviews with concise AI-powered summaries and auto-population of templates.\n\nIn line with our privacy-first approach to AI, we will also continue to introduce new ways for organizations to tailor GitLab Duo to their specific needs with two upcoming capabilities:\n\n- **Model personalization** will enable organizations to leverage the full potential of AI in a way that aligns closely with their strategic goals, operational needs, and customer expectations.\n- **Self-hosted model deployment** will help organizations ensure that data doesn't leave their secure environment, reducing the risk of breaches and ensuring compliance with data protection regulations.\n\n> Visit the [GitLab Duo documentation](https://docs.gitlab.com/ee/user/ai_features.html) for a complete list of generally available, Beta, and Experimental 
features.",[9,725,482,1135],{"slug":1702,"featured":91,"template":680},"gitlab-duo-chat-now-generally-available","content:en-us:blog:gitlab-duo-chat-now-generally-available.yml","Gitlab Duo Chat Now Generally Available","en-us/blog/gitlab-duo-chat-now-generally-available.yml","en-us/blog/gitlab-duo-chat-now-generally-available",{"_path":1708,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":1709,"content":1714,"config":1719,"_id":1721,"_type":14,"title":1722,"_source":16,"_file":1723,"_stem":1724,"_extension":19},"/en-us/blog/gitlab-duo-code-suggestions-is-generally-available",{"title":1710,"description":1711,"ogTitle":1710,"ogDescription":1711,"noIndex":6,"ogImage":861,"ogUrl":1712,"ogSiteName":693,"ogType":694,"canonicalUrls":1712,"schema":1713},"GitLab Duo Code Suggestions is generally available","Learn how our AI-powered workflow helps developers write secure code efficiently.\n","https://about.gitlab.com/blog/gitlab-duo-code-suggestions-is-generally-available","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"GitLab Duo Code Suggestions is generally available\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"David DeSanto, Chief Product Officer, GitLab\"}],\n        \"datePublished\": \"2023-12-22\",\n      }",{"title":1710,"description":1711,"authors":1715,"heroImage":861,"date":1716,"body":1717,"category":10,"tags":1718},[1486],"2023-12-22","GitLab Duo Code Suggestions, part of the GitLab Duo suite of AI-powered workflows, is now generally available with GitLab 16.7. 
[Code Suggestions](https://about.gitlab.com/solutions/code-suggestions/), our generative AI code creation assistant within our DevSecOps platform, helps developers to write secure code more efficiently and assists in improving cycle times by taking care of repetitive, routine coding tasks.\n\nAccording to GitLab's [2023 State of AI in Software Development](https://about.gitlab.com/developer-survey/#ai) report, 83% of DevSecOps professionals said it is essential to implement AI in their software development processes to avoid falling behind, and a majority were interested in using AI for code generation and code suggestions.\n\nAs DevSecOps teams incorporate AI in the software development lifecycle, tapping into easy-to-adopt features like Code Suggestions provides a good entry point to achieve improved efficiency, accuracy, and productivity while not compromising on security and governance.\n\n> [Try Code Suggestions for free](http://about.gitlab.com/solutions/code-suggestions/sales) through February 14.\n\n## Faster development with less context switching\n\nA developer's workload is more than just writing code; it involves extensive context switching to search through documentation, hunt for code examples, and work through trial and error. All of this interrupts the software development process, decreasing time to value.\n\nLeveraging generative AI, Code Suggestions helps boost developers' efficiency and effectiveness by assisting in reducing the time required for coding fundamental functions. It also helps in understanding and extending existing, and sometimes unfamiliar, codebases while helping ensure adherence to security best practices. 
Code Suggestions includes the following capabilities:\n\n- **Code generation:** automatically generates lines of code, including full functions, from single and multi-line comments as well as comment blocks \n- **Code completion:** automatically proposes new lines of code from a few typed characters\n\nCode Suggestions is available in 15 languages, including C++, C#, Go, Java, JavaScript, Python, PHP, Ruby, Rust, Scala, Kotlin, and TypeScript. GitLab editor extensions can be found in popular IDE marketplaces; VS Code, Visual Studio, JetBrains’ suite of IDEs, and Neovim are all supported. And, of course, Code Suggestions is available within GitLab’s Web IDE, giving developers a quick way to get up and running with GitLab Duo. Support for Code Suggestions is available for self-managed GitLab instances via a secure connection to GitLab cloud infrastructure.\n\nWatch this introduction to Code Suggestions:\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/ds7SG1wgcVM?si=9J9gX0qs5De2NXUC\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\n## Discover more AI capabilities with GitLab Duo\n\nOrganizations can use AI to help streamline the entire DevSecOps lifecycle and ship better, more secure, software faster. GitLab Duo has 15 AI-assisted features that support everyone involved in software development. From planning and coding to testing to delivery, there's a [GitLab Duo](https://about.gitlab.com/gitlab-duo/) capability to help. \n\nFor example, [GitLab Duo Vulnerability Resolution](https://docs.gitlab.com/ee/user/application_security/vulnerabilities/index.html#vulnerability-resolution) helps teams remediate vulnerabilities proactively with the assistance of generative AI. 
In addition, Discussion Summary assists in getting everyone up to speed and aligned on lengthy conversations within [GitLab Enterprise Agile Planning](https://about.gitlab.com/blog/gitlab-enterprise-agile-planning-add-on-for-all-roles/).\n\nOur approach to AI is resonating with our customers. For example, Amado Gramajo, Vice President of Infrastructure & DevOps at Nasdaq, recently shared his excitement about how GitLab Duo will help Nasdaq protect their intellectual property and stay in line with regulatory mandates.\n\nGitLab is the only platform that integrates AI throughout the entire software development lifecycle. As developers become more effective, GitLab helps security and operations team members to keep pace. GitLab has a privacy- and transparency-first approach to AI and [does not use customer code to train AI models](https://docs.gitlab.com/ee/user/project/repository/code_suggestions/index.html#code-suggestions-data-usage). \n\n## Get started with Code Suggestions today\n\nCode Suggestions, which can be trialed for free from December 21 through February 14 (subject to [GitLab’s Testing Agreement](https://handbook.gitlab.com/handbook/legal/testing-agreement/)), is available as an add-on to GitLab subscriptions for an introductory price of $9 USD per user/month. 
[Contact us today](http://about.gitlab.com/solutions/code-suggestions/sales) to get started with Code Suggestions.\n",[9,1135,703],{"slug":1720,"featured":91,"template":680},"gitlab-duo-code-suggestions-is-generally-available","content:en-us:blog:gitlab-duo-code-suggestions-is-generally-available.yml","Gitlab Duo Code Suggestions Is Generally Available","en-us/blog/gitlab-duo-code-suggestions-is-generally-available.yml","en-us/blog/gitlab-duo-code-suggestions-is-generally-available",{"_path":1726,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":1727,"content":1733,"config":1739,"_id":1741,"_type":14,"title":1742,"_source":16,"_file":1743,"_stem":1744,"_extension":19},"/en-us/blog/gitlab-duo-code-suggestions-python",{"title":1728,"description":1729,"ogTitle":1728,"ogDescription":1729,"noIndex":6,"ogImage":1730,"ogUrl":1731,"ogSiteName":693,"ogType":694,"canonicalUrls":1731,"schema":1732},"How GitLab Duo Code Suggestions helped me make long car rides fun","AI-powered Gitlab Duo Code Suggestions coupled with Remote Development workspaces helped me create a Python-based guessing game application to keep my daughter entertained.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749679869/Blog/Hero%20Images/carride.jpg","https://about.gitlab.com/blog/gitlab-duo-code-suggestions-python","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How GitLab Duo Code Suggestions helped me make long car rides fun\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Itzik Gan Baruch\"}],\n        \"datePublished\": \"2023-08-03\",\n      }",{"title":1728,"description":1729,"authors":1734,"heroImage":1730,"date":1736,"body":1737,"category":10,"tags":1738},[1735],"Itzik Gan Baruch","2023-08-03","\nOn long car rides with my daughter I have two options: Let her watch a movie or continuously play a game with her where she tries to guess a random number in my head. 
I often opt to play the game, but recently, my energy and patience weren't at their peak. This led to a moment of inspiration. Why not develop a simple application that my daughter could play on her own? The only hurdle was that I haven't developed an application in years, and, since then, new languages such as Python have emerged. \n\nWith the help of the AI-powered feature [GitLab Duo Code Suggestions (Beta)](https://docs.gitlab.com/ee/user/project/repository/code_suggestions.html), I was able to quickly develop a Python application, despite having no prior knowledge of the programming language.\n\n## How I built the guessing game application\nMy application development journey began by launching the [Web IDE](https://docs.gitlab.com/ee/user/project/web_ide/) powered by VS Code. From there, I navigated to Preferences and [activated Code Suggestions](https://docs.gitlab.com/ee/user/project/repository/code_suggestions.html#enable-code-suggestions-for-an-individual-user), paving the way for my AI-assisted coding session.\n\nThanks to Code Suggestions, I had the flexibility to input code or compose comments in plain text English, and receiving automated code suggestions while typing. For instance, I included only these three comments, ultimately utilizing them to construct the entire application:\n- store a random number between 0 to 10 \n- print a welcome message to Yael, ask to guess a number between 0 to 10 \n- create a loop that will continue until the user guesses the number \n\nTo my surprise, not only did the code suggestions align with my comments, but they also grasped the underlying concept of my application, offering additional code without explicit comments. 
After making a few minor adjustments, here is the final code for the game:\n\n```python\nimport random\n\n# Store a random number between 0 to 10\nnumber = random.randint(0, 10)\ncounter = 0\n# Print a welcome message to Yael, ask to guess a number between 0 to 10\n\nprint(\"Welcome to the guessing game, Yael!\")\nprint(\"I am thinking of a number between 0 and 10, can you guess what it is?\")\n\n# Create a loop that will continue until the user guesses the number\n\nwhile True:\n   # Get the user's guess\n   guess = int(input(\"Guess my number: \"))\n   counter += 1\n\n\n  # Check if the user's guess is correct\n   if guess == number:\n       # If the guess is correct, print a message and break out of the loop\n       print(\"You guessed it! My number was\", number, \"and it took you\", counter, \"guesses.\")\n       break\n  \n   elif    guess \u003C number:\n       print(\"Your guess is too low.\")\n   else:\n       print(\"Your guess is too high.\")\n\n```\n\nWith the assistance of Code Suggestions, I was able to navigate the intricacies of Python coding, step by step. The suggested code not only aligned perfectly with my intentions, but also expanded my understanding of the programming language, enabling me to build a functional game. \n\nAfter thoroughly testing the guessing game application using the debugging tool in VS Code, I was delighted to find that it worked flawlessly! However, a new challenge arose: How could I make this game accessible to my daughter while in the car?\n\n## How to leverage GitLab Remote Development workspaces Beta\nIf you have young children, you're likely familiar with their constant need for instant gratification. 
To satisfy my daughter's desire to play the new game on her iPad right away, I needed a solution.\n\nSince the game wasn't available as a mobile or web application, I decided to utilize the power of [GitLab Remote Development workspaces](/blog/quick-start-guide-for-gitlab-workspaces/) to create a mobile environment for her.\n\nThe workspace is a temporary development environment hosted in the cloud, which offers a simple setup process and numerous advantages for developers. Now, you might wonder how this is relevant to our topic. Well, Remote Development workspaces provides a link to access the environment. This became my workaround to allow her to start playing the game immediately within that development environment directly from her iPad.\n\nThis strategy turned out to be the perfect workaround, not only allowing her to enjoy the game but also exposing her to the captivating world of programming.\n\n## Understanding beta features\nWhile my journey of developing a game in Python, with the help of Code Suggestions, has been incredibly valuable, it's important to acknowledge that the feature is currently in its beta phase. As is common with beta features, there are certain considerations to keep in mind. Due to the high demand and ongoing improvements, there may be occasional unscheduled downtime and potential delays in receiving Code Suggestions within IDEs. Additionally, it's worth noting that the suggestions generated by Code Suggestions may occasionally be of lower quality or incomplete. As Beta users, it is crucial to familiarize yourself with the [documented limitations](https://docs.gitlab.com/ee/user/project/repository/code_suggestions.html).\n\n## Demo\nThis [click-through demo](https://go.gitlab.com/HplUKw) showcases how I used Code Suggestions to develop the guessing game application. I encourage you to give Code Suggestions a try today as you will have a lot of fun.\n\n## We are looking for your feedback! 
\nFeedback from Beta users of Code Suggestions is invaluable. The GitLab team eagerly awaits your input, which will play an important role in further enhancing this feature and refining its capabilities. Together, we can shape the future of Code Suggestions and make it even more powerful and reliable. To send feedback, or report on issues, use the [Code Suggestions feedback issue](https://gitlab.com/gitlab-org/gitlab/-/issues/405152). \n\n",[1135,704,9,703],{"slug":1740,"featured":6,"template":680},"gitlab-duo-code-suggestions-python","content:en-us:blog:gitlab-duo-code-suggestions-python.yml","Gitlab Duo Code Suggestions Python","en-us/blog/gitlab-duo-code-suggestions-python.yml","en-us/blog/gitlab-duo-code-suggestions-python",{"_path":1746,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":1747,"content":1753,"config":1758,"_id":1760,"_type":14,"title":1761,"_source":16,"_file":1762,"_stem":1763,"_extension":19},"/en-us/blog/gitlab-duo-enterprise-is-now-available",{"title":1748,"description":1749,"ogTitle":1748,"ogDescription":1749,"noIndex":6,"ogImage":1750,"ogUrl":1751,"ogSiteName":693,"ogType":694,"canonicalUrls":1751,"schema":1752},"GitLab Duo Enterprise is now available","Organizations have an end-to-end AI partner for faster, more secure software development. 
Learn how GitLab Duo Enterprise supports the entire DevSecOps lifecycle.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749665660/Blog/Hero%20Images/Untitled__1800_x_945_px_.png","https://about.gitlab.com/blog/gitlab-duo-enterprise-is-now-available","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"GitLab Duo Enterprise is now available\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"David DeSanto, Chief Product Officer, GitLab\"}],\n        \"datePublished\": \"2024-09-03\",\n      }",{"title":1748,"description":1749,"authors":1754,"heroImage":1750,"date":1755,"body":1756,"category":10,"tags":1757},[1486],"2024-09-03","[GitLab Duo Enterprise]( https://about.gitlab.com/gitlab-duo/), now available, is an end-to-end AI partner designed for the entire software development lifecycle. This powerful suite of AI tools is designed to boost developer productivity, enhance security, streamline collaboration, and accelerate your DevSecOps processes.\n\nKey features at a glance:\n- Intelligent code assistance across 25+ programming languages\n- AI-powered security vulnerability details and resolution\n- Automated test generation and root cause analysis\n- Team collaboration enhancements with AI-driven summaries\n- ROI quantification through an AI Impact Dashboard\n\n## Why we developed GitLab Duo Enterprise\n\nAs organizations aim to deliver better software faster and create customer value, they encounter significant challenges that slow their progress. [Our research](http://about.gitlab.com/developer-survey/2024/ai) shows that 95% of organizations are either evaluating or using AI in the software development lifecycle. 
However, 55% of survey respondents said they feel using AI for software development is risky.\n\nCommon pain points in the enterprise include suboptimal developer experience and productivity, increasing security and compliance demands, inefficient collaboration across teams, and difficulty in assessing the ROI of AI technology investments. GitLab Duo Enterprise addresses these challenges head-on, providing a secure, efficient, and powerful AI partner for your development teams. \n\n**Let's explore how GitLab Duo Enterprise can transform the way your company creates and deploys software.**\n\n## Boost developer productivity with intelligent code assistance\n\n\u003Cdiv style=\"padding:56.25% 0 0 0;position:relative;\">\u003Ciframe src=\"https://player.vimeo.com/video/1004252678?h=83f35171b6&amp;badge=0&amp;badge=0&amp?autoplay=1&loop=1&autopause=0&background=1&muted=1\" frameborder=\"0\" allow=\"autoplay; fullscreen; picture-in-picture; clipboard-write\" style=\"position:absolute;top:0;left:0;width:100%;height:100%;\" title=\"Code Suggestions clip\">\u003C/iframe>\u003C/div>\u003Cscript src=\"https://player.vimeo.com/api/player.js\">\u003C/script>\n\u003Cp>\u003C/p>\n\nOne of the primary hurdles in software development is the time-consuming nature of routine coding tasks. Get to the most impactful work faster with:\n\n- __Code Suggestions__ supports more than 25 programming languages. This AI-powered tool accelerates code creation, improves code quality, and reduces the time spent on boilerplate tasks.\n\nBut it's not just about writing new code. \n\n- GitLab Duo Enterprise's __Code Explanation__ capability enables developers to quickly understand complex or unfamiliar code, while \n\n- **Code Refactoring** enables developers to [improve and modernize existing code](https://about.gitlab.com/blog/refactor-code-into-modern-languages-with-ai-powered-gitlab-duo/). \n\n- __Test Generation__ automates the creation of comprehensive unit tests. The result? 
Developers can focus on high-value tasks that drive innovation, leading to faster development cycles and improved software quality.\n\n> See how [European tech company Cube](https://about.gitlab.com/customers/cube/) uses Code Suggestions, Test Generation, and other GitLab Duo features to achieve improvements in speed and efficiency. \n\n## Enhance team collaboration and communication\n\nEffective collaboration is the cornerstone of successful software development, yet it's often hindered by lengthy discussions, complex merge requests, and time-consuming code reviews. GitLab Duo Enterprise addresses these challenges with its suite of summarization and templating tools:\n- __Discussion Summary:__ Allows team members to quickly get up to speed on lengthy conversations in issues\n- __Merge Request Summaries:__ Provide clear, concise overviews of proposed changes. \n- __Code Review Summaries:__ Streamline the review process, enabling better handoffs between authors and reviewers. \n\nBy facilitating clearer communication and faster decision-making, GitLab Duo Enterprise helps teams work more efficiently and deliver results more quickly.\n\n## Streamline troubleshooting and debugging\n\n\u003Cdiv style=\"padding:56.25% 0 0 0;position:relative;\">\u003Ciframe src=\"https://player.vimeo.com/video/1004252688?h=fc6c048bfd&amp;badge=0&amp;badge=0&amp?autoplay=1&loop=1&autopause=0&background=1&muted=1\" frameborder=\"0\" allow=\"autoplay; fullscreen; picture-in-picture; clipboard-write\" style=\"position:absolute;top:0;left:0;width:100%;height:100%;\" title=\"Root Cause Analysis clip\">\u003C/iframe>\u003C/div>\u003Cscript src=\"https://player.vimeo.com/api/player.js\">\u003C/script>\n\u003Cp>\u003C/p>\n\nWhen development pipelines fail, the impact on project timelines can be significant. GitLab Duo Enterprise's __Root Cause Analysis__ feature is a game-changer here. 
By automatically analyzing logs and providing detailed explanations of failures along with potential fixes, Root Cause Analysis significantly reduces the time spent on troubleshooting.\n\nThe benefits extend beyond just time savings. With [faster resolution of CI/CD build issues](https://about.gitlab.com/blog/developing-gitlab-duo-blending-ai-and-root-cause-analysis-to-fix-ci-cd/), teams can maintain momentum, reduce downtime, and ultimately deliver software updates more frequently and reliably.\n\n## Elevate security across the development lifecycle\n\n\u003Cdiv style=\"padding:56.25% 0 0 0;position:relative;\">\u003Ciframe src=\"https://player.vimeo.com/video/1004252706?h=73e568b89c&amp;badge=0&amp;badge=0&amp?autoplay=1&loop=1&autopause=0&background=1&muted=1\" frameborder=\"0\" allow=\"autoplay; fullscreen; picture-in-picture; clipboard-write\" style=\"position:absolute;top:0;left:0;width:100%;height:100%;\" title=\"Vulnerability Explanation and Resolution clip\">\u003C/iframe>\u003C/div>\u003Cscript src=\"https://player.vimeo.com/api/player.js\">\u003C/script>\n\u003Cp>\u003C/p>\n\nCybersecurity threats are ever-present, so robust application security is a necessity. GitLab Duo Enterprise rises to this challenge with its __Vulnerability Explanation__ and __Vulnerability Resolution__ features. 
These AI-powered tools help [developers fully understand security vulnerabilities](https://about.gitlab.com/blog/developing-gitlab-duo-use-ai-to-remediate-security-vulnerabilities/) and then automatically generate merge requests with suggested fixes.\n\n## Quantify AI impact for strategic decision-making\n\n\u003Cdiv style=\"padding:56.25% 0 0 0;position:relative;\">\u003Ciframe src=\"https://player.vimeo.com/video/1004252663?h=d35106288b&amp;badge=0&amp?autoplay=1&loop=1&autopause=0&background=1&muted=1\" frameborder=\"0\" allow=\"autoplay; fullscreen; picture-in-picture; clipboard-write\" style=\"position:absolute;top:0;left:0;width:100%;height:100%;\" title=\"AI Impact Dashboard clip\">\u003C/iframe>\u003C/div>\u003Cscript src=\"https://player.vimeo.com/api/player.js\">\u003C/script>\n\u003Cp>\u003C/p>\n\nDemonstrating the ROI of technology investments is crucial. GitLab Duo Enterprise addresses this need head-on with its __AI Impact Dashboard__. This analytics tool, built on top of Value Stream Analytics and DORA4 metrics, provides clear metrics on cycle time improvements and increased deployment frequencies, allowing organizations to quantify the tangible benefits of AI adoption in their development processes.\n\nBy offering insights into how AI usage correlates with key productivity metrics, the [AI Impact Dashboard](https://about.gitlab.com/blog/developing-gitlab-duo-ai-impact-analytics-dashboard-measures-the-roi-of-ai/) empowers leadership to make data-driven decisions about resource allocation and strategic technology investments.\n\n## Embrace the future of AI-powered DevSecOps\n\nAs we unveil GitLab Duo Enterprise, we're proud to announce that GitLab has been recognized as a Leader in the inaugural [Gartner® Magic Quadrant™ for AI Code Assistants](https://about.gitlab.com/gartner-mq-ai-code-assistants/). 
This recognition underscores our commitment to delivering AI solutions that drive real business value.\n\nThe future of software development is here, and it's powered by AI. We're here to help you incorporate intelligent, scalable AI throughout the DevSecOps lifecycle so you can deliver results faster for your customers.\n\n> [Get started today with GitLab Duo Enterprise with a free 60-day trial!](https://about.gitlab.com/solutions/gitlab-duo-pro/sales/?type=free-trial&toggle=gitlab-duo-pro)\n",[9,725,482,704,1135],{"slug":1759,"featured":91,"template":680},"gitlab-duo-enterprise-is-now-available","content:en-us:blog:gitlab-duo-enterprise-is-now-available.yml","Gitlab Duo Enterprise Is Now Available","en-us/blog/gitlab-duo-enterprise-is-now-available.yml","en-us/blog/gitlab-duo-enterprise-is-now-available",{"_path":1765,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":1766,"content":1771,"config":1778,"_id":1780,"_type":14,"title":1781,"_source":16,"_file":1782,"_stem":1783,"_extension":19},"/en-us/blog/gitlab-duo-pro",{"title":1767,"description":1768,"ogTitle":1767,"ogDescription":1768,"noIndex":6,"ogImage":861,"ogUrl":1769,"ogSiteName":693,"ogType":694,"canonicalUrls":1769,"schema":1770}," GitLab Duo Pro combines Code Suggestions, Chat, and organizational controls","AI-powered code completion, code generation, chat, and per-user assignment come to the GitLab DevSecOps Platform as a secure add-on.","https://about.gitlab.com/blog/gitlab-duo-pro","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \" GitLab Duo Pro combines Code Suggestions, Chat, and organizational controls\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Hillary Benson\"},{\"@type\":\"Person\",\"name\":\"Dave Steer\"}],\n        \"datePublished\": \"2024-01-17\",\n      }",{"title":1767,"description":1768,"authors":1772,"heroImage":861,"date":1775,"body":1776,"category":10,"tags":1777},[1773,1774],"Hillary 
Benson","Dave Steer","2024-01-17","> __This blog has been updated to include GitLab Duo Chat (in Beta) as part of the GitLab Duo Pro add-on.__\n\nLast year, we launched GitLab Duo Code Suggestions into general availability as the cornerstone of our GitLab Duo suite of AI capabilities that power DevSecOps workflows. We received incredibly positive feedback from our customers on the ease of use and effectiveness of Code Suggestions' code completion and code generation capabilities. \n\nNow, we are introducing [GitLab Duo Pro](https://about.gitlab.com/solutions/gitlab-duo-pro/sales/), a new package that brings together Code Suggestions, Chat, and organizational control capabilities, ensuring that teams can take advantage of AI exactly where they need it throughout the software development lifecycle.\n\n> [Get started with GitLab Duo Pro](https://about.gitlab.com/solutions/gitlab-duo-pro/sales/) \n\nGitLab Duo Pro, which is available to Ultimate and Premium customers as an add-on, boosts developer efficiency and effectiveness by decreasing the time required to write and understand code. 
With GitLab Duo Pro, organizations can set up their DevSecOps teams for success by giving them the AI-assisted tools they need to develop secure code faster, improve collaboration, and reduce the security and compliance risks of AI adoption.\n\nGitLab Duo Pro features:\n- Code Suggestions: AI-powered code completion and code generation\n- Chat (Beta): Provides real-time guidance on coding, refactoring, and test generation\n- Privacy-first approach: Your code stays your code — GitLab does not use it for training or fine-tuning AI models\n- Organizational controls: GitLab Duo Pro enables organizations to have greater control over AI by limiting AI usage to approved users only\n\nGitLab Duo Pro is available across SaaS, self-managed, and Dedicated deployments.\n\n## What is GitLab Duo Pro?\n\nWith GitLab Duo Pro, developers can access [Code Suggestions](https://about.gitlab.com/solutions/code-suggestions/) to generate blocks of code from single- and multi-line comments as well as comment blocks. Code Suggestions also autocompletes lines of code from a few typed characters — improving cycle times by securely taking care of repetitive, routine coding tasks.\n\nGitLab Duo Pro also includes [Chat](https://about.gitlab.com/blog/gitlab-duo-chat-beta/) (Beta), which is helpful for technical and non-technical users across the entire software development lifecycle. Chat assists in explaining unfamiliar code, suggesting and generating tests, and simplifying code. You can also use Chat to write code from scratch interactively. Whether you are onboarding to GitLab or you are already an expert, learning how to use GitLab is streamlined with Chat.\n\nOrganizational controls are provided in GitLab Duo Pro for better management of AI capabilities, including per-user assignment, so only approved users can use AI.\n\nGitLab Duo Pro is built with privacy as a critical foundation. Private, non-public customer code stored in GitLab is not used for training or fine-tuning AI models. 
Learn about [data usage when using GitLab Duo Pro Code Suggestions](https://docs.gitlab.com/ee/user/project/repository/code_suggestions/index.html#code-suggestions-data-usage).\n\n## Get GitLab Duo Pro today\n\nGitLab Duo Pro is currently available at a special introductory price of $9 USD per user/month to Ultimate and Premium customers. Beginning on February 1, 2024, GitLab Duo Pro will be available for $19 USD per user/month. [Contact us today](https://about.gitlab.com/solutions/gitlab-duo-pro/sales/) to get started with GitLab Duo Pro.\n",[9,482,703],{"slug":1779,"featured":91,"template":680},"gitlab-duo-pro","content:en-us:blog:gitlab-duo-pro.yml","Gitlab Duo Pro","en-us/blog/gitlab-duo-pro.yml","en-us/blog/gitlab-duo-pro",{"_path":1785,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":1786,"content":1792,"config":1798,"_id":1800,"_type":14,"title":1801,"_source":16,"_file":1802,"_stem":1803,"_extension":19},"/en-us/blog/gitlab-duo-self-hosted-enterprise-ai-built-for-data-privacy",{"title":1787,"description":1788,"ogTitle":1787,"ogDescription":1788,"noIndex":6,"ogImage":1789,"ogUrl":1790,"ogSiteName":693,"ogType":694,"canonicalUrls":1790,"schema":1791},"GitLab Duo Self-Hosted: Enterprise AI built for data privacy","Customers in regulated industries can now deploy GitLab Duo on self-managed infrastructure, leveraging the power of generative AI while helping to address data residency and privacy concerns.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097840/Blog/Hero%20Images/Blog/Hero%20Images/Self-Hosted%201800x945_1dL1II2ITh2PteObA9DBLD_1750097839679.png","https://about.gitlab.com/blog/gitlab-duo-self-hosted-enterprise-ai-built-for-data-privacy","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"GitLab Duo Self-Hosted: Enterprise AI built for data privacy\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Susie 
Bitters\"},{\"@type\":\"Person\",\"name\":\"Aathira Nair\"}],\n        \"datePublished\": \"2025-02-27\",\n      }",{"title":1787,"description":1788,"authors":1793,"heroImage":1789,"date":1795,"body":1796,"category":10,"tags":1797},[1255,1794],"Aathira Nair","2025-02-27","We are excited to announce the general availability of GitLab Duo Self-Hosted for Code Suggestions and Chat. An optional capability for self-managed customers with a GitLab Duo Enterprise subscription, GitLab Duo Self-Hosted supports deployment flexibility across multiple platforms, including on-premises infrastructure or in private clouds and secure cloud environments through AWS Bedrock and Azure OpenAI. GitLab Duo Self-Hosted empowers teams to innovate with AI while helping them maintain control over sensitive data and intellectual property. \n\nSecurity concerns have been a major barrier to AI adoption in regulated industries. In our [Global DevSecOps Survey](http://about.gitlab.com/developer-survey/2024/ai), more than half of the respondents said that introducing AI into the software development lifecycle is risky. With [GitLab Duo](https://about.gitlab.com/gitlab-duo/), we gave organizations a way to ship more secure software faster with AI throughout the entire software development lifecycle.\n\nGitLab Duo Self-Hosted expands the availability of GitLab Duo AI features to organizations with stringent data privacy requirements, offering flexibility in both AI large language model (LLM) selection and deployment options. The earliest adopters of GitLab Duo Self-Hosted include organizations in the public sector and regulated industries  – e.g., financial services, automotive, and healthcare. These organizations seek to gain the competitive advantage of AI by integrating AI-powered development tools into their environments, while also giving security teams the control they need.\n\nAs one U.S. 
government agency says: “After selecting GitLab as the cornerstone of our agency-wide DevSecOps platform, we chose GitLab Duo Self-Hosted to further advance our software factory capabilities. GitLab Duo’s ability to operate in air-gapped environments and provide granular control over our data was crucial to delivering secure AI-powered features. This unified approach streamlines our workflow and strengthens security, allowing us to leverage AI for increased productivity while meeting strict compliance requirements.” \n\n![GitLab Duo Self-Hosted models](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097848/Blog/Content%20Images/Blog/Content%20Images/image4_aHR0cHM6_1750097848329.png)\n\n## Architect secure AI deployments\n\nGitLab Duo Self-Hosted enables GitLab Duo features that leverage a curated selection of leading AI LLMs, including those from Anthropic, Mistral, and OpenAI. Here are the LLMs supported by GitLab today:\n\n* On-premises - Mistral models with the vLLM serving platform  \n* AWS - Mistral and Anthropic Claude 3.5 Sonnet via AWS Bedrock  \n* Microsoft Azure - OpenAI GPT models via Azure AI\n\nWe are evaluating more models to support in the near future. [Learn more about the LLMs we support.](https://docs.gitlab.com/ee/administration/self_hosted_models/supported_models_and_hardware_requirements.html#approved-llms)\n\nGitLab Duo Self-Hosted deployment options include on-premises installations powered by the open-source vLLM framework, as well as private-cloud deployments via services like AWS Bedrock and Microsoft Azure AI. This flexibility helps organizations to architect AI solutions that align with their unique security, compliance, and performance requirements.\n\n## Simplify AI/ML implementation\n\nGitLab Duo's AI abstraction layer standardizes and simplifies the integration of the chosen LLM to a feature, mitigating the burden of implementing AI/ML technologies. 
This enables companies to streamline their AI adoption efforts and enhance the developer experience, free from the complexities of integrating and maintaining multiple tools.\n\n![GitLab Duo Self-Hosted AI-powered features](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097848/Blog/Content%20Images/Blog/Content%20Images/image1_aHR0cHM6_1750097848330.png)\n\n## Maintain control of sensitive data\n\nBy isolating your GitLab instance, AI gateway, and LLMs in your own environment or country of choice, GitLab Duo Self-Hosted makes it possible that sensitive data and intellectual property remain within your designated perimeter. Granular control over data locality helps enable adherence to strict data residency regulations, while adopting AI capabilities in secure settings. Whether you use GitLab Duo Self-Hosted in a completely air-gapped environment with vLLM or leverage a supported private cloud, you can control all aspects of the deployment to include the geographic location of components. 
By eliminating the reliance on external APIs and providing full visibility into all request and response logs, GitLab Duo Self-Hosted helps even the most regulated organizations confidently adopt AI capabilities and meet the most stringent compliance obligations.\n\n**Start an interactive tour of GitLab Self-Hosted by clicking on the image below:**\n\n[![GitLab Duo Self-Hosted tour screenshot](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097848/Blog/Content%20Images/Blog/Content%20Images/Screenshot_2025-02-20_at_7.00.34_AM_aHR0cHM6_1750097848332.png)](https://gitlab.navattic.com/gitlab-duo-self-hosted)\n\n## Get started with GitLab Duo Self-Hosted today\n\nIf you're ready to advance your AI journey while addressing security and data privacy, [reach out to us](https://about.gitlab.com/sales/) to help set up GitLab Duo Self-Hosted in your environment today.",[9,704,482,725,1135],{"slug":1799,"featured":91,"template":680},"gitlab-duo-self-hosted-enterprise-ai-built-for-data-privacy","content:en-us:blog:gitlab-duo-self-hosted-enterprise-ai-built-for-data-privacy.yml","Gitlab Duo Self Hosted Enterprise Ai Built For Data Privacy","en-us/blog/gitlab-duo-self-hosted-enterprise-ai-built-for-data-privacy.yml","en-us/blog/gitlab-duo-self-hosted-enterprise-ai-built-for-data-privacy",{"_path":1805,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":1806,"content":1811,"config":1816,"_id":1818,"_type":14,"title":1819,"_source":16,"_file":1820,"_stem":1821,"_extension":19},"/en-us/blog/gitlab-duo-with-amazon-q-agentic-ai-optimized-for-aws",{"title":1807,"description":1808,"ogTitle":1807,"ogDescription":1808,"noIndex":6,"ogImage":1309,"ogUrl":1809,"ogSiteName":693,"ogType":694,"canonicalUrls":1809,"schema":1810},"GitLab Duo with Amazon Q: Agentic AI optimized for AWS generally available","The comprehensive AI-powered DevSecOps platform combined with the deepest set of cloud computing capabilities speeds dev cycles, increases automation, and improves code 
quality.","https://about.gitlab.com/blog/gitlab-duo-with-amazon-q-agentic-ai-optimized-for-aws","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"GitLab Duo with Amazon Q: Agentic AI optimized for AWS generally available\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Emilio Salvador\"}],\n        \"datePublished\": \"2025-04-17\",\n      }",{"title":1807,"description":1808,"authors":1812,"heroImage":1309,"date":1813,"body":1814,"category":10,"tags":1815},[887],"2025-04-17","Today, we're excited to announce the general availability of [GitLab Duo with Amazon Q](https://about.gitlab.com/partners/technology-partners/aws/), delivering agentic AI throughout the software development lifecycle for AWS customers. GitLab Duo with Amazon Q, based on GitLab Ultimate, includes many familiar features such as code completion, code explanation, code generation, chat, and vulnerability explanation and resolution – all of which are now powered by Amazon Q. It is available with a Self-Managed deployment model for customers on Amazon Web Services (AWS).\n\nWith Amazon Q's agents directly embedded into GitLab's DevSecOps platform, developers maintain their familiar development environment while gaining powerful AI capabilities. The result is a frictionless experience that helps accelerate development cycles, reduce manual effort, and enhance code quality.\n\n“Participating in the early access program for GitLab Duo with Amazon Q has given us a glimpse into its transformative potential for our development workflows,” said Osmar Alonso, DevOps Engineer, Volkswagen Digital Solutions. “Even in its early stages, we saw how the deeper integration with autonomous agents could streamline our process, from code commit to production. 
We're excited to see how this technology empowers our team to focus on innovation and accelerate our digital transformation.\"\n\n## Agentic AI comes to complex customer environments\n\nBy combining agentic AI with secure, reliable cloud infrastructure, GitLab and AWS bring built-in security, scale, and reliability to complex customer environments, enabling them to realize the following benefits:\n\n__Unified developer experience for streamlined development__\n\nDevelopers can interact with Amazon Q through the GitLab Duo Chat interface from their preferred IDE or the GitLab web interface. This eliminates the need for context switching in other tools and helps developers stay focused on the project that they’re working on.\n\n__One solution for the entire software development lifecycle__\n\nCode suggestions and optimizations leverage AWS-specific patterns and practices, while testing tools understand AWS service interactions and dependencies. A common data store across all stages provides essential context to AI agents, enabling complete visibility and traceability for relevant actions.\n\n__Secure development with enterprise-grade guardrails__\n\nEnd-to-end security and compliance are built directly into the development platform with guardrails that help reduce risk without impeding velocity. This secure software development approach enforces transparency and auditability through AI agents while seamlessly integrating with AWS security services and compliance frameworks.\n\n## How to start using GitLab Duo with Amazon Q\n\nHere are five initial use cases we’re targeting to help teams build secure software faster with agentic AI: \n\n1. **Feature development acceleration** - Create issue descriptions, generate implementation plans based on your existing codebase, and produce complete merge requests ready for review. This drives feature delivery acceleration while maintaining consistency with internal development standards.  \n2. 
**Legacy application modernization** - Analyze your legacy Java codebase, create a comprehensive upgrade plan, and generate a merge request with all necessary code changes. This unlocks faster Java upgrade time, while providing a clear audit trail of all code transformations. Support for .NET and other languages is planned for future releases.  \n3. **Quality assurance enhancement** - Analyze code and automatically create comprehensive unit tests that understand your application logic and AWS service interactions. This increases test coverage, reduces manual test writing effort, and helps ensure consistent test quality across applications.  \n4. **Code review optimization** - Provide inline feedback on code changes, suggesting improvements based on development standards, highlighting security and performance considerations. This enables reduced code review cycles and delivery of higher-quality code merges for deployment.  \n5. **Vulnerability remediation** - Explain detected vulnerabilities in clear, detailed terms and one-click remediation based on recommended code changes, helping to significantly reduce the time from detection to remediation.\n\nWatch GitLab Duo with Amazon Q in action:\n\n\u003Cdiv style=\"padding:56.25% 0 0 0;position:relative;\">\u003Ciframe src=\"https://player.vimeo.com/video/1075753390?badge=0&amp;autopause=0&amp;player_id=0&amp;app_id=58479\" frameborder=\"0\" allow=\"autoplay; fullscreen; picture-in-picture; clipboard-write; encrypted-media\" style=\"position:absolute;top:0;left:0;width:100%;height:100%;\" title=\"Technical Demo: GitLab Duo with Amazon Q\">\u003C/iframe>\u003C/div>\u003Cscript src=\"https://player.vimeo.com/api/player.js\">\u003C/script>\n\n> #### Get the benefits of GitLab Duo with Amazon Q today\n> GitLab's unified, AI-powered DevSecOps platform with Amazon Q's advanced AI capabilities provides AWS customers with a solution that transforms how teams build and deploy software. 
To learn more about GitLab Duo with Amazon Q visit us at an upcoming [AWS Summit in a city near you](https://about.gitlab.com/events/aws-summits/) or [reach out to your GitLab representative](https://about.gitlab.com/partners/technology-partners/aws/#form).",[9,482,787,725,704,1135],{"slug":1817,"featured":91,"template":680},"gitlab-duo-with-amazon-q-agentic-ai-optimized-for-aws","content:en-us:blog:gitlab-duo-with-amazon-q-agentic-ai-optimized-for-aws.yml","Gitlab Duo With Amazon Q Agentic Ai Optimized For Aws","en-us/blog/gitlab-duo-with-amazon-q-agentic-ai-optimized-for-aws.yml","en-us/blog/gitlab-duo-with-amazon-q-agentic-ai-optimized-for-aws",{"_path":1823,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":1824,"content":1829,"config":1834,"_id":1836,"_type":14,"title":1837,"_source":16,"_file":1838,"_stem":1839,"_extension":19},"/en-us/blog/gitlab-duo-with-amazon-q-devsecops-meets-agentic-ai",{"title":1825,"description":1826,"ogTitle":1825,"ogDescription":1826,"noIndex":6,"ogImage":1309,"ogUrl":1827,"ogSiteName":693,"ogType":694,"canonicalUrls":1827,"schema":1828},"GitLab Duo with Amazon Q: DevSecOps meets agentic AI","AI-powered DevSecOps enhanced with autonomous AI agents accelerates developer productivity, application modernization, and innovation.","https://about.gitlab.com/blog/gitlab-duo-with-amazon-q-devsecops-meets-agentic-ai","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"GitLab Duo with Amazon Q: DevSecOps meets agentic AI\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Emilio Salvador\"}],\n        \"datePublished\": \"2024-12-03\",\n      }",{"title":1825,"description":1826,"authors":1830,"heroImage":1309,"date":1831,"body":1832,"category":10,"tags":1833},[887],"2024-12-03","We're excited to announce GitLab Duo with Amazon Q, a joint offering that brings together GitLab's comprehensive AI-powered DevSecOps platform with Amazon Q's autonomous AI 
agents in a single, integrated solution.\n\nGitLab Duo with Amazon Q transforms software development by integrating powerful AI agents directly into your daily workflows. Instead of switching between multiple tools, developers can now accelerate key tasks — from feature development to code reviews — all from within GitLab's comprehensive DevSecOps platform. Amazon Q’s AI agents act as intelligent assistants, automating time-consuming tasks like generating code from requirements, creating unit tests, conducting code reviews, and modernizing Java applications. By handling these complex tasks, this joint offering helps teams focus on innovation, while maintaining security and quality standards.\n\nThis enterprise-class developer experience includes:\n* The GitLab unified platform with one single data store, which automates the building, testing, packaging, and deployment of secure code\n* GitLab Duo, enhanced with Amazon Q developer, which leverages GitLab project context to generate multi-file changes based on the task\n* Amazon Q AI agents integrated with GitLab Duo, updating issues and creating merge requests per task, with permission scoped to the project\n\n\u003Cdiv style=\"padding:56.25% 0 0 0;position:relative;\">\u003Ciframe src=\"https://player.vimeo.com/video/1033653810?badge=0&amp;autopause=0&amp;player_id=0&amp;app_id=58479\" frameborder=\"0\" allow=\"autoplay; fullscreen; picture-in-picture; clipboard-write\" style=\"position:absolute;top:0;left:0;width:100%;height:100%;\" title=\"GitLab Duo and Amazon Q\">\u003C/iframe>\u003C/div>\u003Cscript src=\"https://player.vimeo.com/api/player.js\">\u003C/script>\n\n## Partnership innovation: GitLab and AWS\n\nGitLab Duo with Amazon Q is the result of close collaboration between GitLab and AWS engineering teams, combining our strengths to transform software development. 
This partnership unites GitLab's expertise in unified DevSecOps with AWS's leadership in cloud computing, creating an innovative solution that understands how developers work.\n\nBy integrating Amazon Q's autonomous agents with GitLab's comprehensive AI-powered platform, we've built more than a technical integration. We've created an experience that makes AI-powered development feel natural and upholds the security, compliance, and reliability that enterprises require.\n\nIndustry analysts recognize the significance of this integration in advancing AI-powered software development:\n\n***\"With this joint offering, GitLab and AWS are combining their strengths to make agentic AI a reality in software development,\" said Katie Norton, Research Manager at IDC. \"GitLab Duo with Amazon Q addresses strong use cases and critical challenges that empower customers to harness the full potential of AI.\"***\n\n***\"Both developers and the organizations they work for are increasingly interested in simplified and unified experiences,\" says Rachel Stephens, senior analyst at RedMonk. \"Especially in the era of AI – when security and privacy are paramount concerns – organizations want to both harness the power of cutting edge technology while also controlling risk and minimizing disjointed software tool chains. The partnership between GitLab Duo and Amazon Q seeks to give developers the tools they need within the context of an end-to-end DevSecOps experience.\"***\n\n## 4 key customer benefits \n\nGitLab Duo with Amazon Q pairs AI-powered DevSecOps with the deepest set of cloud computing capabilities. Together, they help development teams:\n\n### 1. Streamline feature development from idea to code \n\nDevelopment teams often spend hours translating requirements into code, leading to slower delivery and inconsistent implementation. 
You can now invoke the GitLab Duo with Amazon Q agent by utilizing a new quick action `/q dev`, which will convert an issue description directly into merge-ready code in minutes. The agent analyzes requirements, plans the implementation, and generates a complete merge request — all while adhering to your team's development standards. Teams can iterate rapidly using feedback in comments, significantly reducing the time from idea to production.\n\n\u003Cdiv style=\"padding:56.25% 0 0 0;position:relative;\">\u003Ciframe src=\"https://player.vimeo.com/video/1034050110?badge=0&amp;autopause=0&amp;player_id=0&amp;app_id=58479\" frameborder=\"0\" allow=\"autoplay; fullscreen; picture-in-picture; clipboard-write\" style=\"position:absolute;top:0;left:0;width:100%;height:100%;\" title=\"Feature Dev with Rev\">\u003C/iframe>\u003C/div>\u003Cscript src=\"https://player.vimeo.com/api/player.js\">\u003C/script>\n\n### 2. Modernize legacy code without the headache \n\nUpgrading Java applications traditionally requires weeks of careful planning, manual code changes, and extensive testing. By using quick action `/q transform`, you can change this by automating the entire Java modernization process. In minutes, not hours, the agent analyzes your Java 8 or 11 codebase, creates a comprehensive upgrade plan, and generates fully documented merge requests for Java 17 migration. Every change is tracked and traceable, giving teams confidence while improving application security and performance.\n\n\u003Cdiv style=\"padding:56.25% 0 0 0;position:relative;\">\u003Ciframe src=\"https://player.vimeo.com/video/1034050145?badge=0&amp;autopause=0&amp;player_id=0&amp;app_id=58479\" frameborder=\"0\" allow=\"autoplay; fullscreen; picture-in-picture; clipboard-write\" style=\"position:absolute;top:0;left:0;width:100%;height:100%;\" title=\"QCT\">\u003C/iframe>\u003C/div>\u003Cscript src=\"https://player.vimeo.com/api/player.js\">\u003C/script>\n\n### 3. 
Accelerate code reviews without sacrificing quality \n\nCode reviews often create bottlenecks: Teams wait days for feedback yet must maintain consistent standards. With the `/q review` quick action, you get instant, intelligent feedback on code quality and security directly in merge requests. By automatically identifying potential issues and suggesting improvements based on your standards, teams can maintain high-quality code while dramatically reducing review cycles.\n\n\u003Cdiv style=\"padding:56.25% 0 0 0;position:relative;\">\u003Ciframe src=\"https://player.vimeo.com/video/1034050136?badge=0&amp;autopause=0&amp;player_id=0&amp;app_id=58479\" frameborder=\"0\" allow=\"autoplay; fullscreen; picture-in-picture; clipboard-write\" style=\"position:absolute;top:0;left:0;width:100%;height:100%;\" title=\"Code Reviews\">\u003C/iframe>\u003C/div>\u003Cscript src=\"https://player.vimeo.com/api/player.js\">\u003C/script>\n\n### 4. Automate testing to ship with confidence\n\nManual test creation is time-consuming and often leads to inconsistent coverage across teams. With the `/q test` quick action, you can automatically generate comprehensive unit tests that understand your application logic. The agent ensures thorough coverage of critical paths and edge cases, matching your existing testing patterns. 
This automation helps teams catch issues earlier and maintain consistent quality standards, saving valuable developer time.\n\n\u003Cdiv style=\"padding:54.37% 0 0 0;position:relative;\">\u003Ciframe src=\"https://player.vimeo.com/video/1034050181?badge=0&amp;autopause=0&amp;player_id=0&amp;app_id=58479\" frameborder=\"0\" allow=\"autoplay; fullscreen; picture-in-picture; clipboard-write\" style=\"position:absolute;top:0;left:0;width:100%;height:100%;\" title=\"Use GitLab Duo with Amazon Q to add tests\">\u003C/iframe>\u003C/div>\u003Cscript src=\"https://player.vimeo.com/api/player.js\">\u003C/script>\n\n## Enterprise-grade security and guardrails included\n\nBuilt for enterprise scale and security, this offering combines GitLab's integrated security, compliance, and privacy with Amazon Q's AI agent, accelerating developer workflows to help organizations ship secure software faster.\n\nThe integration features:\n\n* Built-in guardrails that maintain development velocity  \n* Granular controls for AI-powered features at user, project, and group levels  \n* End-to-end security integration with existing workflows\n\nDevSecOps teams can securely scale the development environment with the world's most broadly adopted cloud.\n\n## What's next\n\nGitLab Duo with Amazon Q builds on our existing integration with [AWS announced in May 2024](https://press.aboutamazon.com/2024/4/aws-announces-general-availability-of-amazon-q-the-most-capable-generative-ai-powered-assistant-for-accelerating-software-development-and-leveraging-companies-internal-data), representing a significant step forward in our joint mission to transform software development. This deeper integration of AI capabilities marks the beginning of our expanded collaboration with AWS. 
As we continue to evolve these capabilities, we'll focus on:\n\n* Extending AI features across the development lifecycle  \n* Enhancing developer productivity  \n* Meeting enterprise development demands at scale\n\n**GitLab Duo with Amazon Q is available today on a [public branch](https://gitlab.com/groups/gitlab-org/-/epics/16059) in the GitLab.org project. To get access to a preview and learn more about how it can transform your software development process, visit [our website](https://about.gitlab.com/partners/technology-partners/aws/#interest).**",[1135,787,9,482,231],{"slug":1835,"featured":91,"template":680},"gitlab-duo-with-amazon-q-devsecops-meets-agentic-ai","content:en-us:blog:gitlab-duo-with-amazon-q-devsecops-meets-agentic-ai.yml","Gitlab Duo With Amazon Q Devsecops Meets Agentic Ai","en-us/blog/gitlab-duo-with-amazon-q-devsecops-meets-agentic-ai.yml","en-us/blog/gitlab-duo-with-amazon-q-devsecops-meets-agentic-ai",{"_path":1841,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":1842,"content":1848,"config":1854,"_id":1856,"_type":14,"title":1857,"_source":16,"_file":1858,"_stem":1859,"_extension":19},"/en-us/blog/gitlab-duo-workflow-enterprise-visibility-and-control-for-agentic-ai",{"title":1843,"description":1844,"ogTitle":1843,"ogDescription":1844,"noIndex":6,"ogImage":1845,"ogUrl":1846,"ogSiteName":693,"ogType":694,"canonicalUrls":1846,"schema":1847},"GitLab Duo Workflow: Enterprise visibility and control for agentic AI","Secure, autonomous, context-aware AI agents take on complex tasks, freeing developers to ship innovative software faster. 
Private beta waitlist now open.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749660174/Blog/Hero%20Images/Workflow_1800x945.png","https://about.gitlab.com/blog/gitlab-duo-workflow-enterprise-visibility-and-control-for-agentic-ai","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"GitLab Duo Workflow: Enterprise visibility and control for agentic AI\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Pini Wietchner\"}],\n        \"datePublished\": \"2025-02-24\",\n      }",{"title":1843,"description":1844,"authors":1849,"heroImage":1845,"date":1851,"body":1852,"category":10,"tags":1853},[1850],"Pini Wietchner","2025-02-24","Today, we're excited to announce the opening of the waitlist for the [private beta of GitLab Duo Workflow](https://about.gitlab.com/gitlab-duo/workflow): **agentic AI built on top of the most comprehensive DevSecOps platform.** The next step in our AI roadmap, GitLab Duo Workflow will help development teams navigate everything from project bootstrapping to deployment processes, from debugging issues to cross-team coordination, all within the IDE. \n\nGitLab Duo Workflow leverages the GitLab platform's structure for collaboration, continuous integration, continuous deployment, security, and compliance to help organizations as they accelerate their development process with AI agents. \n\nUse GitLab Duo Workflow to help you:   \n* [bootstrap a new development project](#from-slow-project-setup-to-a-running-start)  \n* [modernize code](#from-legacy-code-to-modern-applications)  \n* [perform contextual tasks](#from-context-switching-to-flow-state) \n* [create documentation](#from-stale-docs-to-dynamic-knowledge)\n* [enhance test coverage](#from-patchy-to-comprehensive-testing) \n* and more  \n\nThis is just the beginning. 
With GitLab’s unified data store, the more you use GitLab, the more context GitLab Duo Workflow has about your code, configurations, security findings, and deployment practices. The result: an increasingly powerful development experience that's tailored to your organization.\n\n## The promise and challenge of AI agents\n\nSoftware has fundamentally changed the world, but only a tiny fraction of the world's population has the skills to build software today. Yet, these developers reach billions of people with smartphones and internet connections. Just imagine a world where *more* people can build, secure, and deliver production-ready software – there will be an explosion of innovation as more people can create software that impacts billions. **Agentic AI will make that happen.**\n\nAI agents understand context, maintain knowledge of entire codebases, and actively collaborate on complex software projects across development, security, and operations. With AI agents, developers can create software at a scale previously unimaginable for individuals or even teams.\n\nBut this shift raises important questions about visibility, control, and how AI will impact developers' work. Organizations need to ensure AI enhances their developers' capabilities while enabling them to maintain oversight of their development process. The key to success isn't just adopting AI – it's adopting it in a way that empowers developers while preserving security, compliance, and governance.\n\n## AI's success depends on your platform, not more add-on tools\n\nWhen you're working with more developers, code, and potential security risks, adding separate tools for each new challenge only creates more complexity. 
Our most recent [DevSecOps Survey](https://about.gitlab.com/the-source/platform/devops-teams-want-to-shake-off-diy-toolchains-a-platform-is-the-answer/) shows just how serious this problem is: DevSecOps teams are juggling up to 14 different tools, with professionals spending up to 80% of their time on non-coding tasks. For AI to be truly effective, it also needs high-quality, unified data. That's hard to achieve with disparate tools.\n\n**The GitLab DevSecOps platform combined with GitLab AI agents** brings everything together in a single data model that encapsulates source code, merge requests, epics, users, access rights, and more. The agents we're building use context about users and projects to standardize how teams work and automate the non-coding tasks that absorb developer time, such as scanning for security issues and enforcing compliance rules. When AI is built directly into the platform, these capabilities become even more powerful, turning AI agents into development partners while keeping you in control of how AI enhances the process.\n\n**This isn't a far-off future — it's what we're building right now with GitLab Duo Workflow.**\n\n\u003Cdiv style=\"padding:56.25% 0 0 0;position:relative;\">\u003Ciframe src=\"https://player.vimeo.com/video/1059060959?badge=0&amp;autopause=0&amp;player_id=0&amp;app_id=58479\" frameborder=\"0\" allow=\"autoplay; fullscreen; picture-in-picture; clipboard-write; encrypted-media\" style=\"position:absolute;top:0;left:0;width:100%;height:100%;\" title=\"GitLab Duo Workflow, the future of secure agentic AI software development\">\u003C/iframe>\u003C/div>\u003Cscript src=\"https://player.vimeo.com/api/player.js\">\u003C/script>`\n\n## GitLab Duo Workflow: AI agents on the most comprehensive DevSecOps platform\n\nLeveraging GitLab's end-to-end DevSecOps platform, GitLab Duo Workflow helps developers work at their highest potential. 
While AI coding assistants help with individual pieces of code, GitLab Duo Workflow will understand your entire development lifecycle – automating routine tasks so developers can focus on strategic innovation and creative problem-solving. As we develop GitLab Duo Workflow, here’s what it will be able to help teams achieve: \n\n### From slow project setup to a running start\n\nDevelopers spend precious time configuring new projects, managing dependencies, and setting up basic infrastructure instead of building new features. With GitLab Duo Workflow, you can **automate project bootstrapping directly in the IDE**, providing the right configurations from the start so you can focus on innovation sooner.\n\n### From legacy code to modern applications\n\nModernizing legacy code is more than just updating syntax — it requires understanding dependencies, tests, CI/CD pipelines, and documentation. GitLab Duo Workflow helps **modernize your codebase by handling code refactoring** – from code to tests.\n\n### From context switching to flow state\n\nToday, developers constantly switch between tools, docs, and codebases to solve problems. GitLab Duo Workflow will help **resolve tasks with the full context of your codebase-related issues and merge requests**, letting developers stay in their flow.\n\n### From stale docs to dynamic knowledge\n\nDocumentation becomes stale quickly, making codebases harder to understand and maintain. GitLab Duo Workflow **supports developers in generating and updating documentation**, including README files, code flow diagrams, and architecture documentation.\n\n### From patchy to comprehensive testing\n\nAs codebases grow, maintaining comprehensive test coverage becomes increasingly challenging. 
GitLab Duo Workflow **can generate tests for entire sections of your codebase** while integrating with your existing test infrastructure, ensuring more reliable software with less effort.\n\n## Sign up for the private beta waitlist\n\n[Sign up for the GitLab Duo Workflow private beta waitlist](https://about.gitlab.com/gitlab-duo/workflow) to see the next step in our vision for secure agentic AI – from project setup to deployment. Built on GitLab's DevSecOps platform, these agents understand your entire software lifecycle while maintaining the enterprise-grade security and control organizations require.\n\n*Disclaimer: This page contains information about upcoming products, features, and functionality. This information is for informational purposes only and should not be relied upon for purchasing or planning. All items are subject to change or delay, and the development, release, and timing remain at GitLab Inc.'s sole discretion.*",[482,9,704,725,1135,746],{"slug":1855,"featured":91,"template":680},"gitlab-duo-workflow-enterprise-visibility-and-control-for-agentic-ai","content:en-us:blog:gitlab-duo-workflow-enterprise-visibility-and-control-for-agentic-ai.yml","Gitlab Duo Workflow Enterprise Visibility And Control For Agentic Ai","en-us/blog/gitlab-duo-workflow-enterprise-visibility-and-control-for-agentic-ai.yml","en-us/blog/gitlab-duo-workflow-enterprise-visibility-and-control-for-agentic-ai",{"_path":1861,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":1862,"content":1867,"config":1873,"_id":1875,"_type":14,"title":1876,"_source":16,"_file":1877,"_stem":1878,"_extension":19},"/en-us/blog/gitlab-flow-duo",{"title":1863,"description":1864,"ogTitle":1863,"ogDescription":1864,"noIndex":6,"ogImage":820,"ogUrl":1865,"ogSiteName":693,"ogType":694,"canonicalUrls":1865,"schema":1866},"Combine GitLab Flow and GitLab Duo for a workflow powerhouse ","Add the AI-powered capabilities of GitLab Duo to GitLab Flow to boost the efficiency of DevSecOps workflows. 
This is a guide for deployment in your environment, including a video tutorial.","https://about.gitlab.com/blog/gitlab-flow-duo","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Combine GitLab Flow and GitLab Duo for a workflow powerhouse \",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Cesar Saavedra\"}],\n        \"datePublished\": \"2023-07-27\",\n      }",{"title":1863,"description":1864,"authors":1868,"heroImage":820,"date":1869,"body":1870,"category":10,"tags":1871,"updatedDate":1872},[720],"2023-07-27","Starting out with DevSecOps requires a well-thought-out workflow, but that can sometimes seem like a daunting challenge. Luckily, there are two things that can help: GitLab Flow and GitLab Duo. GitLab Flow is a prescribed approach to help organizations successfully apply DevSecOps processes. GitLab Duo is a [powerful set of AI-powered capabilities](https://about.gitlab.com/blog/supercharge-productivity-with-gitlab-duo/) within the GitLab DevSecOps Platform that can help organizations develop code, improve operations, and secure software more efficiently. Combined, GitLab Flow and GitLab Duo can help organizations achieve significant improvements in end-to-end workflow efficiency, which can lead to even higher levels of productivity, deployment frequency, code quality and overall security, and production resiliency and availability.\nIn this article, we delve into how GitLab Flow and GitLab Duo can be used together to help organizations be successful with DevSecOps.\n\n> Discover the future of AI-driven software development with our GitLab 17 virtual launch event. [Watch today!](https://about.gitlab.com/seventeen/)\n\n## What is GitLab Flow?\nGitLab Flow is a prescribed and opinionated end-to-end workflow for the development lifecycle of applications when using GitLab, an AI-powered DevSecOps platform with a single user interface and a single data model. 
GitLab Flow is based on best practices and lessons learned from customer feedback and our dogfooding. Furthermore, GitLab Flow spans across the [stages of the DevSecOps lifecycle](https://about.gitlab.com/stages-devops-lifecycle/), forming an efficient workflow with an inner feedback loop for reviewing a specific update and an outer feedback loop for improving the entire application, as well as the development lifecycle itself. \n\n![The GitLab Flow inner and outer loops](https://about.gitlab.com/images/blogimages/gitlab-flow-duo/The-GitLab-Flow-2023-feedback-loops.png)\n\u003Ccenter>The GitLab Flow inner and outer loops\u003C/center>\u003Cp>\u003C/p>\n\nAs you can see by the many stages in GitLab Flow, there is much more to developing software than writing code. Below, we'll dive into each step of GitLab Flow and how GitLab Duo can help. \n\n### Planning\nThe first portion of GitLab Flow is planning, which sits on the outer feedback loop of GitLab Flow. It encompasses issues, merge requests, epics, milestones, iterations, release, release evidence, and more. Let’s cover what roles these components play in GitLab Flow and how GitLab Duo can help.\n\n![Planning - first portion of GitLab Flow](https://about.gitlab.com/images/blogimages/gitlab-flow-duo/The-GitLab-Flow-2023-planning-portion.png)\n\u003Ccenter>Planning - first portion of GitLab Flow\u003C/center>\u003Cp>\u003C/p>\n\n#### Issues\nIssues are where product problems or new features are defined and where team members can collaborate. As an issue is created, you can populate its title and then leverage GitLab Duo **Issue description generation** capability to help enrich the description field, saving time and effort. 
Because many stakeholders can participate in comment threads on an issue, **Discussion summary** is an AI-powered capability in GitLab Duo that can summarize hundreds of comments on an issue into a concise paragraph so that a stakeholder can quickly get caught up with the conversation, jump into the discussion, and become productive right away.\n\nIssues can be organized and visualized in issue boards, which are a software project management tool that can be used as kanban or Scrum boards. These boards help teams plan, organize, and visualize a workflow for a feature or product release. Different categories of boards can be created and issues can be moved from one board to another one with a simple drag and drop.\n\n#### Merge requests\nMerge requests are where solutions are developed. As release components, issues and merge requests provide the auditability and tracking of application changes done by stakeholders, such as DevOps and platform engineers, system and database administrators, security engineers, and developers. In addition, issues and merge requests are key inputs for the release planning process.\n\nMerge requests can be individually created or created from an issue. Creating a merge request from an issue automatically relates it to that issue so when the merge request is merged its associated issue is automatically closed. Merge requests can also be manually related to an issue.\n\n![Merged merge request will close issue](https://about.gitlab.com/images/blogimages/gitlab-flow-duo/mr-with-its-issue.png)\n\u003Ccenter>Merged merge request will close issue\u003C/center>\u003Cp>\u003C/p>\n\nLike issues, merge requests can include a long list of updates to a feature branch by many stakeholders. Collaborators who need to familiarize themselves with or understand all of the updates included in a merge request can take advantage of the **Merge request summary** capability in GitLab Duo to quickly get caught up on the changes. 
In addition, collaborators can invoke GitLab Duo **Code Merge request template population**, which uses a pre-created merge request template and automatically fills in the content for sections in it. Description templates provide a way to standardize and optimize collaboration and communication across the development lifecycle and GitLab Duo speeds this up even more!\n\nIssues with the same theme can be grouped together in an epic to organize the work to be done. Epics can have child issues and sub-epics and/or be linked to epics across the organization. Iterations can be used to track sprints of work, and can be manually scheduled or scheduled automatically using GitLab iteration cadences to streamline planning workflows. In addition, iterations include burndown and burnup charts. Burndown charts help track overall progress towards a project's total scope, while burnup charts track the daily total count and weight of issues added to and completed in a given timebox.\n\n#### Milestones\nTeams can use milestones to organize issues and merge requests into a cohesive group with an optional start date and an optional due date. Milestones are typically used to track releases and can track issues and merge requests at a project level or group level. Similar to iterations, milestones also provide burndown and burnup charts to show progress.\n\nMilestones can be associated with a release, whose automated creation generates many artifacts, including the release evidence. The release evidence is an automatically collected snapshot of data that’s related to the release. In addition to test artifacts and linked milestones, job artifacts can optionally be included in the release evidence, which can facilitate internal processes such as external audits.\n\nEpics, milestones, and iterations can be visualized via the Roadmaps page, which helps track release progress and streamline the release process. 
\n\nOnce the planning takes place, the work towards the resolution of a problem or a new feature can start. This happens in merge requests. Let’s delve deeper into how that happens in GitLab Flow. \n\n> [Learn more by trying GitLab Flow and GitLab Duo](https://gitlab.com/-/trials/new?glm_content=default-saas-trial&glm_source=about.gitlab.com%2Fblog%2F).\n\n### Merge requests and pushing code\n\n![Merge requests and pushing code - second portion of GitLab Flow](https://about.gitlab.com/images/blogimages/gitlab-flow-duo/The-GitLab-Flow-2023-mr-pushing-code-portion.png)\n\u003Ccenter>Merge requests and pushing code - second portion of GitLab Flow\u003C/center>\u003Cp>\u003C/p>\n\nThe second portion of GitLab Flow is related to merge requests and pushing code. As mentioned earlier, merge requests are where solutions are developed through collaboration among stakeholders across the organization. This collaboration can happen in a distributed manner and asynchronously. Participants can take advantage of collaborative capabilities, such as tagging, inline suggestions, inline comments, merge request comments, review threads, and review requests, which can help improve code quality, availability, reliability, and performance. Right after the creation of the merge request is the start of the GitLab Flow inner feedback loop, which is where code and fix pushes, test and scan runs, and collaboration and update reviews take place.\n\n#### Pipelines\nAs updates are applied to a feature branch via merge requests, pipelines — if defined — are automatically executed. Pipelines can have multiple stages and jobs to build and test, and then deploy the application or microservice to a review environment. In that review environment, the updates can be dynamically verified before they are merged to the main branch. 
This automation helps streamline the application update and review processes.\n\nIn addition, as DevSecOps teams make updates to the application via merge requests, they have a variety of AI-powered capabilities at their disposal. As they write or update code, GitLab Duo **Code Suggestions** recommends code that should come next and the developer can choose to accept or ignore the recommendation. Code Suggestions support code generation via prompts as well as code completion as you type. Code Suggestions can help improve the programming experience by reducing errors and helping developers write code faster, which can help enhance production code quality. Code Suggestions also can lead to higher developer productivity and faster iterations and rollouts.\n\nAs different stakeholders within the organization participate in the development or review of applications, they may encounter code that is poorly documented, complex or difficult to understand, or is written in a programming language unfamiliar to them. The GitLab Duo **Code explanation** capability explains code in natural language so that everyone can understand the code and get up to speed quickly.\n\nMoreover, when updates are committed to the feature branch, the GitLab Duo **Suggested reviewers** capability uses the changes in a merge request and a project’s contribution graph to suggest appropriate reviewers in the reviewer dropdown in the merge request sidebar. The list includes users that are knowledgeable about a specific aspect of the application and would be the best candidates to review the updates. Developers save time by not having to search and identify adequate reviewers, streamlining the review process and avoiding delays and low-quality reviews.\n\nWhen developers make changes to the code, they often don't include a comment in the merge request about the specific changes they made. 
The GitLab Duo **Merge request summary** capability allows the author of merge request changes to use AI to generate a natural-language comment that summarizes the updates to the code. Reviewers then can better understand the changes and streamline the entire review process\n\nAs reviewers review updates to the code in a merge request, they can create a review block, which can consist of many comments spanning many source files. To help the original author of the updates better understand the feedback provided by the reviewer in a long review block, the GitLab Duo **Code review summary** capability generates a natural-language summary of the reviewer’s feedback. This enables better handoff between authors and reviewers, streamlining the review process.\n\nFurthermore, when developers add new code via a merge request, they can leverage the GitLab Duo **Test generation** capability to use AI to generate unit tests for the new code. This can help to increase developer productivity, improve test coverage, and catch bugs early in the development lifecycle. Developers can also leverage GitLab Duo **Chat**, which is always accessible, to refactor code and generate in-line documentation, e.g. 
docstrings, for their source code.\n\nWhile pipelines execute on branch updates, they can include automated tests and scans, which helps in shifting security left.\n\n### Shifting security left\n\n![Shifting security left - third portion of GitLab Flow](https://about.gitlab.com/images/blogimages/gitlab-flow-duo/The-GitLab-Flow-2023-shift-sec-left-portion.png)\n\u003Ccenter>Shifting security left - third portion of GitLab Flow\u003C/center>\u003Cp>\u003C/p>\n\nThe third portion of GitLab Flow is shifting security left, which is also part of the GitLab Flow inner feedback loop.\n\nIn addition to DevOps and platform engineers, system and database administrators, and developers, some of the stakeholders collaborating in a merge request may be concerned about security and compliance, which is where automated tests and security scans play a role. Scans can be simply included in a pipeline via readily available templates and/or can be automatically executed within a merge request pipeline. GitLab provides a broad set of built-in security scanners and analyzers that can be leveraged by GitLab Flow, but the DevSecOps platform can also accommodate third-party and custom scanners.\n\nGitLab Flow shifts security left in the pipeline to detect and resolve defects as early as possible in the software development process. It is much simpler and cheaper to fix vulnerabilities early in the development cycle than once the application is in production, where an unscheduled outage can affect your users and revenue.\n\nThe built-in security scanners and analyzers provided by GitLab include: unit testing, infrastructure-as-code (IaC) scanning, static application security testing (SAST) scanners, dependency scanning, secret detection, container scanning, API security, web API fuzz testing, and coverage-guided fuzz testing. 
In addition, GitLab provides a variety of security dashboards and reports to manage and visualize vulnerabilities, such as the Dependencies list, Security dashboard, Vulnerability Report, and vulnerability pages.\n\nTo help developers and security engineers better understand and remediate vulnerabilities more efficiently, the GitLab Duo **Vulnerability explanation** capability provides an explanation about a specific vulnerability, how it can be exploited, and, most importantly, a recommendation on how to fix the vulnerability. Developers can also take advantage of GitLab Duo **Vulnerability resolution**, which automatically creates a merge request that includes code changes to fix the vulnerability. These AI-powered capabilities can help streamline and optimize the process of securing and hardening an application to prevent vulnerabilities that can be exploited by cyber attacks in production.\n\nBesides SAST scanners, GitLab provides dynamic application security testing (DAST) scanners, which require a running application. When leveraging these scanners, GitLab is capable of automatically provisioning a DAST environment for the DAST scans and then performing a complete cleanup of all resources post-DAST testing. In addition, for running containers, GitLab provides operational container scanning, which scans container images in your cluster for security vulnerabilities.\n\nThe scans mentioned above can be executed automatically within a merge request pipeline or, in some cases, can be scheduled for execution via scan execution and merge request approval policies. These policies can be defined via the GitLab UI or YAML files and are configured in a separate project, allowing segregation of duties for reusability, maintenance, and management. Scan execution policies require that security scans be run on a specified schedule or with the project pipeline, and merge request approval policies take action based on scan results. 
Security engineers or teams can define these policies to enforce security processes across the organization and GitLab Flow may encounter or leverage these as it spans through its steps.\n\nTo enforce security and compliance across projects in your organization, you can use compliance labels and pipelines. Compliance labels and pipelines can be made mandatory to execute before a project’s own pipeline. With this approach, you can ensure that all teams within your organization meet your security and compliance standards. In addition, you can secure your applications against cyber attacks, conform to government compliance standards, and always be audit-ready.\n\nThe main goal of all of these GitLab Flow security prescriptions is to fix vulnerabilities early in the development cycle rather than once the application is in production, where remediating a vulnerability can prove to be very costly in reputation and revenue.\n\nAs vulnerabilities are mitigated within the GitLab Flow inner feedback loop and more updates are applied to the application in the feature branch, stakeholders need to re-review these updates to ensure that the updates have taken place and no regressions have inadvertently been introduced.\n\n### Continuous review\n\n![Reviews - fourth portion of GitLab Flow](https://about.gitlab.com/images/blogimages/gitlab-flow-duo/The-GitLab-Flow-2023-reviewing-features-portion.png)\n\u003Ccenter>Reviews - fourth portion of GitLab Flow\u003C/center>\u003Cp>\u003C/p>\n\nThe next portion of GitLab Flow is reviewing features, which prescribes the continuous review of applications. Reviewing features involves the ability to stand up a review environment to which the interim application (feature branch) is deployed so that stakeholders can review it in real time and provide feedback. The interim application can then be continuously adjusted until it is ready to be merged to the main branch. 
GitLab Flow also prescribes the cleanup of all provisioned review environment resources at the moment when the merge request is merged to the main branch.\n\nThis iterative automated review process is part of the inner feedback loop in GitLab Flow. As mentioned above, within the inner feedback loop, GitLab Duo capabilities like Code explanation, Code Suggestions, Suggested reviewers, Merge request summary, Merge request template population, Code review summary, Vulnerability explanation, Vulnerability resolution, and Root cause analysis are prescribed by GitLab Flow to enable a better handoff between authors and reviewers and streamline the entire review process.\n\nThe GitLab Flow inner feedback loop terminates when all review items are addressed and the merge request is approved and merged to the main branch, which triggers the deployment of the application to production.\n\n### Deploying applications and infrastructure\n\n![Deploying - fifth portion of GitLab Flow](https://about.gitlab.com/images/blogimages/gitlab-flow-duo/The-GitLab-Flow-2023-deploy-apps-portion.png)\n\u003Ccenter>Deploying - fifth portion of GitLab Flow\u003C/center>\u003Cp>\u003C/p>\n\nDepending on an organization’s needs, either continuous delivery or continuous deployment is prescribed by GitLab Flow. Whereas continuous delivery is the frequent release of code by triggering the deployments manually (e.g., to production), continuous deployment is the automated release of code (e.g., to production) without human intervention. Let’s cover continuous delivery first.\n\nAs you release your software using continuous delivery, you have a few deployment options. You can establish a freeze window and then deploy using advanced deployment techniques, such as canary, blue/green, timed, and incremental rollouts. Incremental rollouts can lower the risk of production outages delivering a better user experience and customer satisfaction. 
Advanced deployment techniques can also improve development and delivery efficiency, streamlining the release process.\n\nAs you release your software using continuous deployment, all changes/updates go directly to production. Progressive delivery approaches like feature flags, which allow you to separate the delivery of specific features from a launch, are a good way to reduce risk and manage what functionality to make available to production users. Feature flags support multiple programming languages and allow developer experimentation and controlled testing. You can even use feature flags to roll out features to specific users.\n\nAlthough GitLab supports all these deployment approaches, GitLab Flow allows for the adoption of the approach that best fits the organization and/or specific project needs.\n\n### Monitoring applications and DevSecOps processes\nOnce your application has been deployed to production, it needs to be continuously monitored to ensure its stability, performance, and availability. In addition, as the DevSecOps processes execute, they are measured, providing the opportunity to improve their performance and efficiency. The monitoring capabilities are provided by GitLab and, as such, can be leveraged by GitLab Flow.\n\nFor running containers, GitLab provides operational container scanning (OCS), which scans container images in your cluster for security vulnerabilities. These scans can be automated by scheduling them when to run and any found vulnerabilities are automatically displayed in a security dashboard. The OCS can help keep your cluster applications secure and preempt any cyber attacks that can lead to leaks of private data and even cause unexpected outages.\n\nError tracking allows developers to discover and view errors generated by their application. All errors generated by your application are displayed in the Error Tracking list in GitLab. 
Error tracking can help with availability and performance of your applications by detecting and resolving unexpected application conditions fast.\n\nGitLab can accept alerts from any monitoring source, including Prometheus, via a webhook receiver. As alerts come in, they are displayed in the GitLab Alerts list, from which you can manually manage them. Alerts can also automatically trigger the creation of incidents, ChatOps, and email messages to appropriate individuals or groups. All these capabilities streamline the alert resolution and management process.\n\nAs incidents are created, due to production problems, they appear in the GitLab Incidents list for incident management. You can manage one or more incidents, sort them, search them, assign them, set their statuses, and even see their SLA preset countdown timer. Moreover, you can create on-call schedules and rotations, escalation policies, and set up paging and notifications to handle incidents. In addition, you can link an incident to an alert so that when the incident is closed, its associated alert is automatically resolved. Incident timelines are another capability for executives and external viewers to see what happened during an incident, and which steps were taken for it to be resolved. All these capabilities streamline the incident management process so that they can be resolved as quickly as possible.\n\nAudit events track important events, including who performed the related action and when in GitLab. These events are displayed in the GitLab Audit Events list and provide, among others, the action that was taken on an object, who did it, and the date and time of its occurrence.\n\nAll the lists and dashboards mentioned above can help preempt out-of-compliance scenarios to avoid penalties as well as streamline audit processes. 
For your running applications, they generate the data and metrics that can be used in the GitLab Flow outer feedback loop to help improve and optimize your applications and lower the risk of unscheduled production outages.\n\n### Continuous improvement\nWhen applying GitLab Flow, you also have the opportunity to use the insight that GitLab provides in the form of end-to-end process metrics dashboards to continuously improve not just your application but also your software delivery performance. These dashboards and their metrics are auto-generated by GitLab and are always available.\n\n### The Value Stream Analytics dashboard\n\nYou can track and monitor your application development lifecycle through the Value Stream Analytics Dashboard, where you can check project or group statistics over time. This dashboard is customizable but you can get started quickly by creating a value stream using a GitLab-provided default template. The default dashboard displays metrics for each of the pre-defined stages of your value stream analytics, namely Issue, Plan, Code, Test, Review, and Staging, as well as a graph with the average time to completion for each. It also shows the value stream analytics key metrics: lead time, cycle time, new issues, commits, and deploys. You can use these metrics to find areas of improvement in the stages of your value stream.\n\n### DORA metrics dashboard\n\nTo view the performance metrics that measure the effectiveness of your organization’s development and delivery practices, GitLab provides the [DORA](https://about.gitlab.com/solutions/value-stream-management/dora/) (DevOps Research and Assessment) metrics dashboard, which displays four key metrics: Deployment Frequency, Lead Time for Changes, Time to Restore Service, and Change Failure Rate. Deployment Frequency measures how often your organization deploys code to production or releases it to end users. 
Lead Time for Changes measures how long it takes to go from code committed to code successfully running in production. Time to Restore Service measures the time needed to restore services to the level they were previously, in case of an incident. Finally, Change Failure Rate is the percentage of changes to production or released to users that resulted in a degraded service (for example, a change that caused a service impairment or outage) and subsequently required remediation (required a hotfix, rollback, patch). These four key metrics are outcomes of your current processes and give you the opportunity to improve the factors and capabilities that drive them.\n\n### Customize your dashboard\n\nAnother dashboard is the Value Streams Dashboard, which is a customizable dashboard that enables decision-makers to identify trends, patterns, and opportunities for software development improvements. The metrics shown are the DORA metrics followed by the value stream analytics flow metrics and counts for critical and high vulnerabilities for the month to date, the two preceding months, and the past six months.\n\nGitLab Duo can also help in your continuous improvement efforts. For example, the **Value stream forecasting** capability takes historical data and uses data trends across your development lifecycle to predict the future behavior of your value stream metrics. 
You can use these predictive analyses in your optimization initiatives.\n\nAll these dashboards and the metrics they report on are part of the GitLab Flow outer feedback loop to help you lower the risk of unscheduled production outages and improve and optimize your applications and DevSecOps workflows.\n\n### AI impact analytics\nTo better understand the impact of the use of GitLab Duo (or AI) along the entire development life cycle, you can check the [AI Impact analytics](https://about.gitlab.com/blog/developing-gitlab-duo-ai-impact-analytics-dashboard-measures-the-roi-of-ai/), from where you can see how the adoption of GitLab Duo Code Suggestions impacts other performance, quality and security metrics. You can visualize the last six months of AI adoption and its impact on other metrics, such as cycle time, lead time, deployment frequency, change failure rate, and critical vulnerabilities over time.\n\nAI impact analytics help to measure adoption, effectiveness and benefits that AI brings to teams and organizations and also to identify areas for improvement.\n\n## Why use GitLab Flow?\nGitLab Flow is a prescribed approach, practiced by our customers and users worldwide, that can provide the following benefits: \n- Higher productivity via the automation capabilities provided by GitLab and its single user interface and data model, all leveraged by GitLab Flow\n- Accurate insights into the end-to-end DevSecOps lifecycle to support continuous improvement\n- Built-in dashboards and metrics that can help you optimize your applications and DevSecOps processes\n- Higher code quality and improved reliability and availability of your applications\n- Better application security through built-in security scanners and capabilities\n- Compliance- and audit-readiness via built-in compliance features\n- Shorter cycle times that can help you increase deployment frequency\n- Continuous review enabled by the GitLab Flow inner feedback loop\n- The GitLab Flow inner feedback loop can 
help you optimize application updates leading to better code quality and higher reliability and availability of your applications\n- The GitLab Flow outer feedback loop can help you improve your applications as well as the development lifecycle itself\n- High levels of collaboration among stakeholders in your organization\n- Shifting security left to help find vulnerabilities in applications before they make it to production to avoid costly, unscheduled outages\n- Lower risk when deploying to production via the advanced deployment techniques and progressive delivery approaches supported by GitLab\n- AI-powered capabilities that span across the entire development lifecycle and can boost productivity, code quality, continuous improvement, security and compliance, and more\n- Support for cloud-native and non-cloud-native applications\n- Multi-cloud support for hybrid/multi-cloud applications\n- Shifting security left to help you find vulnerabilities in your applications before they make it to production so that you can avoid costly unscheduled outages\n\nHow can you get started with GitLab Flow? Leveraging GitLab Auto DevOps or parts of it is a good starting point for applying GitLab Flow principles to your application development lifecycle.\n\n## GitLab Flow and Auto DevOps\n\n![Auto DevOps - an instantiation of GitLab Flow](https://about.gitlab.com/images/blogimages/gitlab-flow-duo/ado-pipeline.png)\n\u003Ccenter>Auto DevOps - an instantiation of GitLab Flow\u003C/center>\u003Cp>\u003C/p>\n\n[Auto DevOps](https://docs.gitlab.com/ee/topics/autodevops/) applies GitLab Flow throughout all its stages and jobs. You can think of it as a good example for the instantiation of GitLab Flow.\n\nAuto DevOps is a collection of predefined, out-of-the-box CI/CD templates that auto-discover the source code you have. 
Based on best practices, these templates automatically detect, build, test, deploy, and monitor your applications.\n\nThe Auto DevOps pipeline shifts work left to find and prevent defects as early as possible in the software delivery process. The pipeline then deploys the application to staging for verification and then to production in an incremental/timed fashion.\n\nAuto DevOps gets you started quickly, increasing developer productivity, and it can be easily customized to your needs, with support for the most common programming frameworks and languages. Auto DevOps is modular, customizable, and extensible, which allows you to leverage pieces of it in your pipelines or apply all of it for your application.\n\n## Get started\n[Combine GitLab Flow and GitLab Duo today](https://gitlab.com/-/trials/new?glm_content=default-saas-trial&glm_source=about.gitlab.com%2Fblog%2F) to achieve significant improvements in end-to-end workflow efficiency that can lead to even higher levels of productivity, deployment frequency, code quality and overall security, and production resiliency and availability. 
\n\nIf you'd like to see a workflow in action that combines GitLab Flow and GitLab Duo and how it can benefit you, watch the following video:\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/CKrZ4_tKY4I?si=Kf6QsYFIzKkJZpJd\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n",[109,9,703],"2024-06-18",{"slug":1874,"featured":6,"template":680},"gitlab-flow-duo","content:en-us:blog:gitlab-flow-duo.yml","Gitlab Flow Duo","en-us/blog/gitlab-flow-duo.yml","en-us/blog/gitlab-flow-duo",{"_path":1880,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":1881,"content":1887,"config":1895,"_id":1897,"_type":14,"title":1898,"_source":16,"_file":1899,"_stem":1900,"_extension":19},"/en-us/blog/gitlab-global-devsecops-ai-report",{"title":1882,"description":1883,"ogTitle":1882,"ogDescription":1883,"noIndex":6,"ogImage":1884,"ogUrl":1885,"ogSiteName":693,"ogType":694,"canonicalUrls":1885,"schema":1886},"GitLab DevSecOps AI Report: A new software development era","Our survey found that DevSecOps teams are optimistic about AI, but privacy, security, and training emerged as key challenges to successful AI adoption.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749663908/Blog/Hero%20Images/2023-devsecops-report-blog-banner2.png","https://about.gitlab.com/blog/gitlab-global-devsecops-ai-report","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"GitLab Global DevSecOps AI Report: Ushering in a new era of software development\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Ashley Kramer\"}],\n        \"datePublished\": \"2023-09-12\",\n      }",{"title":1888,"description":1883,"authors":1889,"heroImage":1884,"date":1891,"body":1892,"category":10,"tags":1893},"GitLab Global DevSecOps AI Report: Ushering in a new era of software development",[1890],"Ashley 
Kramer","2023-09-12","\nAI has taken the world by storm, creating a tectonic shift across industries and society as a whole. With it, important discussions have emerged about its beneficial applications and the potential for negative repercussions, especially in the field of software development. AI is already changing how software is designed, built, secured, and deployed. With all of the industry buzz around the technology, it’s more important than ever to separate the hype from reality. \n\nThat’s why I’m pleased to share that GitLab has recently released its [Global DevSecOps Report: The State of AI in Software Development](https://about.gitlab.com/developer-survey/#ai). With this survey, we had one goal in mind: to uncover whether AI is living up to its promise. We surveyed more than 1,000 global senior technology executives, developers, and security and operations professionals to understand how organizations use AI in software development today, and what they hope to achieve with it in the future.\n\nMany of the sentiments reflected in the report echo what I hear firsthand from customers across industries and across the world: An eagerness to harness the benefits of AI for business innovation, while remaining cautious of potential risks. \n\nPerhaps unsurprisingly, privacy and security, productivity, and training emerged as key challenges to successful AI adoption. Organizations recognize that the policies and strategies put into place now, and the ways in which we shift our workflows to incorporate AI, will shape the future of software development. \n\nLet’s dive into a few of the key findings.\n\n> Download the full [Global DevSecOps Report: The State of AI in Software Development](https://about.gitlab.com/developer-survey/#ai).\n\n## As the excitement around AI increases, so do security concerns\nSecurity and privacy are top concerns, but they don’t detract from the urgency to implement AI. 
While 83% of those surveyed said that implementing AI in their software development processes is essential to avoid falling behind, 79% noted that they are concerned about AI tools having access to private information or intellectual property.\n\nThe top concern, by far, is the potential for sensitive information such as customer data being exposed (72%), and nearly half of respondents (48%) said they were concerned that trade secrets may be exposed. \n\nSimilarly, when it comes to the biggest concerns around introducing AI into the software development lifecycle, 48% said that AI-generated code may not be subject to the same copyright protection as human-generated code.\n\nGiven these results, it’s not surprising that 95% of senior technology executives said they prioritize privacy and protection of intellectual property when selecting an AI tool.\n\nTo safely benefit from AI, organizations can avoid pitfalls, including data leakage and security vulnerabilities, by first deploying it in a low-risk environment in their organization. This enables teams to learn by trial and error and build best practices before allowing additional teams to adopt AI, ensuring it scales safely and sustainably. \n\n## AI is poised to increase the productivity of some teams — and increase the workload of others\nDifferent business functions have different goals and use cases for AI, highlighting conflicting areas of opportunity and concern. \n\nOur survey findings show that 40% of security practitioners are worried that AI-powered code generation will increase their workload. However, code generation is just one of many areas where AI can add value. In our survey, developers told us that they spend only 25% of their total time writing code. The rest is spent improving existing code, understanding code, testing and maintaining code, and identifying and mitigating security vulnerabilities. 
Organizations stand to see major productivity and collaboration benefits by applying AI across the software development lifecycle.\n\t\t\t\nApproximately 50% of respondents expressed interest in AI-powered use cases across the software development lifecycle beyond code generation. In other words, there’s a strong appetite for more — and more integrated — AI spanning the breadth of the software development lifecycle. \n\n## Companies and employees are at odds over how to bridge the AI skills gap \nWhile organizations reported optimism about their company’s use of AI, our survey shows a discrepancy between organizations’ and practitioners’ satisfaction with AI training resources. \n\nDespite 75% of respondents saying their organization provides training and resources for using AI, a roughly equal proportion also said they are finding resources on their own, suggesting that the available resources and training within organizations may be insufficient. \n\nWith AI introducing a new set of skills to learn, 34% of respondents said they need training to use or interpret AI, and developers were significantly more likely to lack confidence in AI-generated output than either security or operations respondents (38% compared to 28% and 28%, respectively). \n\nOrganizations should focus on providing AI training and resources to all job roles and functional areas that will be using AI, and it is especially important to ensure that the resources for development teams are relevant, up to date, and cover the latest AI technologies and applications.\n\n## But wait, there’s more\nThese findings reinforce that for organizations to benefit from AI, it needs to be secure and delivered in a single application that is embedded across the entire software development lifecycle. 
\n\nThese core tenets guide our vision for the future of the GitLab AI-powered DevSecOps platform and [GitLab Duo](https://about.gitlab.com/gitlab-duo/), our suite of AI capabilities that enables organizations to boost efficiency, productivity, and collaboration. Only by adopting a privacy-first, integrated approach to implementing AI can organizations be confident that their intellectual property is safe while empowering everyone to deliver better, more secure software faster. \n\nTo explore the full report, [download the Global DevSecOps Report: The State of AI in Software Development](https://about.gitlab.com/developer-survey/#ai).\n",[703,9,1894],"developer survey",{"slug":1896,"featured":6,"template":680},"gitlab-global-devsecops-ai-report","content:en-us:blog:gitlab-global-devsecops-ai-report.yml","Gitlab Global Devsecops Ai Report","en-us/blog/gitlab-global-devsecops-ai-report.yml","en-us/blog/gitlab-global-devsecops-ai-report",{"_path":1902,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":1903,"content":1908,"config":1913,"_id":1915,"_type":14,"title":1916,"_source":16,"_file":1917,"_stem":1918,"_extension":19},"/en-us/blog/gitlab-jetbrains-neovim-plugins",{"title":1904,"description":1905,"ogTitle":1904,"ogDescription":1905,"noIndex":6,"ogImage":820,"ogUrl":1906,"ogSiteName":693,"ogType":694,"canonicalUrls":1906,"schema":1907},"GitLab plugins for JetBrains and Neovim now available in Beta","GitLab plugins for JetBrains IDEs and Neovim are now available in Beta, bringing GitLab Duo Code Suggestions to more software development environments.","https://about.gitlab.com/blog/gitlab-jetbrains-neovim-plugins","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"GitLab plugins for JetBrains and Neovim now available in Beta\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Kai Armstrong\"}],\n        \"datePublished\": \"2023-07-25\",\n      
}",{"title":1904,"description":1905,"authors":1909,"heroImage":820,"date":1910,"body":1911,"category":10,"tags":1912},[1448],"2023-07-25","\n\n_This blog post is the latest in an ongoing series about GitLab's journey to [build and integrate AI/ML into our DevSecOps platform](/blog/ai-ml-in-devsecops-series/). Start with the first blog post: [What the ML is up with DevSecOps and AI?](/blog/what-the-ml-ai/). Throughout the series, we'll feature blogs from our product, engineering, and UX teams to showcase how we're infusing AI/ML into GitLab._\n\nIn June, we shared our plans to [extend AI-powered code suggestions](/blog/extending-code-suggestions/) to more IDEs, thereby continuing to help enhance developer productivity. A few weeks ago, we [announced](/blog/gitlab-visual-studio-extension/) the availability of our [extension for Visual Studio](https://marketplace.visualstudio.com/items?itemName=GitLab.GitLabExtensionForVisualStudio). Now, we're adding support for JetBrains and Neovim with official plugins to further extend the reach of GitLab Duo Code Suggestions and help enhance developer productivity across even more development environments.\n\nThese new GitLab plugins for both JetBrains and Neovim support [GitLab Duo](https://about.gitlab.com/gitlab-duo/) Code Suggestions for both [GitLab SaaS](https://docs.gitlab.com/ee/user/project/repository/code_suggestions.html#enable-code-suggestions-on-gitlab-saas) and [GitLab self-managed](https://docs.gitlab.com/ee/user/project/repository/code_suggestions.html#enable-code-suggestions-on-self-managed-gitlab).\n\n## GitLab for JetBrains IDEs\nYou can download the GitLab for JetBrains plugin from the [JetBrains Plugin Marketplace](https://plugins.jetbrains.com/plugin/22325-gitlab) or from directly within your IDE by visiting `Settings` -> `Plugins` and then searching for `GitLab`. 
Once you've installed the plugin, follow the [setup instructions](https://gitlab.com/gitlab-org/editor-extensions/gitlab-jetbrains-plugin#setup) to configure authentication and get started.\n\n![JetBrains GitLab Duo Code Suggestions Settings](https://about.gitlab.com/images/blogimages/jetbrains-code-suggestions-settings.png)\n\nYou can verify if the extension is connected and working by checking the status bar icon. If everything looks good, you're ready to start receiving code suggestions as you work. Just start typing and GitLab Duo will automatically provide you suggestions inline. Press `Tab` to accept the suggestions or keep typing to receive new suggestions.\n\n![JetBrains GitLab Duo Code Suggestions](https://about.gitlab.com/images/blogimages/jetbrains-code-suggestions-ghost-text.png)\n\nWe look forward to hearing from you about this initial release! You can provide feedback or report any issues you're having in our [feedback issue](https://gitlab.com/gitlab-org/editor-extensions/gitlab-jetbrains-plugin/-/issues/38).\n\n## GitLab for Neovim\nThe [GitLab for Neovim plugin](https://gitlab.com/gitlab-org/editor-extensions/gitlab.vim) can be found on GitLab and you can [follow the instructions to get started](https://gitlab.com/gitlab-org/editor-extensions/gitlab.vim#getting-started). Once you've downloaded the plugin, there are [configuration options](https://gitlab.com/gitlab-org/editor-extensions/gitlab.vim#configuration) available to help customize your experience.\n\nOnce you've configured the plugin you'll be able to receive suggestions directly within the UI. Check out our demo video below to learn more about the plugin.\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube-nocookie.com/embed/PRSPQvbFquU\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\nWe're continuing to iterate on our Neovim plugin. 
You can provide feedback or report any issues you're having in our [feedback issue](https://gitlab.com/gitlab-org/editor-extensions/gitlab.vim/-/issues/22).\n\n## Iterating on AI/ML features\nThese new additions to our family of editor extensions join our existing extensions for [Visual Studio](https://marketplace.visualstudio.com/items?itemName=GitLab.GitLabExtensionForVisualStudio) and [Visual Studio Code](https://marketplace.visualstudio.com/items?itemName=GitLab.gitlab-workflow). We plan to continue iterating to make the GitLab Duo Code Suggestions experience even better.\n\nWe're also continuing our work on a [GitLab Language Server for Code Suggestions](https://gitlab.com/gitlab-org/editor-extensions/gitlab-language-server-for-code-suggestions), which will allow us to standardize and iterate faster on our IDE extensions, and will enable users of IDEs and code editors to use GitLab Duo Code suggestions even if an official extension isn't available. We look forward to providing more documentation and working with the community on this project in the future.\n\nThese efforts are just the start of how we're incorporating GitLab Duo capabilities throughout the [software development lifecycle](/blog/what-the-ml-ai/) to help GitLab users become more efficient and effective. As we continue to identify painful and time-consuming tasks that are ideal for AI-assisted features, we'll continue to share updates, tutorials, and demos through this blog series.\n\nCheckout GitLabs 16.2 release to see what's new with [Code Suggestions](https://about.gitlab.com/releases/2023/07/22/gitlab-16-2-released/#gitlab-duo-code-suggestions-improvements-powered-by-google-ai).\n\nInterested in using GitLab Duo features? 
[Join our waitlist](https://forms.gle/9eeUkPJauKsbLaoz5) and we'll keep you updated.\n\nContinue reading our \"[AI/ML in DevSecOps](/blog/ai-ml-in-devsecops-series/)\" series.\n\n_Disclaimer: This blog contains information related to upcoming products, features, and functionality. It is important to note that the information in this blog post is for informational purposes only. Please do not rely on this information for purchasing or planning purposes. As with all projects, the items mentioned in this blog and linked pages are subject to change or delay. The development, release, and timing of any products, features, or functionality remain at the sole discretion of GitLab._\n",[703,725,704,9],{"slug":1914,"featured":6,"template":680},"gitlab-jetbrains-neovim-plugins","content:en-us:blog:gitlab-jetbrains-neovim-plugins.yml","Gitlab Jetbrains Neovim Plugins","en-us/blog/gitlab-jetbrains-neovim-plugins.yml","en-us/blog/gitlab-jetbrains-neovim-plugins",{"_path":1920,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":1921,"content":1927,"config":1933,"_id":1935,"_type":14,"title":1936,"_source":16,"_file":1937,"_stem":1938,"_extension":19},"/en-us/blog/gitlab-named-a-leader-in-2024-gartner-magic-quadrant-for-ai-code-assistants",{"title":1922,"description":1923,"ogTitle":1922,"ogDescription":1923,"noIndex":6,"ogImage":1924,"ogUrl":1925,"ogSiteName":693,"ogType":694,"canonicalUrls":1925,"schema":1926},"GitLab named a Leader in 2024 Gartner Magic Quadrant for AI Code Assistants","In the first Gartner® Magic Quadrant™ for this category, GitLab is recognized for its ability to execute and completeness of vision in AI code assistant technology.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749664458/Blog/Hero%20Images/Gartner_AI_Code_Assistants_Blog_Post_Cover_Image_1800x945.png","https://about.gitlab.com/blog/gitlab-named-a-leader-in-2024-gartner-magic-quadrant-for-ai-code-assistants","\n                        {\n        \"@context\": 
\"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"GitLab named a Leader in 2024 Gartner Magic Quadrant for AI Code Assistants\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Dave Steer\"}],\n        \"datePublished\": \"2024-08-22\",\n      }",{"title":1922,"description":1923,"authors":1928,"heroImage":1924,"date":1929,"body":1930,"category":10,"tags":1931},[1774],"2024-08-22","We’re thrilled to announce that GitLab has been recognized as a Leader in the [Gartner® Magic Quadrant™ for AI Code Assistants](https://about.gitlab.com/gartner-mq-ai-code-assistants/) — the first-ever year of this category. We feel this is an important recognition and we believe it highlights our commitment to delivering AI-powered capabilities that accelerate software delivery, enhance security, and drive innovation for our customers. \n\nAI code assistants go beyond just code generation and completion. They're collaborative partners that boost developer efficiency by improving code quality and continuous learning. By automating routine tasks and providing intelligent suggestions, assistants like GitLab Duo — our suite of AI-powered features — free up developer time to focus on higher-level problem-solving. \n\n![Gartner MQ AI Code Assistants image](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749675964/Blog/Content%20Images/AI_Code_Assistants_MQ_graphic__1_.png)\n\n> Download the [2024 Gartner® Magic Quadrant™ for AI Code Assistants report](https://about.gitlab.com/gartner-mq-ai-code-assistants/).\n\n## AI code assistants: Speed, security, and seamless integration\n\nAI code assistants are integral to organizations of all sizes, helping DevSecOps teams develop and deploy secure software faster. However, the true value of AI emerges when it’s integrated across the entire software development lifecycle. 
Unlike limited AI point solutions, which can lead to fragmented toolchains and data silos, GitLab’s comprehensive platform embeds AI from planning through production, offering holistic visibility and insights via metrics and dashboards.\n\n## The power of GitLab Duo\n\n[GitLab Duo](https://about.gitlab.com/gitlab-duo/) is a comprehensive toolbox of AI capabilities designed to improve the developer experience, shift security left in the development cycle, and strengthen collaboration across Dev, Sec, and Ops teams. Key features include: \n\n* Code Suggestions for code generation and code completion\n* Chat for context-aware, in-app assistance on code explanation, code refactoring, and test generation\n* Vulnerability Explanation to better understand vulnerabilities in code\n* Vulnerability Resolution to help mitigate found vulnerabilities\n* Root Cause Analysis to troubleshoot pipeline issues\n* AI Impact Analytics Dashboard to gain real-time insights and evaluate an organization's AI ROI\n\n## Maximizing ROI with AI \n\nBusiness and engineering leaders need visibility into how AI is being used across the software development lifecycle to assess the ROI of their technology investments. GitLab's [AI Impact Analytics Dashboard](https://about.gitlab.com/blog/developing-gitlab-duo-ai-impact-analytics-dashboard-measures-the-roi-of-ai/) provides that visibility as well as metrics to gauge AI adoption rates, performance improvements, and more.\n\n## Flexibility, privacy, and transparency at the forefront\n\nGitLab customers looking to explore AI-powered capabilities can use GitLab Duo to leverage the power of AI securely across an IDE of choice or a remote development workspace right out of the box, with a flexible pricing structure and a 60-day free trial. Also, the [GitLab AI Transparency Center](https://about.gitlab.com/ai-transparency-center/) provides full visibility into our governance and transparency practices. 
\n\nSoon, organizations will be able to [tailor their AI experience](https://about.gitlab.com/blog/meet-gitlab-duo-workflow-the-future-of-ai-driven-development/) to their strategic and regulatory requirements with model personalization and self-hosted model deployment. Model personalization will allow enterprises to customize GitLab Duo and tap into AI’s full potential in close alignment with their business goals, operational needs, and customer expectations. Self-hosted model deployment ensures that data does not leave an organization's secure environment, reducing the risk of breaches and ensuring compliance for highly regulated industries. \n\n## Leading the future of AI in DevSecOps\n\nGitLab is your partner in AI-driven software development. We equip teams with the tools to build, secure, and deploy software faster. Our commitment to innovation ensures you're always at the forefront of AI advancements. Stay tuned for exciting updates on our roadmap as we continue to revolutionize DevSecOps.\n\n> [Download the 2024 Gartner® Magic Quadrant™ for AI Code Assistants report](https://about.gitlab.com/gartner-mq-ai-code-assistants/).\n\n***Source: Gartner, Magic Quadrant for AI Code Assistants, Arun Batchu, Haritha Khandabattu, Philip Walsh, Matt Brasier, August 2024***\n\n***GARTNER is a registered trademark and service mark of Gartner, Inc. and/or its affiliates in the U.S. and internationally, and MAGIC QUADRANT is a registered trademark of Gartner, Inc. and/or its affiliates and are used herein with permission. All rights reserved.***\n\n***Gartner does not endorse any vendor, product or service depicted in its research publications, and does not advise technology users to select only\nthose vendors with the highest ratings or other designation. Gartner research publications consist of the opinions of Gartner’s research\norganization and should not be construed as statements of fact. 
Gartner disclaims all warranties, expressed or implied, with respect to this research, including any warranties of merchantability or fitness for a particular purpose.***\n\n***This graphic was published by Gartner Inc. as part of a larger report and should be evaluated in the context of the entire document. The Gartner\ndocument is available upon request from Gartner B.V.***\n",[1135,9,703,1932],"research",{"slug":1934,"featured":91,"template":680},"gitlab-named-a-leader-in-2024-gartner-magic-quadrant-for-ai-code-assistants","content:en-us:blog:gitlab-named-a-leader-in-2024-gartner-magic-quadrant-for-ai-code-assistants.yml","Gitlab Named A Leader In 2024 Gartner Magic Quadrant For Ai Code Assistants","en-us/blog/gitlab-named-a-leader-in-2024-gartner-magic-quadrant-for-ai-code-assistants.yml","en-us/blog/gitlab-named-a-leader-in-2024-gartner-magic-quadrant-for-ai-code-assistants",{"_path":1940,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":1941,"content":1947,"config":1952,"_id":1954,"_type":14,"title":1955,"_source":16,"_file":1956,"_stem":1957,"_extension":19},"/en-us/blog/gitlab-premium-with-duo",{"title":1942,"description":1943,"ogTitle":1942,"ogDescription":1943,"noIndex":6,"ogImage":1944,"ogUrl":1945,"ogSiteName":693,"ogType":694,"canonicalUrls":1945,"schema":1946},"Unlocking AI for every GitLab Premium and Ultimate customer","GitLab Premium and Ultimate now include GitLab Duo essentials for creating and understanding code throughout the software development lifecycle, all at no additional cost.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749660188/Blog/Hero%20Images/blog-premium-with-duo-cover-0756-fy26-v2-1800x945.png","https://about.gitlab.com/blog/gitlab-premium-with-duo","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Unlocking AI for every GitLab Premium and Ultimate customer\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"David 
DeSanto, Chief Product Officer, GitLab\"}],\n        \"datePublished\": \"2025-05-15\",\n      }",{"title":1942,"description":1943,"authors":1948,"heroImage":1944,"date":1949,"body":1950,"category":725,"tags":1951},[1486],"2025-05-15","Today, we launch GitLab 18.0, which highlights our latest innovations and plans in core DevSecOps workflows, security and compliance, and AI. __As part of this release, we're excited to announce that GitLab Premium and Ultimate now include essential GitLab Duo AI capabilities at no additional cost.__ All Premium and Ultimate customers will have immediate access to GitLab Duo Code Suggestions and Chat directly in their preferred supported source code editors and IDEs.\n\n## AI for every development team\n\nArtificial intelligence is now at the center of the developer experience. AI enhances coding in many ways: It analyzes your codebase and provides real-time suggestions as you type, creates functions and methods based on your project's context, reduces repetitive tasks, and automates code reviews.\n\nOver the past few years, we've built [GitLab Duo](https://about.gitlab.com/gitlab-duo/) to infuse generative and agentic AI capabilities like these into our platform. Because writing code is just the start of the software lifecycle – our [global DevSecOps study](https://about.gitlab.com/developer-survey/) found that developers spend 79% of their time on tasks other than code creation – we have adopted a strategy to integrate AI throughout the entire software development lifecycle. \n\nNow, we’re excited to take the next step forward by including essential GitLab Duo capabilities in our GitLab Premium and Ultimate tiers, enabling developers to get the benefits of AI at no additional cost.\n\nBy including GitLab Duo Chat and Duo Code Suggestions in Premium and Ultimate, every software engineer can accelerate their workflow within the IDE — without requiring separate tooling, licensing, or governance. 
All existing Premium and Ultimate customers now have instant access to Duo Chat and Code Suggestions, once they upgrade to GitLab 18.0, and this enhancement becomes standard for all new customers.\n\n> **\"GitLab has already been instrumental in eliminating our reliance on a fragmented toolchain, which cut costs from disconnected solutions, and streamlined our workflow. Enhancing GitLab Premium with Duo will give us even greater efficiency and cost savings as our developers spend less time on routine coding tasks and more time tackling complex challenges that drive real business value.”**\n>\n>- Andrei Nita, Chief Technology Officer at McKenzie Intelligence Services\n\n\u003Cdiv style=\"padding:56.25% 0 0 0;position:relative;\">\u003Ciframe src=\"https://player.vimeo.com/video/1083723619?badge=0&amp;autopause=0&amp;player_id=0&amp;app_id=58479\" frameborder=\"0\" allow=\"autoplay; fullscreen; picture-in-picture; clipboard-write; encrypted-media\" style=\"position:absolute;top:0;left:0;width:100%;height:100%;\" title=\"GitLab Premium with Duo Core\">\u003C/iframe>\u003C/div>\u003Cscript src=\"https://player.vimeo.com/api/player.js\">\u003C/script>\n\n\u003Cbr>\u003C/br>\nPremium and Ultimate customers now have these AI-native capabilities:\n\n#### GitLab Duo Code Suggestions\n\n* Generate complete functions and code blocks from comments  \n* Get intelligent code completions as you type  \n* Support for 20+ programming languages  \n* Available in most popular IDEs\n\nTake this interactive tour to learn about GitLab Duo Code Suggestions (click on the image to start the tour).\n\n\u003Ca href=\"https://gitlab.navattic.com/code-suggestions\">\u003Cimg src=\"https://res.cloudinary.com/about-gitlab-com/image/upload/v1752175911/Blog/b5gdnls7jdyrpeyjby5j.png\" alt=\"GitLab Duo Code Suggestions cover image\">\u003C/a>\n\nLearn more in our [Duo Code Suggestions documentation](https://docs.gitlab.com/user/project/repository/code_suggestions/).\n\n#### GitLab Duo Chat\n\n* 
Explain unfamiliar code to understand complex functionality  \n* Refactor existing code to improve quality and maintainability  \n* Generate comprehensive test cases to help catch bugs earlier  \n* Fix code issues directly in your workflow\n\n![Duo Chat - API endpoint explanation](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749673912/Blog/Content%20Images/Duo_Chat_-_gif_-_API_endpoint_explanation__3_.gif)\n\nLearn more in our [Duo Chat documentation](https://docs.gitlab.com/user/gitlab_duo_chat/).\n\n> **\"For us, as GitLab users, Duo's intelligent code suggestions have become a daily asset for our developers. Combined with the chat feature, it allows for immediate feedback and iteration, resulting in faster development cycles and a more secure codebase. It's a seamless and powerful addition to our workflows.\"**\n>\n>- Felix Kortmann, Chief Technology Officer, Ignite by FORVIA HELLA\n\n## Duo Enterprise now available to GitLab Premium customers\n\nDue to strong customer demand, we're also excited to share that [GitLab Premium](https://about.gitlab.com/pricing/premium/) customers now can purchase Duo Enterprise, our full suite of AI offerings, without needing to upgrade to GitLab Ultimate. Premium customers can enjoy a rich AI experience seamlessly integrated across the software development lifecycle. This includes exciting GitLab Duo capabilities like:\n\n* [Root Cause Analysis](https://docs.gitlab.com/user/gitlab_duo/use_cases/#root-cause-analysis-use-cases) helps resolve CI/CD pipeline failures quickly, ensuring your CI/CD pipelines remain green.  \n* [Code Review](https://docs.gitlab.com/user/project/merge_requests/duo_in_merge_requests/#have-gitlab-duo-review-your-code) enables faster merge request reviews by leveraging Duo as a code reviewer.  \n* [Advanced Chat](https://docs.gitlab.com/user/gitlab_duo_chat/) summarizes conversations, helps understand code changes, and provides advanced configuration assistance.  
\n* [Self-Hosted](https://docs.gitlab.com/administration/gitlab_duo_self_hosted/) enables Duo to be leveraged within air-gapped and offline environments by hosting approved AI models for Duo to use.\n\nIn addition to Duo Enterprise availability, we continue to invest in the success of GitLab Premium customers. Since the launch of GitLab 17, [we’ve shipped more than a hundred features and improvements](https://gitlab.com/gitlab-org/gitlab/-/releases), including: \n\n* [**CI/CD Catalog**](https://about.gitlab.com/blog/ci-cd-catalog-goes-ga-no-more-building-pipelines-from-scratch/) enables developers to share, discover, and reuse   \npre-existing CI/CD components and configurations.  \n* [**Artifact registry**](https://docs.gitlab.com/user/packages/virtual_registry/) gives developers secure access to artifacts and seamless integration with CI/CD pipelines.  \n* [**Remote development**](https://docs.gitlab.com/user/project/remote_development/) enables developers to work in on-demand,  \ncloud-based development environments.\n\n> [Learn more about GitLab Premium features.](https://about.gitlab.com/pricing/premium/#wp-premium-features)\n\n## GitLab Duo: AI that meets organizations where they are\n\nGitLab customers have a comprehensive menu of Duo offerings, across our Pro and Enterprise solutions, to meet you where you are in the AI adoption cycle – the further along your teams are, the more capabilities you can use to build, test, and deploy secure software faster.\n\n![Key features in Duo plans](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749673912/Blog/Content%20Images/Screenshot_2025-05-14_at_8.50.34_AM.png)\n\n## How current GitLab Ultimate and Premium customers can get started with Duo\n\nStarting with GitLab 18.0, for existing Ultimate and Premium customers, Duo Code Suggestions and Chat features will be off by default but can easily be enabled – learn how below.\n\nTo start experiencing GitLab Premium and Ultimate with Duo: \n\n1. 
Ensure you're on GitLab Premium or Ultimate. If not, you can start a free, 60-day trial. \n\n2. Enable GitLab Duo in your organization settings.\n\n3. If using a local IDE, install the appropriate GitLab [Editor Extension](https://docs.gitlab.com/editor_extensions/#available-extensions). \n\n4. Start using Code Suggestions and Chat in your preferred supported local IDE or the GitLab Web IDE.\n\n**Note:** For new customers and trials, GitLab's AI capabilities will be enabled automatically.\n\n## AI-native development requires a DevSecOps platform\n\nAI is fundamentally reshaping the developer experience. Organizations won't just have more people building software. They'll have more production-ready code generated by AI – **making GitLab more essential than ever.** \n\nWe built GitLab Premium and Ultimate with Duo specifically for this new reality, giving teams one secure foundation for all their code. As AI generates code across your organization, GitLab becomes your control center: no separate tools for security scanning, compliance checks, or managing pipelines. Just a single, unified platform that scales with your organization and helps ensure all code meets your standards before reaching production. As AI accelerates your development, GitLab enables you to maintain control, security, and quality from end to end.\n\n> To learn more about GitLab Duo and all the ways it can transform how your team works, [visit our GitLab Premium page](https://about.gitlab.com/pricing/premium/) or if you are a GitLab customer, reach out to your GitLab representative to schedule a demo. 
Finally, we invite you to join us on June 24, 2025, for our [GitLab 18 virtual launch event](https://about.gitlab.com/eighteen/) to learn about the future of AI-native software development.\n",[9,482,1135,704,725],{"slug":1953,"featured":91,"template":680},"gitlab-premium-with-duo","content:en-us:blog:gitlab-premium-with-duo.yml","Gitlab Premium With Duo","en-us/blog/gitlab-premium-with-duo.yml","en-us/blog/gitlab-premium-with-duo",{"_path":1959,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":1960,"content":1966,"config":1972,"_id":1974,"_type":14,"title":1975,"_source":16,"_file":1976,"_stem":1977,"_extension":19},"/en-us/blog/gitlab-suggested-reviewers",{"title":1961,"description":1962,"ogTitle":1961,"ogDescription":1962,"noIndex":6,"ogImage":1963,"ogUrl":1964,"ogSiteName":693,"ogType":694,"canonicalUrls":1964,"schema":1965},"Unblock code reviews with GitLab Suggested Reviewers","Identify the right reviewers more quickly, saving time and accelerating the software development lifecycle.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749666316/Blog/Hero%20Images/codereview2.png","https://about.gitlab.com/blog/gitlab-suggested-reviewers","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Unblock code reviews with GitLab Suggested Reviewers\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Itzik Gan Baruch\"}],\n        \"datePublished\": \"2023-09-21\",\n      }",{"title":1961,"description":1962,"authors":1967,"heroImage":1963,"date":1968,"body":1969,"category":10,"tags":1970},[1735],"2023-09-21","\nIn the world of software development, speed is of the essence. The faster you can merge your code, the quicker you can iterate, innovate, and deliver value to your users. 
However, there's a roadblock that often hinders this need for speed – the arduous process of finding the right reviewer for your merge request (MR).\n\nImagine this: You've just finished a brilliant piece of code that's ready to be integrated into your project. You're excited to see it in action, and so are your teammates. But before your masterpiece can join the project, it must go through the crucial stage of code review. Here's where the challenge begins.\n\nIn many development teams, finding the ideal reviewer can be a time-consuming task. You might need someone with expertise in a particular area, someone who's available at the moment, and someone who can provide insightful feedback promptly. The longer it takes to locate the right reviewer, the more your code languishes in review limbo, delaying the entire development cycle.\n\nAnd let's not forget the frustration of starting a review with the wrong person, only to realize that their insights aren't quite what you needed. Backtracking to find another reviewer can be a difficult process, compounding the delays and creating unnecessary bottlenecks.\n\nThis challenge becomes even more pronounced as software projects grow in size and complexity, especially when multiple teams are involved. Coordinating reviews across team boundaries can be a logistical nightmare. \n\nWhat's worse is that these delays often set off a chain reaction, leading to even more delays down the line, as other tasks and dependencies pile up, affecting not just the current MR but the entire project's timeline.\n\n![development graphic](https://about.gitlab.com/images/blogimages/2023-09-22-suggested-reviewers/code-review.png)\n\nIn this blog post, we'll introduce you to [GitLab Suggested Reviewers](https://docs.gitlab.com/ee/user/project/merge_requests/reviews/#suggested-reviewers). Suggested Reviewers is designed to streamline your MR workflow by ensuring that you connect with the right reviewers quickly and efficiently. 
Say goodbye to wasted time and welcome to a faster, more agile development cycle.\n\n## What is Suggested Reviewers?\nSuggested Reviewers leverages a machine learning algorithm that analyzes the changes in an MR and a project’s contribution graph to suggest reviewers with contextual knowledge. It generates a list of up to five suggested reviewers. The list is presented in the Reviewers dropdown in the merge request sidebar, empowering the author to immediately select available reviewers. These suggestions are contextual to the changes in the MR. Additional commits to MRs may change the reviewer suggestions, which are automatically updated in the reviewer dropdown list.\n\nIt also will display if a suggestion is a code owner or if they can merge the changes, helping drive towards action.\n\n![suggested reviewers](https://about.gitlab.com/images/blogimages/2023-09-22-suggested-reviewers/suggested-reviewers.png) \n\n## Key benefits of Suggested Reviewers\nHere are the key benefits of Suggested Reviewers.\n- **Time savings.** Suggested Reviewers eliminates the need for guesswork or manual reviewer selection. The feature automates the process, saving time for developers.\n- **Enhanced collaboration.** By connecting you with the right reviewers, Suggested Reviewers promotes collaboration and encourages knowledge sharing among team members.\n- **Improved code quality.** Suggested Reviewers enables developers to have the most qualified individuals review their code, resulting in better quality and more reliable software.\n- **Reduced bottlenecks.** Suggested Reviewers speeds up the review process and reduces bottlenecks by quickly identifying available and willing reviewers.\n- **Personalized suggestions.** Suggested Reviewers considers individual expertise and past interactions, providing tailored recommendations.\n\n## How to get started with Suggested Reviewers\n\nIn this blog post, we've explored how Suggested Reviewers transforms the development lifecycle. 
It's not just about faster code reviews; it's about faster, more efficient, and more collaborative software development from start to finish. \n\n> Learn more in our [Suggested Reviewers documentation](https://docs.gitlab.com/ee/user/project/merge_requests/reviews/#suggested-reviewers).\n",[786,1971,9],"collaboration",{"slug":1973,"featured":6,"template":680},"gitlab-suggested-reviewers","content:en-us:blog:gitlab-suggested-reviewers.yml","Gitlab Suggested Reviewers","en-us/blog/gitlab-suggested-reviewers.yml","en-us/blog/gitlab-suggested-reviewers",{"_path":1979,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":1980,"content":1985,"config":1990,"_id":1992,"_type":14,"title":1993,"_source":16,"_file":1994,"_stem":1995,"_extension":19},"/en-us/blog/gitlab-survey-highlights-wins-challenges-as-orgs-adopt-devsecops",{"title":1981,"description":1982,"ogTitle":1981,"ogDescription":1982,"noIndex":6,"ogImage":1884,"ogUrl":1983,"ogSiteName":693,"ogType":694,"canonicalUrls":1983,"schema":1984},"GitLab survey highlights wins, challenges as orgs adopt DevSecOps","This year’s survey findings show that DevSecOps principles, together with a DevSecOps platform, help organizations ship more secure software, faster.","https://about.gitlab.com/blog/gitlab-survey-highlights-wins-challenges-as-orgs-adopt-devsecops","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"GitLab survey highlights wins, challenges as orgs adopt DevSecOps\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"David DeSanto, Chief Product Officer, GitLab\"}],\n        \"datePublished\": \"2023-04-20\",\n      }",{"title":1981,"description":1982,"authors":1986,"heroImage":1884,"date":1987,"body":1988,"category":1156,"tags":1989},[1486],"2023-04-20","\nSecurity is everyone’s responsibility. And when everyone works together and has access to the same tools, you don’t have to sacrifice performance, efficiency, or security. 
That's the message from the respondents of our recent survey of software developers, IT operations, and IT security professionals worldwide. Where there is unity among Development, Security, and Operations in the common goal of securing the software supply chain, there is success.\n\nOur first report from the survey, [Security Without Sacrifices](https://about.gitlab.com/developer-survey/previous/2023/), focuses on this throughline and illuminates where [DevSecOps](/topics/devsecops/) professionals feel positive about their efforts to secure the software development lifecycle and where they feel work still needs to be done. While the results are not surprising — they align with what I hear from customers every day — they reinforce GitLab’s belief that DevSecOps principles, coupled with a DevSecOps platform, help organizations ship more secure software, faster. \n\nFor instance, in last year’s report, a majority of development, security, and operations professionals said they felt individually responsible for security. This year, 53% of respondents said they are responsible for application security *as part of a larger team*. And 71% of security professionals said at least a quarter of all security vulnerabilities are being spotted by developers, up from 53% in 2022.\n\nWhat this tells us is that security is indeed making its way deep into the software development lifecycle and as more innovation is introduced into the daily workflow, including AI-assisted capabilities, the benefits are tangible.\n\nHere’s what the report findings suggest organizations should keep in mind so they can get the most out of DevSecOps.\n\n## AI is now inseparable from DevSecOps\nFor the past several years, we’ve seen AI become more and more established in software development workflows. In this year’s report, nearly two-thirds (65%) of developers said they are using AI in testing efforts or will be in the next three years. 
We also saw an uptick this year in the number of developers who are using AI to check code.\n\nAI represents a tectonic shift in the market that will have profound effects on how organizations deliver value to customers. To take full advantage of AI, it will be critical for organizations to apply AI-assisted workflows across the entire software development lifecycle and make them available to all personas — not just developers but everyone involved in the delivery of software value, from security and compliance teams to product development and marketing.\n\n## Security toolchain expansion is unsustainable\nThis year’s report showed that toolchain sprawl may be a bigger concern for security professionals than for the rest of the team; 57% of security respondents said they use six or more tools, compared to 48% of developers and 50% of operations professionals. We’re also seeing signs that security professionals are using _more_ tools than in past years. This is in line with what security practitioners tell me: They use different tools for each security function, including composition analysis, fuzzing, DAST, and dependency scanning.\n\nThe rise of DevOps and DevSecOps is making it easier for software development teams to consolidate tools, but the increased pressure around software supply chain security means this trend is not holding for security as it is for other roles. Security practitioners select the tools that get the job done and the tools they’re most comfortable with, but as security budgets tighten, that’s no longer going to be a sustainable strategy. 
We should expect to see a bigger push to consolidate security toolchains over the next several years.\n\n## Efficiency and security cannot be mutually exclusive\nThe first wave of budget tightening seems to be here already — 85% of the security professionals we surveyed told us they have the same or less budget this year than they did in 2022, and security professionals were also more likely than both developers and operations professionals to cite macroeconomic forces as a primary factor driving DevOps/DevSecOps to scale at their organizations. In this environment, organizations (and security teams) need to do more with less.\n\nFor many of the organizations I’ve talked to, tighter budgets mean more than just cutting costs. Organizations need to ensure they’re getting a swifter return on their DevSecOps investments. That return on investment could look like increased efficiency, translating into accelerated value delivery for customers, faster innovation, and more revenue. Or it could mean incorporating security and compliance tools earlier in the development lifecycle, reducing risk. Ideally, it’s all of the above. As organizations seek ways to stay ahead of the competition, security and efficiency are both non-negotiable.\n\n## A platform approach: The winning formula for DevSecOps\nHow can organizations foster collaboration, reduce toolchain friction, and boost efficiency without sacrificing security? A platform that puts DevSecOps methodologies into practice. This year’s respondents identified security and efficiency as the top two benefits of adopting a DevSecOps platform, ahead of automation, cost savings, and collaboration.\n\nA DevSecOps platform enables teams to collaborate in a single application, shortening cycle times, reducing risks, and accelerating everyone’s workflows. 
We see proof points in this year’s data: Security professionals who use a DevSecOps platform were significantly more likely than those who don’t use a platform to say developers catch more security vulnerabilities and had a higher opinion of their organization’s security efforts. \n\nIt has become important for organizations to foster collaboration and engagement to keep development, security, and operations teams happy. \n\n## Explore this year’s report\nRead the first report in our 2023 Global DevSecOps Report Series, [Security Without Sacrifices](https://about.gitlab.com/developer-survey/), and stay tuned for more reports on the data in the coming months.\n",[1894,703,678,9],{"slug":1991,"featured":6,"template":680},"gitlab-survey-highlights-wins-challenges-as-orgs-adopt-devsecops","content:en-us:blog:gitlab-survey-highlights-wins-challenges-as-orgs-adopt-devsecops.yml","Gitlab Survey Highlights Wins Challenges As Orgs Adopt Devsecops","en-us/blog/gitlab-survey-highlights-wins-challenges-as-orgs-adopt-devsecops.yml","en-us/blog/gitlab-survey-highlights-wins-challenges-as-orgs-adopt-devsecops",{"_path":1997,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":1998,"content":2003,"config":2009,"_id":2011,"_type":14,"title":2012,"_source":16,"_file":2013,"_stem":2014,"_extension":19},"/en-us/blog/gitlab-uses-anthropic-for-smart-safe-ai-assisted-code-generation",{"title":1999,"description":2000,"ogTitle":1999,"ogDescription":2000,"noIndex":6,"ogImage":861,"ogUrl":2001,"ogSiteName":693,"ogType":694,"canonicalUrls":2001,"schema":2002},"GitLab uses Anthropic for smart, safe AI-assisted code generation","Anthropic’s Claude AI model supports the delivery of helpful, trusted code in GitLab Duo Code Suggestions.","https://about.gitlab.com/blog/gitlab-uses-anthropic-for-smart-safe-ai-assisted-code-generation","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"GitLab uses Anthropic for smart, 
safe AI-assisted code generation\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Kevin Chu\"}],\n        \"datePublished\": \"2024-01-16\",\n      }",{"title":1999,"description":2000,"authors":2004,"heroImage":861,"date":2006,"body":2007,"category":10,"tags":2008},[2005],"Kevin Chu","2024-01-16","GitLab recently launched GitLab Duo Code Suggestions into general availability. Code Suggestions includes the ability to generate algorithms or code blocks directly within the developer's IDE, a capability that uses [Anthropic's](https://www.anthropic.com/) generative AI model, [Claude](https://www.anthropic.com/index/claude-2-1). Integrated into the GitLab Duo portfolio of AI-assisted features, Claude is compatible with GitLab’s principles of [transparency and privacy](https://about.gitlab.com/the-source/ai/building-a-transparency-first-ai-strategy-7-questions-to-ask-your-devops/) by design and provides a high-integrity foundation for code generation.\n\nIn this post, you'll learn the advantages of code generation and how GitLab, together with Anthropic, is leveraging AI to responsibly boost developer productivity.\n\n## How AI-assisted code generation works\n\nCode Suggestions is incredibly useful as a coding companion that shows the suggestions as a developer types. It helps save developer time and keystrokes, reducing the effort for rote tasks and giving developers time back in their day. But what if a developer wants to do even more with generative AI?\n\nEnter code generation.\n\nImagine needing to write a new complex function based on an unfamiliar algorithm, or write a large amount of boilerplate code. 
Instead of struggling through these tasks with gritted teeth, code generation allows developers to simply define what they want to do in comments or multi-line comment blocks, and then Code Suggestions generates the code from there.\n\nHere is an example of Code Suggestions generating a JavaScript function that calculates the Levenshtein distance, a string metric useful for comparing the difference between two sequences:\n\n\u003Cimg src=\"https://res.cloudinary.com/about-gitlab-com/image/upload/v1752175962/Blog/lkrk16unp4dcy3c4zwvw.gif\" alt=\"Code Suggestions generating JavaScript function\" width=\"100%\" height=\"auto\">\n\nHere is another example showing a multi-line comment in Python. We want Code Suggestions to generate a Tornado Web Server that does three things: log in, run a scan, and review the results. By providing the specific instructions, including details such as the framework and the components to use, Code Suggestions was able to generate a Tornado App, despite this author being unfamiliar with Tornado.\n\n\u003Cimg src=\"https://res.cloudinary.com/about-gitlab-com/image/upload/v1752175967/Blog/pxcdppnpzwfhgopxh999.gif\" alt=\"Code Suggestions generating Tornado app\" width=\"100%\" height=\"auto\">\n\n## Safety through focus and trustworthiness\nDevelopers expect AI coding assistants to not only be helpful, but also accurate and safe. The system should generate precisely what is asked for while limiting deviation and [hallucination](https://www.ibm.com/topics/ai-hallucinations). Customers want assurances that AI-generated code can be trusted.\n\nThroughout GitLab's evaluation of certain code generation models, Claude stood out for its ability to mitigate distracting, unsafe, or deceptive behaviors. Claude also demonstrated consistent and accurate code generation throughout our testing.\n\nGitLab's use of Anthropic's Claude enables Code Suggestions to balance automation with trust. 
Code Suggestions helps users become more efficient without sacrificing reliability — a win for augmented development.\n\n## What’s next\n\nReady to experience the future of code generation? Start your [free trial of GitLab Duo](https://about.gitlab.com/gitlab-duo/) today and unlock the power of AI-assisted development!",[9,231,703,849],{"slug":2010,"featured":91,"template":680},"gitlab-uses-anthropic-for-smart-safe-ai-assisted-code-generation","content:en-us:blog:gitlab-uses-anthropic-for-smart-safe-ai-assisted-code-generation.yml","Gitlab Uses Anthropic For Smart Safe Ai Assisted Code Generation","en-us/blog/gitlab-uses-anthropic-for-smart-safe-ai-assisted-code-generation.yml","en-us/blog/gitlab-uses-anthropic-for-smart-safe-ai-assisted-code-generation",{"_path":2016,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":2017,"content":2022,"config":2027,"_id":2029,"_type":14,"title":2030,"_source":16,"_file":2031,"_stem":2032,"_extension":19},"/en-us/blog/gitlab-visual-studio-extension",{"title":2018,"description":2019,"ogTitle":2018,"ogDescription":2019,"noIndex":6,"ogImage":820,"ogUrl":2020,"ogSiteName":693,"ogType":694,"canonicalUrls":2020,"schema":2021},"GitLab for Visual Studio, including code suggestions, available in Beta","GitLab for Visual Studio is now available in Beta, bringing GitLab Duo code suggestions to Visual Studio.","https://about.gitlab.com/blog/gitlab-visual-studio-extension","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"GitLab for Visual Studio, including code suggestions, available in Beta\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Kai Armstrong\"}],\n        \"datePublished\": \"2023-06-29\",\n      }",{"title":2018,"description":2019,"authors":2023,"heroImage":820,"date":2024,"body":2025,"category":10,"tags":2026},[1448],"2023-06-29","\n\n\u003Ci>This blog is the latest post in an ongoing series about GitLab's journey to \u003Ca 
href=\"/blog/ai-ml-in-devsecops-series/\">build and integrate AI/ML into our DevSecOps platform\u003C/a>. The first blog post can be found \u003Ca href=\"/blog/what-the-ml-ai/\">here\u003C/a>. Throughout the series, we'll feature blogs from our product, engineering, and UX teams to showcase how we're infusing AI/ML into GitLab.\u003C/i>\n\nIn June, we shared our plans to [extend code suggestions](/blog/extending-code-suggestions/) to more IDEs, thereby continuing to enhance developer productivity. Over the past several weeks, we've been iterating quickly and we can share that GitLab for Visual Studio is available ([in Beta](https://docs.gitlab.com/ee/policy/experiment-beta-support.html#beta)) from the [Visual Studio Marketplace](https://marketplace.visualstudio.com/items?itemName=GitLab.GitLabExtensionForVisualStudio).\n\nThe GitLab for Visual Studio extension supports [GitLab Duo](https://about.gitlab.com/gitlab-duo/) code suggestions for both [GitLab SaaS](https://docs.gitlab.com/ee/user/project/repository/code_suggestions.html#enable-code-suggestions-on-gitlab-saas) and [GitLab self-managed](https://docs.gitlab.com/ee/user/project/repository/code_suggestions.html#enable-code-suggestions-on-self-managed-gitlab).\n\nDownload it and let us know what you think and any issues you're having in our [feedback issue](https://gitlab.com/gitlab-org/editor-extensions/gitlab-visual-studio-extension/-/issues/38).\n\n## Getting started\nTo get started with the extension, download it from the [Visual Studio Marketplace](https://marketplace.visualstudio.com/items?itemName=GitLab.GitLabExtensionForVisualStudio). 
After the extension is downloaded and installed, you can follow our [setup instructions](https://gitlab.com/gitlab-org/editor-extensions/gitlab-visual-studio-extension/-/blob/main/README.md#setup) to get it configured.\n\n![GitLab for Visual Studio setup](https://about.gitlab.com/images/blogimages/gitlab-visual-studio-extension-setup.png)\n\n## Using the extension\nOnce you've set up the extension, make sure things are configured properly and authentication is working by checking the status bar icon.\n\n![GitLab for Visual Studio status bar icon](https://about.gitlab.com/images/blogimages/gitlab-visual-studio-extension-status-bar-icon.png)\n\nIf everything looks good, you're ready to start receiving code suggestions as you work. Just start typing and GitLab Duo will automatically provide you suggestions inline. You can press \u003Ckbd>Tab\u003C/kbd> to accept the suggestions or just keep typing to receive new suggestions.\n\n![GitLab for Visual Studio suggestions](https://about.gitlab.com/images/blogimages/gitlab-visual-studio-extension-suggestions.gif)\n\n## Iterating on AI/ML features\nWhile this brings us one step closer to reaching developers working in Visual Studio, we still have our eyes on the [JetBrains IDEs](https://gitlab.com/gitlab-org/editor-extensions/gitlab-jetbrains-plugin) as well as a native integration for [Neovim](https://gitlab.com/gitlab-org/editor-extensions/gitlab.vim). You can track these projects and stay tuned for future announcements regarding their availability.\n\nWe're also working on a [GitLab Language Server for code suggestions](https://gitlab.com/gitlab-org/editor-extensions/gitlab-language-server-for-code-suggestions). This allows us to not only standardize and iterate faster on our IDE extensions, but for users of IDEs and Editors to use GitLab Duo code suggestions even if we're not officially providing an extension. 
We look forward to providing more documentation and working with the community on this project in the future.\n\nThese efforts are just the start of how we're bringing GitLab Duo capabilities throughout the software development lifecycle to help GitLab users become more efficient and effective at their jobs. We are [looking across the software development lifecycle](/blog/what-the-ml-ai/) for painful and time-consuming tasks that are ideal for AI-assisted features. We'll continue to share these demos throughout this blog series.\n\nInterested in using these AI-generated features? [Join our waitlist](https://forms.gle/9eeUkPJauKsbLaoz5) and share your ideas.\n\nContinue reading our \"[AI/ML in DevSecOps](/blog/ai-ml-in-devsecops-series/)\" series.\n\n_Disclaimer: This blog contains information related to upcoming products, features, and functionality. It is important to note that the information in this blog post is for informational purposes only. Please do not rely on this information for purchasing or planning purposes. As with all projects, the items mentioned in this blog and linked pages are subject to change or delay. 
The development, release, and timing of any products, features, or functionality remain at the sole discretion of GitLab._\n",[703,725,704,9],{"slug":2028,"featured":6,"template":680},"gitlab-visual-studio-extension","content:en-us:blog:gitlab-visual-studio-extension.yml","Gitlab Visual Studio Extension","en-us/blog/gitlab-visual-studio-extension.yml","en-us/blog/gitlab-visual-studio-extension",{"_path":2034,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":2035,"content":2041,"config":2046,"_id":2048,"_type":14,"title":2049,"_source":16,"_file":2050,"_stem":2051,"_extension":19},"/en-us/blog/how-gitlab-duo-helps-secure-and-thoroughly-test-ai-generated-code",{"title":2036,"description":2037,"ogTitle":2036,"ogDescription":2037,"noIndex":6,"ogImage":2038,"ogUrl":2039,"ogSiteName":693,"ogType":694,"canonicalUrls":2039,"schema":2040},"Developing GitLab Duo: Secure and thoroughly test AI-generated code","Learn step-by-step how to enhance AI-generated code reliability and security using GitLab Duo and GitLab Pages (includes code samples and prompts).","https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097183/Blog/Hero%20Images/Blog/Hero%20Images/blog-hero-banner-1-0178-820x470-fy25_7JlF3WlEkswGQbcTe8DOTB_1750097183481.png","https://about.gitlab.com/blog/how-gitlab-duo-helps-secure-and-thoroughly-test-ai-generated-code","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Developing GitLab Duo: Secure and thoroughly test AI-generated code\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"David O'Regan\"}],\n        \"datePublished\": \"2024-05-30\",\n      }",{"title":2036,"description":2037,"authors":2042,"heroImage":2038,"date":2043,"body":2044,"category":10,"tags":2045},[1175],"2024-05-30","___Generative AI marks a monumental shift in the software development industry, making it easier to develop, secure, and operate software. 
Our new blog series, written by our product and engineering teams, gives you an inside look at how we create, test, and deploy the AI features you need integrated throughout the enterprise. Get to know new capabilities within GitLab Duo and how they will help DevSecOps teams deliver better results for customers.___\n\nAs AI becomes increasingly integral to software development, ensuring the security and thorough testing of AI-generated code is paramount. This article is a step-by-step guide to combining [GitLab Duo](https://about.gitlab.com/gitlab-duo/), our suite of AI features powering the DevSecOps workflow, and [GitLab Pages](https://docs.gitlab.com/ee/user/project/pages/) to secure and thoroughly test AI-generated code. You'll discover how to mitigate common risks, including how to automatically generate tests, test code, and deploy test reports – all to enhance the reliability of your AI-generated code.\n\n> Discover the future of AI-driven software development with our GitLab 17 virtual launch event. [Watch today!](https://about.gitlab.com/seventeen/)\n\n## Challenges in AI-generated code\n\nAI-generated code often faces issues such as:\n\n- Mismatched algorithms: Incorrect or suboptimal algorithms can be generated.\n- Dependency problems: AI may include dependencies that are outdated or incompatible.\n- Security vulnerabilities: AI might generate code with potential security flaws.\n\nAI-generated code often faces issues such as mismatched algorithms, dependency problems, and security vulnerabilities. A [recent study published by the Association of Computing Machinery](https://dl.acm.org/doi/pdf/10.1145/3613904.3642596) on ChatGPT’s responses to programming questions found that 52% of the answers contained incorrect information, and 77% were overly verbose. Despite these shortcomings, users preferred ChatGPT’s comprehensive and well-articulated answers 35% of the time, even overlooking misinformation 39% of the time. 
Addressing these challenges requires the use of advanced tools and frameworks.\n\n## GitLab’s approach to AI security and testing\n\nGitLab has a comprehensive content strategy focused on integrating security measures within the development workflow. By leveraging GitLab Duo for AI-powered code generation and GitLab Pages for embedding test reports, developers can ensure their AI-generated code is both secure and reliable.\n\nHere is a step-by-step guide to pair GitLab Duo and GitLab Pages to ensure secure and thoroughly tested AI-generated code by implementing a [Flask web server](https://flask.palletsprojects.com/en/3.0.x/).\n\n### 1. Create a new project on GitLab.com\n\n- Go to [GitLab.com](http://GitLab.com).\n- Click on the \"New project\" button.\n- Choose \"Create blank project\".\n- Enter a project name (e.g., AI_Code_Security).\n- Set the visibility level (Public, Internal, or Private).\n- Click \"Create project\".\n\n### 2. Enable GitLab Duo Code Suggestions\n\n- Navigate to your project.\n- Click on the \"Web IDE\" button to open the Web IDE.\n- Ensure that GitLab Duo features like Code Suggestions and Duo Chat are enabled. \n- Start coding in the [Web IDE](https://docs.gitlab.com/ee/user/project/web_ide/). As you type, GitLab Duo will provide code suggestions to help you write code more efficiently.\n\n### 3. Create a Flask web server\n\nYou can create a Flask web server using the comment (highlighted in green) in the screenshot below.\n\n![DGD testing - image 1](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097192/Blog/Content%20Images/Blog/Content%20Images/image4_aHR0cHM6_1750097192520.png)\n\n### 4. Generate tests with GitLab Duo\n\nUnit tests are essential for validating the functionality of the generated code. Use GitLab Duo’s `/tests` command to [generate testing suggestions directly in the Web IDE](https://docs.gitlab.com/ee/user/gitlab_duo_chat_examples.html#write-tests-in-the-ide). 
This command can be tailored with additional instructions to focus on specific aspects such as performance, regression, or using particular frameworks.\n\n#### Example usage in Web IDE:\n\n- Select the code for which you want to generate tests.\n- Use the command `/tests` followed by additional instructions if needed.\n\n![DGD testing - image 2](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097192/Blog/Content%20Images/Blog/Content%20Images/image1_aHR0cHM6_1750097192521.png)\n\n### 5. Identify issues with AI-generated code using GitLab Duo Chat\n\nUse GitLab Duo Chat to review and refine AI-generated code. For instance, let's check our Flask web server code for security vulnerabilities:\n\n```unset\nPrompt: Review this code for potential security vulnerabilities and dependency issues.\n\n```\n\n![DGD testing - image 3](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097192/Blog/Content%20Images/Blog/Content%20Images/image2_aHR0cHM6_1750097192523.png)\n\nGitLab Duo Chat can help identify the vulnerabilities in the above code.\n\n### 6. Generate test reports\nAfter running your tests, generate a test report that will be deployed using GitLab Pages.\n\n```unset\n\nPrompt: Write me a python script to generate a test report that will be deployed using \nGitLab Pages.\n\n```\n\n![DGD testing - image 4](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097192/Blog/Content%20Images/Blog/Content%20Images/image3_aHR0cHM6_1750097192525.png)\n\nWhat's happening here?\n\n- The script ensures the test_reports directory exists.\n- It runs the `test_server.py` file using `subprocess.run()`, capturing the output.\n- The raw output is saved to `test_reports/test_output.txt`.\n- An HTML report is generated, embedding the test output within `\u003Cpre>` tags for readability, and saved as `test_reports/index.html`.\n\n### 7. 
Deploy the test report with GitLab Pages\n\nUse [GitLab Pages](https://docs.gitlab.com/ee/user/project/pages/) to host and display the test report. Here’s the configuration for our `.gitlab-ci.yml` file to deploy the test report.\n\n```python\n\nstages:\n  - test\n  - deploy\ntest_job:\n  stage: test\n  script:\n    - python generate_test_report.py\n  artifacts:\n    paths:\n      - test_reports/\npages:\n  stage: deploy\n  script:\n    - mv test_reports public\n  artifacts:\n    paths:\n      - public\n\n ```\n\nWith this setup, the `test_job` stage runs the Python script to generate the test report. The `pages` stage moves the `test_reports` directory to `public`, which GitLab Pages uses to serve the content.\n\n### 8. Embedding test reports in MR widgets\n\nEmbedding the [test reports in MR widgets](https://docs.gitlab.com/ee/ci/testing/unit_test_reports.html) provides immediate visibility into the testing outcomes, ensuring transparency and reliability. This can be done by including the test report as an artifact in your CI/CD pipeline configuration:\n\n```python\n\nstages:\n  - build\n  - test\n  - deploy\n\nbuild_job:\n  stage: build\n  script:\n    - echo \"Building the project...\"\n    - # Your build commands here\n\ntest_job:\n  stage: test\n  script:\n    - mkdir -p test-reports\n    - python test_server.py > test-reports/results.xml\n  artifacts:\n    when: always\n    reports:\n      junit: test-reports/results.xml\n    paths:\n      - test-reports/results.xml\n\npages:\n  stage: deploy\n  script:\n    - mkdir .public\n    - mv test-reports .public/\n  artifacts:\n    paths:\n      - .public\n\n```\nBy including the test report as an artifact and specifying it in the reports section, GitLab will automatically display the test results in the MR widgets, providing immediate visibility into the testing outcomes and enhancing transparency and reliability.\n\n### Case study: AI reliability with security policies and scanners\n\nImagine a scenario where an 
AI-generated code snippet introduces a dependency that has known vulnerabilities. By using GitLab Duo and its security policies, this dependency would be flagged during the code generation process. Let’s consider an example where a snippet was generated by AI:\n\n```python\n\nimport os\nfrom flask import Flask, request\n\napp = Flask(__name__)\n\n@app.route('/search')\ndef search():\n    query = request.args.get('query')\n    execute_os_command(query)\n    return 'You searched for: ' + query\n\ndef execute_os_command(command):\n    os.system(command)\n\nif __name__ == '__main__':\n    app.run()\n\n```\n\nIn this example, the search endpoint is vulnerable to OS command injection. By leveraging GitLab's Static Application Security Testing ([SAST](https://docs.gitlab.com/ee/user/application_security/sast/)) component, this vulnerability would be detected during the CI/CD pipeline.\n\n#### Integrate SAST scanning to detect vulnerabilities\n\nGitLab SAST automatically analyzes your code for security vulnerabilities. Here’s how it can be integrated into your `.gitlab-ci.yml` file to scan for issues:\n\n```python\n\nstages:\n  - build\n  - test\n  - sast\n  - deploy\n\nbuild_job:\n  stage: build\n  script:\n    - echo \"Building the project...\"\n    - # Your build commands here\n\ntest_job:\n  stage: test\n  script:\n    - python test_server.py > test-reports/results.xml\n  artifacts:\n    when: always\n    reports:\n      junit: test-reports/results.xml\n    paths:\n      - test-reports/results.xml\n\nsast_job:\n  stage: sast\n  script:\n    - echo \"Running SAST...\"\n  artifacts:\n    reports:\n      sast: gl-sast-report.json\n  only:\n    - branches\n\npages:\n  stage: deploy\n  script:\n    - mv test-reports public\n  artifacts:\n    paths:\n      - public\n\n```\n\nIn this configuration, the `sast_job` stage runs SAST to detect vulnerabilities in the code, producing a report (`gl-sast-report.json`) that will be included in the pipeline artifacts! 
By integrating security policies and robust testing frameworks, GitLab Duo helps customers ensure that their AI-generated code is both efficient and secure.\n\n## Get started today\nThe integration of AI in software development brings significant benefits but also introduces new challenges. By using tools like GitLab Duo and GitLab Pages, developers can ensure that their AI-generated code is secure, well-tested, and reliable. Explore these tools today and join the conversation on enhancing AI security and testing!\n\n> [Start a 30-day trial of GitLab Ultimate](https://gitlab.com/-/trial_registrations/new?glm_source=about.gitlab.com/blog&glm_content=default-saas-trial) today to access GitLab Duo and GitLab Pages.\n\n## Read more of the \"Developing GitLab Duo\" series\n\n- [Developing GitLab Duo: How we validate and test AI models at scale](https://about.gitlab.com/blog/developing-gitlab-duo-how-we-validate-and-test-ai-models-at-scale/)\n- [Developing GitLab Duo: AI Impact analytics dashboard measures the ROI of AI](https://about.gitlab.com/blog/developing-gitlab-duo-ai-impact-analytics-dashboard-measures-the-roi-of-ai/)\n- [Developing GitLab Duo: How we are dogfooding our AI features](https://about.gitlab.com/blog/developing-gitlab-duo-how-we-are-dogfooding-our-ai-features/)\n- [Developing GitLab Duo: Blending AI and Root Cause Analysis to fix CI/CD pipelines](https://about.gitlab.com/blog/developing-gitlab-duo-blending-ai-and-root-cause-analysis-to-fix-ci-cd/)\n",[9,703,724,678],{"slug":2047,"featured":6,"template":680},"how-gitlab-duo-helps-secure-and-thoroughly-test-ai-generated-code","content:en-us:blog:how-gitlab-duo-helps-secure-and-thoroughly-test-ai-generated-code.yml","How Gitlab Duo Helps Secure And Thoroughly Test Ai Generated 
Code","en-us/blog/how-gitlab-duo-helps-secure-and-thoroughly-test-ai-generated-code.yml","en-us/blog/how-gitlab-duo-helps-secure-and-thoroughly-test-ai-generated-code",{"_path":2053,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":2054,"content":2060,"config":2066,"_id":2068,"_type":14,"title":2069,"_source":16,"_file":2070,"_stem":2071,"_extension":19},"/en-us/blog/how-gitlab-uses-prompt-guardrails-to-help-protect-customers",{"title":2055,"description":2056,"ogTitle":2055,"ogDescription":2056,"noIndex":6,"ogImage":2057,"ogUrl":2058,"ogSiteName":693,"ogType":694,"canonicalUrls":2058,"schema":2059},"How GitLab uses prompt guardrails to help protect customers","Learn what prompt guardrails are, how they help mitigate security risks, and what unique considerations GitLab has taken into account when implementing them.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749663918/Blog/Hero%20Images/aipower.jpg","https://about.gitlab.com/blog/how-gitlab-uses-prompt-guardrails-to-help-protect-customers","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How GitLab uses prompt guardrails to help protect customers\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"David O'Regan\"},{\"@type\":\"Person\",\"name\":\"Roger Woo\"}],\n        \"datePublished\": \"2025-01-30\",\n      }",{"title":2055,"description":2056,"authors":2061,"heroImage":2057,"date":2063,"body":2064,"category":10,"tags":2065},[1175,2062],"Roger Woo","2025-01-30","Imagine introducing a powerful new AI tool that boosts your team's productivity — accelerating code development, resolving issues faster, and streamlining workflows. The excitement is palpable, but questions about security and compliance quickly arise. How do you manage the risk of AI inadvertently exposing sensitive data or responding to malicious prompts? 
This is where prompt guardrails play a crucial role.\n\nPrompt guardrails are structured safeguards – combining instructions, filters, and context boundaries – designed to guide AI models toward secure and reliable responses. Think of them as safety rails on a bridge, working to keep data and interactions on the correct path while supporting your organization's security protocols. In this article, we'll explore how GitLab implements these guardrails, the risks they address, and their importance for security-conscious enterprises and compliance-focused teams.\n\n## Why prompt guardrails matter\n\nAI models have transformed how organizations work, offering powerful tools to enhance productivity and innovation. However, this power comes with inherent risks. Without safeguards, AI systems may unintentionally disclose sensitive information, such as personally identifiable information (PII) or proprietary business data, or potentially act on malicious instructions. Prompt guardrails address these challenges by creating boundaries for AI models to access and process approved content, contributing to reduced risk of unintended data exposure or manipulation.\n\nFor businesses operating under strict regulations like GDPR, prompt guardrails serve as essential protection mechanisms. More importantly, they build trust among decision-makers, end users, and customers, demonstrating [GitLab's commitment to secure and responsible AI usage](https://about.gitlab.com/blog/introducing-the-gitlab-ai-transparency-center/). With prompt guardrails in place, teams can embrace AI's potential while maintaining focus on protecting their critical assets.\n\n## GitLab’s approach to prompt guardrails\n\nAt GitLab, we're [building AI features](https://about.gitlab.com/blog/categories/ai-ml/) with security, transparency, and accountability in mind because we understand these elements are critical for our enterprise customers and their auditors.  
\n\nHere’s how we’re putting that into practice.\n\n### Structured prompts and context boundaries\n\nOur system utilizes tags – like `\u003Cselected_code>` or `\u003Clog>` – to define boundaries for AI model interactions. When users ask GitLab Duo to troubleshoot a job failure, relevant logs are encapsulated in `\u003Clog>` tags. This structure guides the model to focus on specific data while working to prevent the influence from unauthorized or out-of-scope information.\n\n### Filtering and scanning tools\n\nWe employ tools like Gitleaks to scan inputs for secrets (API keys, passwords, etc.) before transmission to the AI. This filtering process helps minimize the potential for exposing confidential information or sending credentials into a model's prompt.\n\n### Role-based insights\n\nOur guardrails support focused AI discussions while contributing to customers' compliance efforts through controlled data handling and clear documentation. Organizations can adopt AI solutions designed to align with enterprise policies and risk tolerances.\n\n## Different approaches to prompt guardrails\n\nPrompt guardrails aren't one-size-fits-all solutions. Different strategies offer unique advantages, with effectiveness varying by use case and organizational requirements. GitLab combines multiple approaches to create a comprehensive system designed to balance security with usability.\n\n### System-level filters: The first line of defense\n\nSystem-level filters serve as a proactive barrier, scanning prompts for restricted keywords, patterns, or potentially harmful content. These filters work to identify and block potential risks — such as profanity, malicious commands, or unauthorized requests — before they reach the AI model.\n\nThis approach requires continuous updates to maintain effectiveness. As threats evolve, maintaining current libraries of restricted keywords and patterns becomes crucial. 
GitLab integrates these filters into its workflows to address potential risks at the earliest stage.\n\n### Model instruction tuning: Teaching the AI to stay on track\n\nInstruction tuning involves configuring AI behavior to align with specific guidelines. Our AI models are designed to reduce potentially problematic behaviors like role play, impersonation, or generating inappropriate content.\n\nThis foundation supports responses that remain informative, professional, and focused. When summarizing discussions or analyzing code, the AI maintains focus on the provided context, ideally mitigating potential deviation into unrelated topics.\n\n### Sidecar or gateway solutions: Adding a layer of protection\n\nSidecar or gateway solutions function as security checkpoints between users and AI models, processing both inputs and outputs. Like a customs officer reviewing luggage, these components help ensure only appropriate content passes through.\n\nThis approach proves particularly valuable in environments requiring strict information control, such as regulated industries or compliance-driven workflows.\n\n### Why GitLab combines these approaches\n\nNo single strategy addresses all potential risks. GitLab's hybrid approach combines system-level filters, instruction tuning, and sidecar solutions to create a robust security framework while maintaining usability.\n\nSystem-level filters provide initial screening, while instruction tuning aligns AI behavior with security standards. Sidecar solutions offer additional oversight, supporting transparency and control over data flow.\n\nThis combination creates a framework designed to support confident AI adoption while aiming to protect sensitive data and maintain compliance requirements.\n\n## Lessons learned\n\nWhile prompt guardrails help to significantly reduce risks, no system is infallible. 
Here are some lessons we have learned along the way:\n\n* Overly restrictive rules might hamper legitimate usage, frustrate developers, or slow down workflows. Striking the right balance between protecting data and providing real value is key.\n* Threat landscapes change, as do the ways people use AI. Regular updates to guardrails support alignment with current requirements and potential threats\n* At GitLab, we understand that no system can promise absolute security. Instead of making guarantees, we emphasize how our guardrails are designed to reduce risks and strengthen your defenses. This transparent approach builds trust by acknowledging that security is an ongoing process — one that we continuously refine to help support your organization’s evolving needs.\n* We gather feedback from actual user scenarios to iterate on our guardrails. Real-world insights help us refine instructions, tighten filters, and improve scanning tools over time.\n\n## Summary\n\nPrompt guardrails go beyond being a technical solution — they represent GitLab’s commitment to prioritizing AI security for our customers. By helping to reduce exposure, block harmful inputs, and ensure clear traceability of AI interactions, these guardrails aim to provide your teams with the confidence to innovate securely.\n\nWith [GitLab Duo](https://about.gitlab.com/gitlab-duo/), our structured prompts, scanning tools, and carefully tuned instructions work together to help keep AI capabilities aligned with compliance standards and best practices. 
Whether you’re a developer, auditor, or decision-maker, these safeguards aim to enable you to embrace AI confidently while staying true to your organization’s security and compliance goals.\n\n> [Learn more about GitLab Duo and get started with a free, 60-day trial today!](https://about.gitlab.com/gitlab-duo/)",[9,1036],{"slug":2067,"featured":91,"template":680},"how-gitlab-uses-prompt-guardrails-to-help-protect-customers","content:en-us:blog:how-gitlab-uses-prompt-guardrails-to-help-protect-customers.yml","How Gitlab Uses Prompt Guardrails To Help Protect Customers","en-us/blog/how-gitlab-uses-prompt-guardrails-to-help-protect-customers.yml","en-us/blog/how-gitlab-uses-prompt-guardrails-to-help-protect-customers",{"_path":2073,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":2074,"content":2080,"config":2087,"_id":2089,"_type":14,"title":2090,"_source":16,"_file":2091,"_stem":2092,"_extension":19},"/en-us/blog/how-is-ai-ml-changing-devops",{"title":2075,"description":2076,"ogTitle":2075,"ogDescription":2076,"noIndex":6,"ogImage":2077,"ogUrl":2078,"ogSiteName":693,"ogType":694,"canonicalUrls":2078,"schema":2079},"How is AI/ML changing DevOps?","Can DevOps help AI/ML find maturity? 
Here are questions to consider.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749667540/Blog/Hero%20Images/devops-team-structure.jpg","https://about.gitlab.com/blog/how-is-ai-ml-changing-devops","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How is AI/ML changing DevOps?\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Brendan O'Leary\"}],\n        \"datePublished\": \"2022-11-16\",\n      }",{"title":2075,"description":2076,"authors":2081,"heroImage":2077,"date":2083,"body":2084,"category":2085,"tags":2086},[2082],"Brendan O'Leary","2022-11-16","\n\nThe last few years have seen an explosion in artificial intelligence, [machine learning](/blog/top-10-ways-machine-learning-may-help-devops/), and other types of projects. Companies like Hugging Face and applications like [DALL-E 2](https://openai.com/dall-e-2/) have brought to the mainstream what the power of AI/ML can bring to the next generation of computing and software. As every company has become a software company over the last few decades, the ability to innovate and leverage the ever-growing amount of data that organizations have access to have become where enterprises turn to compete.\n\nHowever, a lot of AI/ML projects get stalled from several challenges that may seem familiar to software professionals who have been around since [the early days of DevOps](/blog/the-journey-to-a-devops-platform/).  Adoption and optimization of artificial intelligence and machine learning have been hampered by a lack of repeatability for experiments, a disparity of tools and information silos, and a lack of team collaboration.\n\n## A new model for data modeling\n\nOne of the first ways to look at this problem is to make sure that the mental model is in place to allow the team to reason about both the strategic vision for AI/ML at your organization. 
And once that has been established, also think about the tactical “jobs to be done” to lay the foundation for that work.\n\nStrategically, there are many teams that have to come together for a successful AI/ML program. First, the data has to both be acquired and transformed into a usable set of clean data. Often referred to as [“DataOps”](/blog/introducing-modelops-to-solve-data-science-challenges/) this involves the typical “ETL” or extract, load, transform processes data has to go through to be useful for teams. From there, you have to productionize the data workloads through MLOps - the experimentation, training, testing, and deployment of meaningful models based on the extracted and transformed data.\n\nAnd once those two steps are complete, you can finally understand how to make production use cases for your data. You can use AI Assisted features to focus on improving user experiences, for financial forecasting, or for general trends and analysis of various parts of your business. Given the complexity of this value chain, the various teams and skills involved, and the current mishmash of tooling, there is a lot that teams can learn from the history of DevOps as they tackle these problems.\n\n## DevOps and AI/ML\n\nMuch like the various stages of obtaining and applying AI/ML for business uses, software development consists of many varied steps with different teams and skills sets to achieve the business goals outlined. That is why years ago, folks came up with this [concept of “DevOps”](/topics/devops/)– combining teams and having them work together in a cycle of continuous improvement towards the same goals – to combat silos and inefficiencies. \n\nData science teams are using specialized tools that don't integrate with the existing software development lifecycle tools they already use. This causes teams to work in silos, creating handoff friction and resulting in finger-pointing and lack of predictability. 
Businesses and software teams often fail to take advantage of data, and it takes months for models to get into production by which time they may be out of date or behind competitors.  Security and data ethics are frequently treated as an afterthought. This creates risk for organizations and slows innovation. \n\n## Learning from the past\n\nIf the past decades of “DevOps” evolution have taught us anything, it's that breaking down the silos between teams through the tools and processes they are using pays off dividends for business. As your team begins their [AI/ML journey](/blog/why-ai-in-devops-is-here-to-stay/) — or if you've found yourself stalling in AI/ML initiatives already — you should consider how you can consolidate teams together, ensure they are working efficiently together, and able to collaborate without boundaries.\n\nAn explosion of tools in the space is tantalizing with the promise of “getting started” quickly. But it may not set your organization up for long-term success in these areas if those tools have the effect of separating parts of your organization from one another. Creating and sustaining an AI/ML program will require intentionality behind both the processes and tools your team is using. 
That allows your teams to extract, transform and load data efficiently, tune, test and deploy models effectively, and leverage AI/ML to drive value for your stakeholders for the long haul.\n","insights",[849,231,1037,9],{"slug":2088,"featured":6,"template":680},"how-is-ai-ml-changing-devops","content:en-us:blog:how-is-ai-ml-changing-devops.yml","How Is Ai Ml Changing Devops","en-us/blog/how-is-ai-ml-changing-devops.yml","en-us/blog/how-is-ai-ml-changing-devops",{"_path":2094,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":2095,"content":2101,"config":2109,"_id":2111,"_type":14,"title":2112,"_source":16,"_file":2113,"_stem":2114,"_extension":19},"/en-us/blog/how-telesphora-is-tackling-the-opioid-crisis-machine-learning-human-centered-design",{"title":2096,"description":2097,"ogTitle":2096,"ogDescription":2097,"noIndex":6,"ogImage":2098,"ogUrl":2099,"ogSiteName":693,"ogType":694,"canonicalUrls":2099,"schema":2100},"Fighting the opioid epidemic with ML & human-centered design","GitLab users Jack Cackler and Frank Lee explain how they use predictive analytics to empower community stakeholders, like first responders and policy makers, to save lives.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749671593/Blog/Hero%20Images/telesphora-team.jpg","https://about.gitlab.com/blog/how-telesphora-is-tackling-the-opioid-crisis-machine-learning-human-centered-design","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How Telesphora is tackling the opioid epidemic with machine learning and human-centered design\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Erica Lindberg\"}],\n        \"datePublished\": \"2018-09-05\",\n      }",{"title":2102,"description":2097,"authors":2103,"heroImage":2098,"date":2105,"body":2106,"category":2085,"tags":2107},"How Telesphora is tackling the opioid epidemic with machine learning and human-centered design",[2104],"Erica 
Lindberg","2018-09-05","\n\nOn average, [115 Americans die every day](https://www.cdc.gov/drugoverdose/epidemic/index.html) from an opioid overdose. The team at [Telesphora](https://telesphora.com/) is on a mission to help health care professionals and local communities change that.\n\nIn 2017, the United States Department of Health and Human Services (HHS) declared the current opioid crisis a public health emergency as the number of [deaths involving opioids](https://www.drugabuse.gov/related-topics/trends-statistics/overdose-death-rates) in the United States skyrocketed from approximately 10,000 in 2002 to an estimated 49,000 in 2017.\nIn response to the crisis, the HHS released a [five-point strategy](https://www.hhs.gov/opioids/about-the-epidemic/hhs-response/index.html) for fighting the opioid epidemic. Among the key priorities you’d expect to see from a health crisis report (e.g. better prevention, treatment, and recovery services; better pain management) is **better data**, and they’ve turned to computer and data scientists for help.\n\n![US map of opioid epidemic](https://about.gitlab.com/images/blogimages/telesphora/us-map-crisis.jpg){: .medium.center}\n*\u003Csmall>In 2016, the number of overdose deaths involving opioids was 5 times higher than in 1999.\u003C/small>*\n\n## Designing for people\n\nJack Cackler is a machine learning specialist. Frank Lee is a pain management specialist. Under typical circumstances, these two may have never met. 
But when the HHS decided to hold an unprecedented [national opioid crisis code-a-thon](https://www.hhs.gov/challenges/code-a-thon/index.html), they didn’t just enlist developers – they brought in stakeholders from every side of the issue to develop data-driven solutions to combat the opioid epidemic across three tracks: treatment, usage, and prevention.\n\n[Origami Innovations](https://origamiinnovations.com/), a design, innovation, and solution lab powered by a Yale University students, was invited to the code-a-thon, bringing Cackler, Lee, and co-founders Matthew Erlendson, fourth-year medical student at Yale University and founder of Origami Innovations, and Dara Rouholiman, a digital health, data, and machine learning consultant together for the time. After winning the Treatment Track and receiving a $10,000 prize, they formed Telesphora, a human-centered data science platform.\n\n“One of the things that we were involved with was coming up with the core themes for the hackathon,” said Frank Lee, co-founder of Telesphora. “One of the ways that we do that is by human-centered design thinking.”\n\nHuman-centered design is an approach to design that considers the human perspective in every step of the problem-solving process. As Jack Cackler, co-founder at Telesphora, explains, “Sometimes, especially for those with a technical background, there’s a tendency to just focus on a technical solution. We really tried to get the story behind how this [opioid crisis] really impacted people.”\n\n> \"There’s a tendency to just focus on a technical solution. We really tried to get the story behind how this [opioid crisis] really impacted people.”\n\nCackler and team knew they wanted to design a human-centered solution. 
Discovering that the stigma of chronic opioid use was preventing treatment, they started asking questions:\n\n- *How might we treat this like a disease to reduce stigma, taking an empathetic approach similar to outbreaks of the flu or STDs?*\n- *How might we better predict community outbreaks?*\n- *How might we contain high mortality outbreaks, such as bad batches of drugs, to save lives in real time?*\n\n“We involved all the stakeholders in the crisis, which includes not only the providers, the scientists, and the administrators of the local and the state regions, but also the patients and families of patients who are affected by the overdose,” said Lee. “After doing a lot of brainstorming with these participants, we knew there needed to be better communication between first responders. We aimed our solution toward first responders and how they can help each other better allocate resources to help with the overdoses.”\n\n## Empathy over stigma\n\nOn June 23, 2016 in New Haven, Connecticut, where many on Cackler and Lee’s code-a-thon team called home, 12 patients, found within a one-block radius, were taken to Yale New Haven Hospital for opioid overdose. Three lost their lives due to a shortage of the drug Narcan (naloxone), a drug that can treat an opioid overdose to prevent death; the shelf life is short and the cost is high.\n\nPart of the problem, according to Lee and Cackler, is that there’s a common assumption that there’s a uniform distribution of overdoses, therefore, you can accommodate the demand. However, data analysis and conversations with first responders show that overdoses happen in spikes, like the event in New Haven.\n\n“There will be a new distribution channel of some opioid in some city. And then all of a sudden, you'll have a dozen, two dozen overdoses in a weekend, and there's just no way that the ambulances in the city can service that demand,” said Cackler. 
If the outbreak in New Haven could have been predicted, health agencies could have prepared and saved lives.\n\n![telesphora interface](https://about.gitlab.com/images/blogimages/telesphora/hhs1.png){: .medium.center}\n*\u003Csmall>Telesphora is a platform that uses real-time, open-access data and machine learning to predict where and when increases in opioid overdose and mortality will occur.\u003C/small>*\n\nThe solution Cackler, Lee, and the team came up with, now Telesphora, aimed to do just that. Using real-time data and future-trend data, they built a platform that empowers communities to predict outbreaks, increases access to treatment and resources, and reduces the stigma of opioid use.\n\n## Predictive analytics and user-friendly tools save lives\n\nKnowing that if an overdose outbreak is predicted before it happens, life-saving medicine can be allocated to the soon-to-be affected area to save lives, the Telesphora team used predictive analytics and user-friendly design to build a projection model and visualize the data.\n\n> \"If the outbreak in New Haven could have been predicted, health agencies could have prepared and saved lives.\"\n\nStarting with historical overdose data and network analysis of supply movements and overdoses, they created a spatiotemporal Poisson process to project future opioid overdose trends at any given space and time. The Poisson process takes real-time data and uses the geographic information, temporal information, and type of drug to predict the movement of opioids, alerting local responders and authorities of a potential overdose outbreak before it happens, bringing response time and mortality rate down.\n\n“The first alerts in this model come from neighboring cities in a flurry of mortality rate. Our tool with a geospatial analysis can predict the movement of spikes. 
When you see a spike in fentanyl in New Haven, CT, 4.8 days later you’ll see a spike happen in Fairfield,” Cackler explains.\n\n![machine learning explanation](https://about.gitlab.com/images/blogimages/telesphora/machine-learning.jpg){: .medium.center}\n*\u003Csmall>The machine learning model predicts the movement of outbreaks based on surrounding counties.\u003C/small>*\n\nWhen an outbreak is detected, it appears as a spike on the graph and the model can correlate that spike to different regions, alerting communities to how many days until that outbreak affects their area. The data visualization makes it easy for end users, like first responders, to digest the numbers and trends, showing the actual and predicted data across different regions, and the ability to filter by different drugs.\n\n“If we had this model a year before, events like what happened in New Haven could have been predicted. I think that’s really impactful and you can see in a tangible way how this is actionable,” said Cackler.\n\n*Are you using machine learning or human-centered design to build actionable solutions for the future? We want to hear from you! 
Email content@gitlab.com.*\n\nAll images courtesy of Telesphora\n{: .note}\n",[2108,9],"user stories",{"slug":2110,"featured":6,"template":680},"how-telesphora-is-tackling-the-opioid-crisis-machine-learning-human-centered-design","content:en-us:blog:how-telesphora-is-tackling-the-opioid-crisis-machine-learning-human-centered-design.yml","How Telesphora Is Tackling The Opioid Crisis Machine Learning Human Centered Design","en-us/blog/how-telesphora-is-tackling-the-opioid-crisis-machine-learning-human-centered-design.yml","en-us/blog/how-telesphora-is-tackling-the-opioid-crisis-machine-learning-human-centered-design",{"_path":2116,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":2117,"content":2123,"config":2128,"_id":2130,"_type":14,"title":2131,"_source":16,"_file":2132,"_stem":2133,"_extension":19},"/en-us/blog/how-to-leverage-gitlab-duo-for-enhanced-security-reporting",{"title":2118,"description":2119,"ogTitle":2118,"ogDescription":2119,"noIndex":6,"ogImage":2120,"ogUrl":2121,"ogSiteName":693,"ogType":694,"canonicalUrls":2121,"schema":2122},"How to leverage GitLab Duo for enhanced security reporting","Learn how GitLab Duo enables efficient, real-world security reporting for development, operations, and security teams.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098339/Blog/Hero%20Images/Blog/Hero%20Images/blog-image-template-1800x945%20%285%29_1iy516k40hwBDChKcUJ2zb_1750098339103.png","https://about.gitlab.com/blog/how-to-leverage-gitlab-duo-for-enhanced-security-reporting","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How to leverage GitLab Duo for enhanced security reporting\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Valentine Mairet\"},{\"@type\":\"Person\",\"name\":\"David O'Regan\"}],\n        \"datePublished\": \"2024-12-03\",\n      
}",{"title":2118,"description":2119,"authors":2124,"heroImage":2120,"date":1831,"body":2126,"category":10,"tags":2127},[2125,1175],"Valentine Mairet","Good security reporting is crucial to maintain a good security posture because it provides detailed insights into incidents. With this information, organizations can better understand vulnerabilities, improve defenses, and prevent similar threats in the future. At GitLab, the [Security division](https://handbook.gitlab.com/handbook/security/#division-structure) has created use cases for GitLab Duo to improve reporting capabilities and enhance operational efficiency. \n\n## GitLab Duo’s security capabilities\n\nThe GitLab Security division uses GitLab’s built-in [incidents](https://docs.gitlab.com/ee/operations/incident_management/incidents.html) to manage and report on security incidents. Incidents are handled, documented, and resolved in GitLab, enabling the use of AI-driven [GitLab Duo](https://about.gitlab.com/gitlab-duo/) as an assistant when performing security operations like incident response. \n\nParticularly in incident analysis and reporting, GitLab Duo is highly efficient and accurate at creating proper documentation and is a great “pair programmer” when solving security incidents.\n\n## GitLab Duo features for security reporting\n\nGitLab Duo offers many features that enhance security reporting:\n\n- **Root Cause Analysis:** GitLab Duo can explain vulnerabilities and understand the context of an incident issue, making it an excellent assistant for performing root cause analyses of security incidents.\n- **Vulnerability Explanation:** Provides detailed insights into identified vulnerabilities, including potential exploitation methods and remediation steps. 
This feature aids developers and security analysts in understanding and addressing security issues effectively.\n- **Vulnerability Resolution:** Assists in fixing vulnerabilities by generating merge requests that address the identified issues, streamlining the remediation process.\n- **Code Explanation:** Helps users comprehend specific code segments by offering clear explanations, which is particularly useful when dealing with complex or unfamiliar codebases.\n- **Test Generation:** Facilitates early bug detection by generating tests for selected code, ensuring that security vulnerabilities are identified and addressed promptly.\n- **Refactor Code:** Suggests improvements or refactoring for selected code to enhance its quality and maintainability, contributing to a more secure codebase.\n- **Fix Code:** Identifies and rectifies quality issues such as bugs or typos in the selected code, helping maintain a robust and secure codebase.\n\n## Practical use cases\n\nFor the purpose of demonstrating practical use cases, the Security Incident Response Team created a dummy incident with following limited information:\n\n![Incident report](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098346/Blog/Content%20Images/Blog/Content%20Images/image6_aHR0cHM6_1750098346297.png)\n\nSeveral comments were added as the team would normally proceed:\n\n![Comments added to report](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098346/Blog/Content%20Images/Blog/Content%20Images/image1_aHR0cHM6_1750098346297.png)\n\n### Incident reporting\n\nGitLab Duo is able to comprehensively keep track of all information inside an incident issue, including the issue description, comments, and labels. When handling security incidents, information often is all over the place and can change over time. It can easily get lost or overlooked. GitLab Duo is excellent at finding relevant information again to create accurate incident reports. 
\n\nNavigate to your incident issue and open GitLab Duo Chat. You can engineer your prompt so that GitLab Duo takes your exact reporting requirements into account such as what sections you need and how they should be filled out. Here is an example of the prompt we use at GitLab Security:\n\n> Required sections:\n> - Executive Summary - bottom-line-up-front that is adequate for an audience like senior leadership and CISO\n> - Mitigations & Remediations\n> - Scope & Impact (Environments, customers, team members)\n> - Cause\n> - Detection Capabilities\n>\n> Based on the issue and the required section, write the security incident report. Flag any missing information. \n\nBelow is GitLab Duo’s output that we can use to create our extensive incident report:\n\n\u003Cimg src=\"https://res.cloudinary.com/about-gitlab-com/image/upload/v1752175975/Blog/zxiafech08ck9gqjubaz.png\" alt=\"root cause analysis output\" width=\"400\" height=\"1200\">\n\u003Cimg src=\"https://res.cloudinary.com/about-gitlab-com/image/upload/v1752175985/Blog/xjc9coah4oi7rz0daw2b.png\" alt=\"root cause analysis output\" width=\"400\" height=\"1200\">\n\n### Root Cause Analysis\n\nGitLab Duo is able to explain vulnerabilities and can understand the context of an incident issue. These capabilities make GitLab Duo an excellent assistant to perform root cause analyses of security incidents. \n\nAs seen above, GitLab Duo can write its own section on the cause of an incident based entirely on the issue contents. We can dig into this further with the assistance of GitLab Duo to identify several candidates for root cause(s).  \n\nOur prompt is built as such:\n\n> “The root cause of the incident was the exposure of a personal access token in CI job logs. 
The token was leaked in a project that was not included in the current list of projects for token detection, indicating a gap in the security monitoring process.”\n> Name a few potential root causes for a PAT finding itself in CI job logs.\n\nGitLab Duo’s response is the following:\n\n![GitLab Duo Root Cause Analysis response](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098346/Blog/Content%20Images/Blog/Content%20Images/image3_aHR0cHM6_1750098346298.png)\n\nGitLab Duo Root Cause Analysis can help security teams identify avenues worth investigating. It has a pretty good grasp on security concepts and how Root Cause Analysis is led in general, so it can be a great assistant for these situations where the exact root of the problem is unclear. \n\n> Learn more about [GitLab Duo Root Cause Analysis](https://about.gitlab.com/blog/developing-gitlab-duo-blending-ai-and-root-cause-analysis-to-fix-ci-cd/).\n\n### On-demand updates\n\nSecurity incidents move fast and can be confusing, and information is constantly changing and shifting around. Higher severity incidents require a certain degree of thoroughness when it comes to reporting to senior leadership, especially when the audience is at the Director+ level. Engineers who are in the midst of a security incident can find it difficlut to condense the information such that it is digestable for executives. \n\nWe saw above that GitLab Duo is capable of delivering a pretty good executive summary. When the incident is ongoing, we need to deliver regular updates to senior leadership on the incident status and next steps. GitLab Duo is a great help for that, as well. If information is scattered across the issue in the form of a description or comments, GitLab Duo can help reassemble this information into the “bottom-line-up-front,” or BLUF summary, we need for executive updates. 
\n\nWe’ve taken the same incident right before token revocation and asked GitLab Duo for a BLUF summary where the audience is the Director of Security Operations. \n\n![Executive Summary - GitLab Duo](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098346/Blog/Content%20Images/Blog/Content%20Images/image4_aHR0cHM6_1750098346299.png)\n\n## Getting started with GitLab Duo for security\n\nGitLab Security has automated several parts of the reporting process with the help of GitLab Duo. But to get started, all you need is access to GitLab Duo Chat. GitLab Duo Chat can be your well-informed assistant for many security reporting cases and post-mortem analyses.\n\n## What’s next for GitLab Duo?\n\nGitLab is committed to continuously enhancing GitLab Duo’s capabilities. Future developments aim to integrate AI-driven features more deeply into the security workflow, providing proactive detection and resolution of vulnerabilities, streamlined incident management, and comprehensive reporting tools. 
These advancements will further empower security teams to maintain robust security postures and respond effectively to emerging threats.\n\n> [Try GitLab Duo for 60 days for free](https://about.gitlab.com/solutions/gitlab-duo-pro/sales/)!\n",[9,724,678,704,482],{"slug":2129,"featured":6,"template":680},"how-to-leverage-gitlab-duo-for-enhanced-security-reporting","content:en-us:blog:how-to-leverage-gitlab-duo-for-enhanced-security-reporting.yml","How To Leverage Gitlab Duo For Enhanced Security Reporting","en-us/blog/how-to-leverage-gitlab-duo-for-enhanced-security-reporting.yml","en-us/blog/how-to-leverage-gitlab-duo-for-enhanced-security-reporting",{"_path":2135,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":2136,"content":2142,"config":2147,"_id":2149,"_type":14,"title":2150,"_source":16,"_file":2151,"_stem":2152,"_extension":19},"/en-us/blog/how-to-successfully-deliver-your-software-development-roadmap",{"title":2137,"description":2138,"ogTitle":2137,"ogDescription":2138,"noIndex":6,"ogImage":2139,"ogUrl":2140,"ogSiteName":693,"ogType":694,"canonicalUrls":2140,"schema":2141},"How to successfully deliver your software development roadmap","Here are three common blockers and how to overcome them to fully realize the ROI of a DevSecOps platform investment.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749669103/Blog/Hero%20Images/AdobeStock_243118595.jpg","https://about.gitlab.com/blog/how-to-successfully-deliver-your-software-development-roadmap","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How to successfully deliver your software development roadmap\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"David DeSanto, Chief Product Officer, GitLab\"}],\n        \"datePublished\": \"2024-04-04\",\n      }",{"title":2137,"description":2138,"authors":2143,"heroImage":2139,"date":2144,"body":2145,"category":1156,"tags":2146},[1486],"2024-04-04","2024 is 
shaping up to be the year of DevSecOps, where more organizations realize the full potential of blending development, security, and operations through the adoption of a comprehensive platform. This is when teams will move beyond using just source code management (SCM) and tap into all the [AI-powered features](https://about.gitlab.com/topics/devops/the-role-of-ai-in-devops/) available across the software development lifecycle (SDLC), delivering better, more secure software faster. But first organizations have to knock down the blockers that can get in the way of successful [DevSecOps](https://about.gitlab.com/topics/devsecops/) adoption.\n\nIn talking to customers at organizations of all sizes, I've heard three main blockers:\n* The potential of AI is believable, but right now it seems limited to code creation and that has limited impact as there is more to the SDLC.\n* A platform seems like a great idea, but forcing my development, security, and operations team to give up their preferred tools all at once will undoubtedly cause a revolt. Yet, without everyone on the same platform, the investment is hard to justify.\n* Regulations and compliance makes it difficult to leverage a DevSecOps SaaS solution, and being on a multi-tenant solution is a non-starter for us as we are in a highly regulated industry. However, the overhead of self-hosting a DevSecOps platform is becoming untenable at our scale.\n\nWhile legitimate concerns, these blockers can be eliminated by combining DevSecOps practices and a platform approach. \"Making sure that we spend our money wisely is very, very important. GitLab allowed us to reduce our costs and centralize our work in one place. It’s been money well spent,\" says Andy Chow, Technology Chief of Staff at global fintech company Airwallex.\n\nLet's dig deeper into each blocker and see how it is resolved with a DevSecOps platform.\n\n> Discover the future of AI-driven software development with our GitLab 17 virtual launch event. 
[Watch today!](https://about.gitlab.com/seventeen/)\n\n### AI is not limited: It is having real impact across the SDLC\n\nWe know that AI is already improving the developer experience but there is so much more that AI can do across the entire SDLC. With AI, organizations can unburden development, security, and operations teams from tedious tasks by taking advantage of the efficiencies that AI provides. For instance, users can access summaries of comments in merge requests, have tests generated, refactor sections of code, and perform other time-saving actions.\n\nThat's why with [GitLab Duo](https://about.gitlab.com/gitlab-duo/), our suite of AI-powered workflows, we focus on more than just code creation — after all, [code creation only accounts for 25% of a developer's time](https://about.gitlab.com/blog/gitlab-global-devsecops-ai-report/). There is so much more that happens in the SDLC where AI can add efficiency. For instance, development, security, and operations teams that use AI-powered capabilities, such as Vulnerability Remediation and Root Cause Analysis, share that they can find and resolve vulnerabilities earlier and identify CI/CD pipeline failures faster and in a more collaborative manner.\n\n### Forget one-size-fits all, migrate your way\n\nRealizing the benefit of a DevSecOps platform is not a one-size-fits-all. You can customize your deployment to fit your organization's needs and where you are in your digital transformation journey — choosing one team at a time to adopt the platform or a full cutover. I have advice, though: Commit to using more than just source code management. A DevSecOps platform is a robust solution that includes enterprise agile planning, CI/CD, security and compliance, value stream analytics, and more. 
Also, make sure that as you deploy your platform, your users agree to get familiar with its range of capabilities — and not still maintain [a complex toolchain](https://about.gitlab.com/blog/battling-toolchain-technical-debt/).\n\nThe way to extract the most ROI and satisfaction from your migration is to show users how to get the functionality they had in their other tools from within the DevSecOps platform. To that end, we've increased our resources to support you. From [in-depth tutorials](https://about.gitlab.com/blog/tags/tutorial/) to [clear reference architectures](https://docs.gitlab.com/ee/administration/reference_architectures/), we have a vast library of content (including videos) for you to draw upon to help your users acclimate to and thrive in the DevSecOps environment. \n\nWe've also made it easier to onboarding teams, with capabilities like [remote development environments](https://about.gitlab.com/blog/quick-start-guide-for-gitlab-workspaces/), enabling organizations to reduce adoption friction. In fact, as more teams within your organization adopt GitLab, consider expanding access for other critical functions that contribute to delivering software value such as Finance, Legal, and Marketing teams. The power of a DevSecOps platform is giving everyone visibility into the SDLC which drives better collaboration, improves planning, reduces security risk, improves team velocity, and leads to faster time-to-value. This means your teams are happier and so are the users of the applications you build, secure, and deploy using GitLab.\n\n> Read how the U.S. Navy's Black Pearl [sped up onboarding using GitLab](https://about.gitlab.com/blog/u-s-navy-black-pearl-lessons-in-championing-devsecops/).\n\n**Note:** GitLab doesn't have to be introduced to the organization by the development team. 
For instance, if security teams want vulnerabilities identified and mitigated earlier in the lifecycle or increased compliance via security scanning, they can recommend that developers use the DevSecOps platform. [Read how U.K. retailer Dunelm made this happen](https://about.gitlab.com/customers/dunelm/).\n\n### Multi-tenancy is just one option; single-tenancy can address regulatory requirements\n\nKeeping software up-to-date and secure while maintaining compliance with strict regulations can make self-hosting a challenge. In June 2023, we launched [GitLab Dedicated](https://about.gitlab.com/dedicated/), our single-tenant SaaS solution, into general availability to address the needs of organizations in highly regulated industries like finance and healthcare, and in highly regulated geographies such as the European Union. GitLab Dedicated provides the secure environment organizations need for regulatory compliance, including control over data residency and isolation, while removing the overhead of self-hosting.\n\nDedicated customers are upgraded automatically every month, which means they have all of the benefits without the administration overhead. Furthermore, GitLab Dedicated comes with [GitLab Ultimate](https://about.gitlab.com/pricing/ultimate/), enabling organizations to ship secure software faster with built-in compliance visibility and controls as well as advanced security scanning capabilities.\n\n> Learn the [origins of GitLab Dedicated](https://about.gitlab.com/blog/building-gitlab-with-gitlabcom-how-gitlab-inspired-dedicated/) and how it has grown into the solution highly regulated organizations need.\n\n### Try GitLab today\n\nAs you proceed with your software development roadmap for 2024 (and beyond), consider what an AI-powered DevSecOps platform could do for your organization. 
Also keep an eye on [our Direction page](https://about.gitlab.com/direction/#fy25-rd-investment-themes) to learn about what’s coming next and [our monthly release posts](https://about.gitlab.com/blog/categories/devsecops-platform/) to learn about the latest and greatest available.\n\n> Start your trial of [GitLab Duo Pro](https://about.gitlab.com/gitlab-duo/#free-trial) or [GitLab Ultimate](https://gitlab.com/-/trials/new) for free today.\n",[9,703,482,678],{"slug":2148,"featured":91,"template":680},"how-to-successfully-deliver-your-software-development-roadmap","content:en-us:blog:how-to-successfully-deliver-your-software-development-roadmap.yml","How To Successfully Deliver Your Software Development Roadmap","en-us/blog/how-to-successfully-deliver-your-software-development-roadmap.yml","en-us/blog/how-to-successfully-deliver-your-software-development-roadmap",{"_path":2154,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":2155,"content":2161,"config":2166,"_id":2168,"_type":14,"title":2169,"_source":16,"_file":2170,"_stem":2171,"_extension":19},"/en-us/blog/icymi-key-ai-and-security-insights-from-our-developer-community",{"title":2156,"description":2157,"ogTitle":2156,"ogDescription":2157,"noIndex":6,"ogImage":2158,"ogUrl":2159,"ogSiteName":693,"ogType":694,"canonicalUrls":2159,"schema":2160},"ICYMI: Key AI and security insights from our developer community","Our latest LinkedIn Live highlights the hottest trends in AI, security, DevSecOps, and more. 
Also get a taste of the GitLab community contributions that are making an impact.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098331/Blog/Hero%20Images/Blog/Hero%20Images/blog-image-template-1800x945%20%286%29_55zMmdJIUpfh5qaPW9dtVA_1750098331584.png","https://about.gitlab.com/blog/icymi-key-ai-and-security-insights-from-our-developer-community","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"ICYMI: Key AI and security insights from our developer community\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Fatima Sarah Khalid\"}],\n        \"datePublished\": \"2024-12-05\",\n      }",{"title":2156,"description":2157,"authors":2162,"heroImage":2158,"date":2163,"body":2164,"category":1156,"tags":2165},[698],"2024-12-05","In our [November LinkedIn Live broadcast](https://www.linkedin.com/feed/update/urn:li:activity:7265408726696697857), we brought together field CTOs, developer advocates, and community leaders to discuss industry trends and showcase features making a difference in developer workflows.\n\nHere are 5 key highlights:\n\n### 1. AI adoption trends from the field\nOur field CTOs shared insights on how organizations are embracing AI across their development workflows. 
For instance, Field CTO Cherry Han highlighted how financial organizations are thinking beyond individual developer tools.\n\n\u003Cdiv style=\"padding:56.25% 0 0 0;position:relative;\">\u003Ciframe src=\"https://player.vimeo.com/video/1035388263?badge=0&amp;autopause=0&amp;player_id=0&amp;app_id=58479\" frameborder=\"0\" allow=\"autoplay; fullscreen; picture-in-picture; clipboard-write\" style=\"position:absolute;top:0;left:0;width:100%;height:100%;\" title=\"Ai Adoption Trends from the Field\">\u003C/iframe>\u003C/div>\u003Cscript src=\"https://player.vimeo.com/api/player.js\">\u003C/script>\n\n\u003Cbr>\u003C/br>\nAndrew Hasker, Field CTO for Asia Pacific and Japan, offered valuable perspective on AI adoption.\n\n\u003Cdiv style=\"padding:56.25% 0 0 0;position:relative;\">\u003Ciframe src=\"https://player.vimeo.com/video/1035388277?badge=0&amp;autopause=0&amp;player_id=0&amp;app_id=58479\" frameborder=\"0\" allow=\"autoplay; fullscreen; picture-in-picture; clipboard-write\" style=\"position:absolute;top:0;left:0;width:100%;height:100%;\" title=\"From Field CTOs\">\u003C/iframe>\u003C/div>\u003Cscript src=\"https://player.vimeo.com/api/player.js\">\u003C/script>\n\n### 2. Security coverage that makes a difference\n\nStaff Developer Advocate Fernando Diaz demonstrated how GitLab's security scanners cover the complete application lifecycle, showing how easy it is to implement [comprehensive security scanning](https://about.gitlab.com/solutions/security-compliance/) with just a few lines of code.\n\n\u003Cdiv style=\"padding:56.25% 0 0 0;position:relative;\">\u003Ciframe src=\"https://player.vimeo.com/video/1035388297?badge=0&amp;autopause=0&amp;player_id=0&amp;app_id=58479\" frameborder=\"0\" allow=\"autoplay; fullscreen; picture-in-picture; clipboard-write\" style=\"position:absolute;top:0;left:0;width:100%;height:100%;\" title=\"Security Coverage\">\u003C/iframe>\u003C/div>\u003Cscript src=\"https://player.vimeo.com/api/player.js\">\u003C/script>\n\n### 3. 
AI-powered language migration made simple\nIn an impressive demonstration, Senior Technical Marketing Manager Cesar Saavedra showed how GitLab Duo can assist in migrating applications between programming languages.\n\n\u003Cdiv style=\"padding:56.25% 0 0 0;position:relative;\">\u003Ciframe src=\"https://player.vimeo.com/video/1036170482?badge=0&amp;autopause=0&amp;player_id=0&amp;app_id=58479\" frameborder=\"0\" allow=\"autoplay; fullscreen; picture-in-picture; clipboard-write\" style=\"position:absolute;top:0;left:0;width:100%;height:100%;\" title=\"AI-Powered Language Migration Made Simple\">\u003C/iframe>\u003C/div>\u003Cscript src=\"https://player.vimeo.com/api/player.js\">\u003C/script>\n\n### 4. Making DevSecOps work smarter\n\nDeveloper Advocate Abubakar Siddiq Ango showcased how GitLab's triage features can automate routine tasks.\n\u003Cdiv style=\"padding:56.25% 0 0 0;position:relative;\">\u003Ciframe src=\"https://player.vimeo.com/video/1035388290?badge=0&amp;autopause=0&amp;player_id=0&amp;app_id=58479\" frameborder=\"0\" allow=\"autoplay; fullscreen; picture-in-picture; clipboard-write\" style=\"position:absolute;top:0;left:0;width:100%;height:100%;\" title=\"Making DEvOps Work Smarter\">\u003C/iframe>\u003C/div>\u003Cscript src=\"https://player.vimeo.com/api/player.js\">\u003C/script>\n\n### 5. 
Community contributions making an impact\n\nDirector of Contributor Success Nick Veenhof shared how community contributions are shaping GitLab's development:\n\u003Cdiv style=\"padding:56.25% 0 0 0;position:relative;\">\u003Ciframe src=\"https://player.vimeo.com/video/1035395211?badge=0&amp;autopause=0&amp;player_id=0&amp;app_id=58479\" frameborder=\"0\" allow=\"autoplay; fullscreen; picture-in-picture; clipboard-write\" style=\"position:absolute;top:0;left:0;width:100%;height:100%;\" title=\"Community Contributions Making an Impact\">\u003C/iframe>\u003C/div>\u003Cscript src=\"https://player.vimeo.com/api/player.js\">\u003C/script>\n\n## Watch on-demand\n\n[Watch the complete broadcast recording](https://www.linkedin.com/feed/update/urn:li:activity:7265408726696697857) for step-by-step demonstrations and insights from our experts. Also, be sure to [follow GitLab on LinkedIn](https://www.linkedin.com/company/gitlab-com) to stay up to date on our monthly broadcasts and get insights into our platform, DevSecOps, and software development.\n",[9,678,267,724,702],{"slug":2167,"featured":6,"template":680},"icymi-key-ai-and-security-insights-from-our-developer-community","content:en-us:blog:icymi-key-ai-and-security-insights-from-our-developer-community.yml","Icymi Key Ai And Security Insights From Our Developer Community","en-us/blog/icymi-key-ai-and-security-insights-from-our-developer-community.yml","en-us/blog/icymi-key-ai-and-security-insights-from-our-developer-community",{"_path":2173,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":2174,"content":2179,"config":2185,"_id":2187,"_type":14,"title":2188,"_source":16,"_file":2189,"_stem":2190,"_extension":19},"/en-us/blog/improve-ai-security-in-gitlab-with-composite-identities",{"title":2175,"description":2176,"ogTitle":2175,"ogDescription":2176,"noIndex":6,"ogImage":758,"ogUrl":2177,"ogSiteName":693,"ogType":694,"canonicalUrls":2177,"schema":2178},"Improve AI security in GitLab with composite identities","Learn 
how to implement AI features responsibly by applying authentication best practices with service accounts for AI agents in GitLab.","https://about.gitlab.com/blog/improve-ai-security-in-gitlab-with-composite-identities","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Improve AI security in GitLab with composite identities\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Grzegorz Bizon\"}],\n        \"datePublished\": \"2025-01-29\",\n      }",{"title":2175,"description":2176,"authors":2180,"heroImage":758,"date":2182,"body":2183,"category":678,"tags":2184},[2181],"Grzegorz Bizon","2025-01-29","Artificial intelligence (AI) is quickly becoming the backbone of modern software development, fueling developer efficiency and accelerating innovation. With the emergence of AI agents implementing code based on instructions from humans, we are learning that implementing AI-based features has its own unique set of security challenges. **How do we protect access to the resources AI needs, protect confidentiality, and avoid privilege escalation**? Few organizations are ready to answer these questions today. At GitLab, we are. We are introducing a new paradigm for identity management: composite identities.\n\nWhen AI agents are integrated into your DevSecOps workflows, previously simple questions become difficult to answer: Who authored this code? Who is the author of this merge request? Who created this Git commit? We found we had to start asking new questions: Who instructed an AI agent to generate this code? What context did AI need to build this feature? 
What were the resources AI had to access and read to generate the answer?\n\nTo answer these questions, we need to understand some fundamental aspects of AI’s identity:\n\n* Does an AI agent have its own distinct identity?\n* What is the representation of this identity?\n* How do we make it all secure?\n\n### Authentication and AI identity management \n\nWe are at the beginning of a paradigm shift in identity management in the software delivery lifecycle. Before the AI era, identity management was simpler. We had human user-based identities and machine-type identities using separate accounts.\n\nWith the emergence of AI and agentic workflows, the distinction between these two core types of identities has blurred. AI agents are supposed to work in an autonomous way, so it makes sense to think about them as machine-type accounts. On the other hand, AI agents are usually being instructed by human users, and require access to resources the human users have access to in order to complete their tasks. This introduces significant security risks — for example, the AI may provide human users with information they should not have access to. How do we avoid privilege escalation, provide auditability, and protect confidentiality in a world with AI agents?\n\n### The solution: Composite identities\n\nA composite identity is our new identity [principal](https://cloud.google.com/iam/docs/overview#concepts_related_identity), representing an AI agent’s identity that is linked with the identity of a human user who requests actions from the agent. **This enhances our ability to protect resources stored in GitLab**. Whenever an AI agent with a composite identity attempts to access a resource, we will not only authenticate the agent itself, but also link its principal with a human user who is instructing the agent, and will try to authorize both principals before granting access to a resource. Both principals need access; otherwise, the access will be denied. 
If an AI agent by itself can access a project, but a human user who instructed the agent to do so cannot, GitLab will deny the access.\n\nThe inverse is true as well — if a human user can access a confidential issue, but an AI agent can’t, then its service account will not be able to read the issue. We authorize access to every API request and for each resource an agent attempts to access this way. Composite identity without a request-scoped link to a human account will not be authorized to access any resource. For fully autonomous workloads we are also considering adding support for linking composite identities with other principals.\n\n#### Composite identity and service accounts\n\nWe redesigned our authorization framework to support composite identities, allowing multiple principals to be evaluated simultaneously when determining access rights to a resource. We enhanced our security infrastructure by implementing scoped identities across our entire system — from API requests to CI jobs and backend workers. These identities are linked to an AI agent's composite identity account also through OAuth tokens and CI job tokens. This project yielded unexpected security benefits, particularly in GitLab CI, where we upgraded job tokens to signed JSON web tokens (JWTs). Additionally, we contributed code to several open source libraries to add support for scoped identities.\n\n### Composite identity with GitLab Duo with Amazon Q\n\nIn the GitLab 17.8 release, we made composite identity for service accounts support available for customers through our GitLab Duo with Amazon Q integration. 
Amazon Q Developer agent will have composite identity enforced, which will protect your confidential GitLab resources from unauthorized access.\n\n### What’s next?\n\nTo learn more, check out our [composite identity docs](https://docs.gitlab.com/ee/development/ai_features/composite_identity.html).",[9,703,678],{"slug":2186,"featured":6,"template":680},"improve-ai-security-in-gitlab-with-composite-identities","content:en-us:blog:improve-ai-security-in-gitlab-with-composite-identities.yml","Improve Ai Security In Gitlab With Composite Identities","en-us/blog/improve-ai-security-in-gitlab-with-composite-identities.yml","en-us/blog/improve-ai-security-in-gitlab-with-composite-identities",{"_path":2192,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":2193,"content":2199,"config":2205,"_id":2207,"_type":14,"title":2208,"_source":16,"_file":2209,"_stem":2210,"_extension":19},"/en-us/blog/inside-look-how-gitlabs-test-platform-team-validates-ai-features",{"title":2194,"description":2195,"ogTitle":2194,"ogDescription":2195,"noIndex":6,"ogImage":2196,"ogUrl":2197,"ogSiteName":693,"ogType":694,"canonicalUrls":2197,"schema":2198},"Inside look: How GitLab's Test Platform team validates AI features","Learn how we continuously analyze AI feature performance, including testing latency worldwide, and get to know our new AI continuous analysis tool.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1750099033/Blog/Hero%20Images/Blog/Hero%20Images/blog-image-template-1800x945%20%2811%29_78Dav6FR9EGjhebHWuBVan_1750099033422.png","https://about.gitlab.com/blog/inside-look-how-gitlabs-test-platform-team-validates-ai-features","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Inside look: How GitLab's Test Platform team validates AI features\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Mark Lapierre\"},{\"@type\":\"Person\",\"name\":\"Vincy Wilson\"}],\n        \"datePublished\": 
\"2024-06-03\",\n      }",{"title":2194,"description":2195,"authors":2200,"heroImage":2196,"date":1275,"body":2203,"category":10,"tags":2204},[2201,2202],"Mark Lapierre","Vincy Wilson","AI is increasingly becoming a centerpiece of software development - many companies are integrating it throughout their DevSecOps workflows to improve productivity and increase efficiency. Because of this now-critical role, AI features should be tested and analyzed on an ongoing basis. In this article, we take you behind the scenes to learn how [GitLab's Test Platform team](https://handbook.gitlab.com/handbook/engineering/infrastructure/test-platform/) does this for [GitLab Duo](https://about.gitlab.com/gitlab-duo/) features by conducting performance validation, functional readiness, and continuous analysis across GitLab versions. With this three-pronged approach, GitLab aims to ensure that GitLab Duo features are performing optimally for our customers.\n\n> Discover the future of AI-driven software development with our GitLab 17 virtual launch event. [Watch today!](https://about.gitlab.com/seventeen/)\n\n## AI and testing\n\nAI's non-deterministic nature, where the same input can produce different outputs, makes ensuring a great user experience a challenge. So, when we integrated AI deep into the GitLab DevSecOps Platform, we had to adapt to our best practices to address this challenge. \n\nThe [Test Platform team's mission ](https://handbook.gitlab.com/handbook/engineering/infrastructure/test-platform/) is to help enable the successful development and deployment of high-quality software applications with continuous analysis and efficiency to help ensure customer satisfaction. The key to achieving this is by delivering tools that help increase standardization, repeatability, and test consistency. \n\nApplying this to GitLab Duo, our AI suite of tools to power DevSecOps workflows, means being able to continuously analyze its performance and identify opportunities for improvement. 
Our goal is to gain clear, actionable insights that will help us to enhance GitLab Duo's capabilities and, as a result, better meet our customers' needs. \n\n## The need for continuous analysis of AI\n\nTo continuously assess GitLab Duo, we needed a mechanism for analyzing feature performance across releases. Therefore, we created an AI continuous analysis tool to automate the collection and analysis of data to achieve this. \n\n![diagram of how the AI continuous analysis tool works](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750099041/Blog/Content%20Images/Blog/Content%20Images/image1_aHR0cHM6_1750099041503.png)\n\n\u003Ccenter>\u003Ci>How the AI continuous analysis tool works\u003C/i>\u003C/center>\n\n### Building the AI continuous analysis tool\n\nTo gain detailed, user-centric insights, we needed to gather data in the appropriate context – in this case, the integrated development environment (IDE), as it is where most of our users access GitLab Duo. We narrowed this down further by opting for the Visual Studio Code IDE, a popular choice within our community. Once the environment was chosen, we automated entering code prompts and recording the provided suggestions. The interactions with the IDE are handled by the [WebdriverIO VSCode service](https://github.com/webdriverio-community/wdio-vscode-service), and CI operations are handled through [GitLab CI/CD](https://docs.gitlab.com/ee/ci/). This automation significantly scaled up data collection and eliminated repetitive tasks for GitLab team members. To start, we have focused on measuring the performance of GitLab Duo Code Suggestions, but plan to expand to other GitLab AI features in the future.\n\n### Analyzing the data\n\nAt the core of our AI continuous analysis tool is a mechanism for collecting and analyzing code suggestions. This involves automatically entering code prompts, recording the suggestions provided, and logging timestamps of relevant events. 
We measure the time from when the tool provides an input until a suggestion is displayed in the UI. In addition, we record the logs created by the IDE, which report the time it took for each suggestion response to be received. With this data, we can compare the latency of suggestions in terms of how long it takes the backend AI service to send a response to the IDE, and how long it takes for the IDE to display the suggestion for the user. We then can compare latency and other metrics of GitLab Duo features across multiple releases. The GitLab platform has the ability to analyze [code quality](https://docs.gitlab.com/ee/ci/testing/code_quality.html) and [application security](https://docs.gitlab.com/ee/user/application_security/), so we leverage these capabilities to enable the AI continuous analysis tool to analyze the quality and security of the suggestions provided by GitLab Duo.\n\n### Improving AI-driven suggestions\n\nOnce the collected data is analyzed, the tool automatically generates a single report summarizing the results. The report includes key statistics (e.g., mean latency and/or latency at various percentiles), descriptions of notable differences or patterns, links to raw data, and CI/CD pipeline logs and artifacts. The tool also records a video of each prompt and suggestion, which allows us to review specific cases where differences are highlighted. This creates an opportunity for the UX researchers and development teams to take action on the insights gained, helping to improve the overall user experience and system performance.\n\nThe tool is at an early stage of development, but it's already helped us to improve the experience for GitLab Duo Code Suggestions users. 
Moving forward, we plan to expand our tool’s capabilities, incorporate more metrics and consume and provide input to our [Centralized Evaluation Framework](https://about.gitlab.com/direction/ai-powered/ai_framework/ai_evaluation/), which validates AI models, to enhance our continuous analysis further.\n\n## Performance validation\n\nAs AI has become integral to GitLab's offerings, optimizing the performance of AI-driven features is essential. Our performance tests aim to evaluate and monitor the performance of our GitLab components, which interact with AI service backends. While we can monitor the performance of these external services as part of our production environment's observability, we cannot control them. Thus, including third-party services in our performance testing would be expensive and yield limited benefits. Although third-party AI providers contribute to overall latency, the latency attributable to GitLab components is still important to check. We aim to detect changes that might lead to performance degradation by monitoring GitLab components. \n\n### Building AI performance validation test environment\n\nIn our AI test environments, the [AI Gateway](https://docs.gitlab.com/ee/architecture/blueprints/ai_gateway/#summary), which is a stand-alone service to give access to AI features to GitLab users, has been configured to return mocked responses, enabling us to test the performance of AI-powered features without interacting with third-party AI service providers. We conduct AI performance tests on [reference architecture environments of various sizes](https://docs.gitlab.com/ee/administration/reference_architectures/). Additionally, we evaluate new tests in their own isolated environment before they're added to the larger environments.\n\n### Testing multi-regional latency\n\nMulti-regional latency tests need to be run from various geolocations to validate that requests are being served from a suitable location close to the source of the request. 
We do this today with the use of the [GitLab Environment Toolkit](https://gitlab.com/gitlab-org/gitlab-environment-toolkit). The toolkit provisions an environment in the identified region to test (note: both the AI Gateway and the provisioned environment are in the same region), then uses the [GitLab Performance Tool](https://gitlab.com/gitlab-org/quality/performance) to run tests to measure time to first byte (TTFB). TTFB is our way of measuring time to the first part of the response being rendered, which contributes to the perceived latency that a customer experiences. To account for this measurement, our tests have a check to help ensure that the [response itself isn't empty](https://gitlab.com/gitlab-org/quality/performance/-/blob/cee8bef023e590e6ca75828e49f5c7c596581e06/k6/tests/experimental/api_v4_code_suggestions_generation_streaming.js#L70). \n\nOur tests are expanding further to continue to measure perceived latency from a customer’s perspective. We have captured a set of baseline response times that indicate how a specific set of regions performed when the test environment was in a known good state. These baselines allow us to compare subsequent environment updates and other regions to this known state to evaluate the impact of changes. These baseline measurements can be updated after major updates to ensure they stay relevant in the future. \n\nNote: As of this article's publication date, we have AI Gateway deployments across the U.S., Europe, and Asia. 
To learn more, visit our [handbook page](https://handbook.gitlab.com/handbook/engineering/development/data-science/ai-powered/ai-framework/#-aigw-region-deployments).\n\n## Functionality\n\nTo help continuously enable customers to confidently leverage AI reliably, we must continuously work to ensure our AI features function as expected.\n\n### Unit and integration tests\n\nFeatures that leverage AI models still require rigorous automated tests, which help engineers develop new features and changes confidently. However, since AI features can involve integrating with third-party AI providers, we must be careful to stub any external API calls to help ensure our tests are fast and reliable.\n\nFor a comprehensive look at testing at GitLab, look at our [testing standards and style guidelines](https://docs.gitlab.com/ee/development/testing_guide/). \n\n### End-to-end tests \n\nEnd-to-end testing is a strategy for checking whether the application works as expected across the entire software stack and architecture. We've implemented it in two ways for GitLab Duo testing: using real AI-generated responses and mock-generated AI responses.\n\n![validating features - image 2](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750099041/Blog/Content%20Images/Blog/Content%20Images/image2_aHR0cHM6_1750099041504.png)\n\n\u003Ccenter>\u003Ci>End-to-end test workflow\u003C/i>\u003C/center>\n\n#### Using real AI-generated responses\n\nAlthough costly, end-to-end tests are important to help ensure the entire user experience functions as expected. Since AI models are non-deterministic, end-to-end test assertions for validating real AI-generated responses should be loose enough to help ensure the feature functions without relying on a response that may change. 
This might mean an assertion that checks for some response with no errors or for a response we are certain to receive.\n\nAI-driven functionality is not accessible only from within the GitLab application, so we must also consider user workflows for other applications that leverage these features. For example, to cover the use case of a developer requesting code suggestions in [IntelliJ IDEA](https://www.jetbrains.com/idea/) using the GitLab Duo plugin, we need to drive the IntelliJ application to simulate a user workflow. Similarly, to ensure that the GitLab Duo Chat experience is consistent in VS Code, we must drive the VS Code application and exercise the GitLab Workflow extension. Working to ensure these workflows are covered helps us maintain a consistently great developer experience across all GitLab products. \n\n#### Using mock AI-generated responses\n\nIn addition to end-to-end tests using real AI-generated responses, we run some end-to-end tests against test environments configured to return mock responses. This allows us to verify changes to GitLab code and components that don’t depend on responses generated by an AI model more frequently.\n\n> For a closer look at end-to-end testing, read our [end-to-end testing guide](https://docs.gitlab.com/ee/development/testing_guide/end_to_end/). \n\n### Exploratory testing and dogfooding\n\nAI features are built by humans for humans. At GitLab, exploratory testing and dogfooding greatly benefit us. GitLab team members are passionate about what features get shipped, and insights from internal usage are invaluable in shaping the direction of AI features.\n\n[Exploratory testing](https://about.gitlab.com/topics/devops/devops-test-automation/#test-automation-stages) allows the team to creatively exercise features to help ensure edge case bugs are identified and resolved. Dogfooding encourages team members to use AI features in their daily workflows, which helps us identify realistic issues from realistic users. 
For a comprehensive look at how we dogfood AI features, look at [Developing GitLab Duo: How we are dogfooding our AI features](https://about.gitlab.com/blog/developing-gitlab-duo-how-we-are-dogfooding-our-ai-features/).\n\n## Get started with GitLab Duo\nHopefully this article gives you insight into how we are validating AI features at GitLab. We have integrated our team's process into our overall development as we iterate on GitLab Duo features. We encourage you to try GitLab Duo in your organization and reap the benefits of AI-powered workflows.\n\n> Start a [free trial of GitLab Duo](https://about.gitlab.com/gitlab-duo/#free-trial) today!\n\n_Members of the GitLab Test Platform team contributed to this article._\n",[9,704,482,1036,932,1037],{"slug":2206,"featured":91,"template":680},"inside-look-how-gitlabs-test-platform-team-validates-ai-features","content:en-us:blog:inside-look-how-gitlabs-test-platform-team-validates-ai-features.yml","Inside Look How Gitlabs Test Platform Team Validates Ai Features","en-us/blog/inside-look-how-gitlabs-test-platform-team-validates-ai-features.yml","en-us/blog/inside-look-how-gitlabs-test-platform-team-validates-ai-features",{"_path":2212,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":2213,"content":2219,"config":2225,"_id":2227,"_type":14,"title":2228,"_source":16,"_file":2229,"_stem":2230,"_extension":19},"/en-us/blog/introducing-the-gitlab-ai-transparency-center",{"title":2214,"description":2215,"ogTitle":2214,"ogDescription":2215,"noIndex":6,"ogImage":2216,"ogUrl":2217,"ogSiteName":693,"ogType":694,"canonicalUrls":2217,"schema":2218},"Introducing the GitLab AI Transparency Center","This new initiative will help our community understand how we uphold governance and transparency in our AI 
products.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098448/Blog/Hero%20Images/Blog/Hero%20Images/blog-image-template-1800x945_4YvWyVQu8Q1g31ZVjlDOkH_1750098447812.png","https://about.gitlab.com/blog/introducing-the-gitlab-ai-transparency-center","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Introducing the GitLab AI Transparency Center\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Robin Schulman\"}],\n        \"datePublished\": \"2024-04-11\",\n      }",{"title":2214,"description":2215,"authors":2220,"heroImage":2216,"date":2222,"body":2223,"category":10,"tags":2224},[2221],"Robin Schulman","2024-04-11","GitLab is dedicated to responsibly integrating artificial intelligence (AI) throughout our comprehensive DevSecOps platform. We offer GitLab Duo, a [full suite of AI capabilities](https://about.gitlab.com/gitlab-duo/) across the GitLab platform, so that our customers can ship better, more secure software faster. GitLab Duo follows a privacy- and transparency-first approach to help customers confidently adopt AI while keeping their valuable assets protected.\n\nGenerative AI is moving so quickly and we know it presents a host of novel questions about the privacy and safety of this technology. In GitLab's [2023 State of AI in Software Development report](https://about.gitlab.com/developer-survey/#ai), more than 75% of respondents expressed concern about AI tools having access to private information or intellectual property. \n\n[Transparency is a core value at GitLab](https://handbook.gitlab.com/handbook/values/#transparency), and we take a transparency- and privacy-first approach to building our AI features to help ensure that our customers’ valuable intellectual property is protected. 
Accordingly, we’ve launched our [AI Transparency Center](https://about.gitlab.com/ai-transparency-center/) to help GitLab’s customers, community, and team members better understand the ways in which GitLab upholds ethics and transparency in our AI-powered features.\n\nThe AI Transparency Center includes GitLab’s [AI Ethics Principles for Product Development](https://handbook.gitlab.com/handbook/legal/ethics-compliance-program/ai-ethics-principles/), [AI Continuity Plan](https://handbook.gitlab.com/handbook/product/ai/continuity-plan/), and our [AI features documentation](https://docs.gitlab.com/ee/user/ai_features.html).\n\n## The AI Ethics Principles for Product Development explained\n\nWe believe ethics play an important role in building AI features. For this reason, we’ve launched GitLab’s [AI Ethics Principles for Product Development](https://handbook.gitlab.com/handbook/legal/ethics-compliance-program/ai-ethics-principles/) to address what we consider to be the best practices in responsible AI development. These Principles will help guide GitLab as we continue to build and evolve our AI functionality.\n\nThe Principles specifically address five key areas of concern that GitLab monitors so that we can continue to responsibly integrate AI into our customers’ workflows:\n\n- **Avoiding unfair bias.** [Diversity, Inclusion, and Belonging](https://about.gitlab.com/company/culture/inclusion/) is also one of GitLab’s core values. It is a critical consideration when building features powered by AI systems, as there is [evidence](https://fra.europa.eu/en/publication/2022/bias-algorithm) that AI systems may perpetuate human and societal biases. GitLab will continue to prioritize Diversity, Inclusion, and Belonging when building AI features.\n\n- **Safeguarding against security risks.** GitLab is a DevSecOps platform, which means we integrate security throughout our entire product, including in our AI features. 
While AI brings many potential security benefits, it can also create security risks if not deployed correctly. As we do with all of our features, our goal is to mitigate these security risks in GitLab’s AI features.\n\n- **Preventing potentially harmful uses.** We strive to build AI features responsibly. We try to carefully consider the potential consequences of our AI features in order to refrain from launching features that are likely to cause, or allow others to cause, overall harm.\n\n- **Considering what data our AI features use and how they use it.** We will continue to carefully evaluate the data that our AI features use, the purposes for which we’re using this data, and who owns the intellectual property and other rights to the data, just as we do with all of GitLab’s features.\n\n- **Holding ourselves accountable.** GitLab’s mission is to make it so that [everyone can contribute](https://about.gitlab.com/company/mission/), and we welcome feedback from the GitLab community about our AI features. We will in turn aim to share our AI ethics-related findings with others in the industry where possible. We also know that AI systems, and the risk mitigations we need to employ with them, will change over time, so we are committed to continuously reviewing and iterating on our AI features and these Principles.\n\n## The AI Continuity Plan explained\n\nUnlike other DevSecOps platforms, GitLab is not tied to a single AI model provider. 
Instead, our AI features are powered by a diverse set of models, which helps us support a wide range of use cases and gives our customers flexibility.\n\nWe carefully select our third-party AI vendors to ensure a commitment from the vendor that they will forgo the use of GitLab and GitLab customers’ content for the developing, training, and fine tuning of vendor models.\n\nOur new [AI Continuity Plan](https://handbook.gitlab.com/handbook/product/ai/continuity-plan/) lays out GitLab’s processes when reviewing and selecting new third-party AI vendors, and when these AI vendors materially change their practices with respect to customer data.\n\n## AI features documentation \n\nIn keeping with GitLab’s core Transparency value, our [AI features documentation](https://docs.gitlab.com/ee/user/ai_features.html) clearly outlines our AI features’ purposes, underlying models, statuses, and privacy practices.\n\n## Visit the AI Transparency Center\n\nThe [AI Transparency Center](https://about.gitlab.com/ai-transparency-center/) is publicly available in keeping with our [Transparency value](https://handbook.gitlab.com/handbook/values/#transparency) and to encourage others in the AI industry and the GitLab community to take safety, privacy, and ethics into account when building their own AI-powered functionality.\n\nWe’re excited about the opportunities that responsible AI will bring, and will continue to build our AI features with ethics, privacy, and transparency in mind.\n",[9,1036,1135],{"slug":2226,"featured":6,"template":680},"introducing-the-gitlab-ai-transparency-center","content:en-us:blog:introducing-the-gitlab-ai-transparency-center.yml","Introducing The Gitlab Ai Transparency 
Center","en-us/blog/introducing-the-gitlab-ai-transparency-center.yml","en-us/blog/introducing-the-gitlab-ai-transparency-center",{"_path":2232,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":2233,"content":2239,"config":2245,"_id":2247,"_type":14,"title":2248,"_source":16,"_file":2249,"_stem":2250,"_extension":19},"/en-us/blog/introducing-the-source-insights-for-the-future-of-software-development",{"title":2234,"description":2235,"ogTitle":2234,"ogDescription":2235,"noIndex":6,"ogImage":2236,"ogUrl":2237,"ogSiteName":693,"ogType":694,"canonicalUrls":2237,"schema":2238},"Introducing The Source: Insights for the future of software development","Explore our new publication for transformative software development strategies and expert advice on emerging technologies.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749674616/Blog/Hero%20Images/blog-image-template-1800x945__1_.png","https://about.gitlab.com/blog/introducing-the-source-insights-for-the-future-of-software-development","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Introducing The Source: Insights for the future of software development\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Chandler Gibbons\"}],\n        \"datePublished\": \"2024-10-29\",\n      }",{"title":2234,"description":2235,"authors":2240,"heroImage":2236,"date":2242,"body":2243,"category":1135,"tags":2244},[2241],"Chandler Gibbons","2024-10-29","Modern software development is transforming the way organizations create, deliver, and scale business value. Teams must be able to build solutions quickly and efficiently while navigating rising security threats, emerging technologies, and increasingly complex compliance demands.\n\nToday, GitLab is launching [The Source](https://about.gitlab.com/the-source/), a new publication that covers the evolution of software development as an engine for business success. 
We offer regular insights into the future of software development, supported by original research and analysis from our subject matter experts and thought leaders.\n\nOn The Source, you will find answers to questions such as:\n* How can leaders measure the ROI of AI across the software development lifecycle?\n* What’s the best way to ensure security and compliance across the entire software supply chain?\n* What types of efficiencies will teams see from platform and toolchain consolidation?\n\nHere’s a sample of what's on The Source today:\n\n**4 steps for measuring the impact of AI**\n\n\"Evaluating the productivity of AI-enhanced coding requires a more nuanced approach than traditional metrics such as lines of code, code commits, or task completion. It necessitates shifting the focus to real-world business outcomes that balance development speed, software quality, and security.\"\n- [Learn the 4 steps from AI expert Taylor McCaslin.](https://about.gitlab.com/the-source/ai/4-steps-for-measuring-the-impact-of-ai/)\n\n**Addressing the root cause of common security frustrations**\n\n\"DevSecOps promises better integration between engineering and security, but it’s clear that frustrations and misalignment persist. 
That’s because these challenges are symptoms of a larger problem with how organizations view security, as well as how teams work together and how they allocate time to security.\"\n- [Solve this disconnect with expert advice from GitLab CISO Josh Lemos.](https://about.gitlab.com/the-source/security/security-its-more-than-culture-addressing-the-root-cause-of-common-security/)\n\n**Driving business results with platform engineering**\n\n\"Platform engineering aims to normalize and standardize developer workflows by providing developers with optimized 'golden paths' for most of their workloads and flexibility to define exceptions for the rest.\"\n- [Discover GitLab Field CTO Brian Wald's best practices for platform engineering success.](https://about.gitlab.com/the-source/platform/driving-business-results-with-platform-engineering/)\n\n## Make The Source your decision-making partner\n\nVisit [The Source](https://about.gitlab.com/the-source/) today to explore the latest insights, get answers to your leadership questions, and learn something new to share with your teams. You can also subscribe to our newsletter for regular updates directly to your inbox. 
Join our community of forward-thinking technology leaders and help shape the future of software development.",[9,678,1135,703],{"slug":2246,"featured":91,"template":680},"introducing-the-source-insights-for-the-future-of-software-development","content:en-us:blog:introducing-the-source-insights-for-the-future-of-software-development.yml","Introducing The Source Insights For The Future Of Software Development","en-us/blog/introducing-the-source-insights-for-the-future-of-software-development.yml","en-us/blog/introducing-the-source-insights-for-the-future-of-software-development",{"_path":2252,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":2253,"content":2259,"config":2264,"_id":2266,"_type":14,"title":2267,"_source":16,"_file":2268,"_stem":2269,"_extension":19},"/en-us/blog/jenkins-gitlab-ultimate-guide-to-modernizing-cicd-environment",{"title":2254,"description":2255,"ogTitle":2254,"ogDescription":2255,"noIndex":6,"ogImage":2256,"ogUrl":2257,"ogSiteName":693,"ogType":694,"canonicalUrls":2257,"schema":2258},"Jenkins to GitLab: The ultimate guide to modernizing your CI/CD environment","Learn how to migrate from Jenkins to the integrated CI/CD of the GitLab DevSecOps Platform to deliver high-quality software rapidly.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749663000/Blog/Hero%20Images/tanukilifecycle.png","https://about.gitlab.com/blog/jenkins-gitlab-ultimate-guide-to-modernizing-cicd-environment","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Jenkins to GitLab: The ultimate guide to modernizing your CI/CD environment\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Itzik Gan Baruch\"}],\n        \"datePublished\": \"2023-11-01\",\n      }",{"title":2254,"description":2255,"authors":2260,"heroImage":2256,"date":2261,"body":2262,"category":1156,"tags":2263},[1735],"2023-11-01","\nIn today's dynamic landscape of software development, certain 
requirements have become paramount for delivering high-quality software rapidly. These requirements include the need for cloud compatibility, faster development cycles, improved collaboration, containerization, enhanced development experiences, and the integration of AI-driven capabilities for better efficiency and speed. Jenkins, a longstanding and respected continuous integration (CI) tool, has admirably played a role in many teams' software development for years. However, as more teams adopt DevOps/DevSecOps strategies for their software delivery, leveraging the integrated CI that is available in a DevSecOps platform like GitLab can provide benefits that Jenkins does not. \n\nSome organizations find themselves hesitating to migrate, not because they doubt the benefits of a top-tier [CI/CD](https://about.gitlab.com/topics/ci-cd/) solution such as GitLab, but due to the complexities of their existing Jenkins implementations. It's understandable that such a transition can seem daunting. \n\nIn this blog, you'll find several migration strategies to help transition from Jenkins to GitLab and make the process smoother and more manageable.\n\n## Migrating to GitLab\nIt's become evident that for organizations seeking a CI/CD solution that can seamlessly support their evolving demands, GitLab emerges as a powerful game-changer. Let's explore why transitioning to this advanced platform is transformative for Jenkins users.\n\n### Why migrate to GitLab \nBefore we delve into the migration approaches, let's take a moment to understand GitLab CI and what makes it a compelling choice for modern CI/CD needs.\n\n> Try GitLab CI/CD today with [a free trial of Ultimate](https://gitlab.com/-/trials/new).\n\n### GitLab CI overview\nGitLab CI is an integral part of the GitLab [AI-powered](https://about.gitlab.com/gitlab-duo/) DevSecOps Platform, which offers a comprehensive and unified solution for DevSecOps and CI/CD. 
GitLab's design revolves around streamlining development workflows, fostering collaboration, enhancing security, and ensuring scalability.\n\n### Key features of GitLab CI\nThese are the key features of GitLab CI:\n- **Unified platform:** GitLab CI is more than just a CI/CD tool; it's part of a broader ecosystem that includes source code management, project management, security features, analytics and more. This unified platform streamlines workflows and enhances collaboration among development teams.\n- **Containerization and orchestration:** GitLab CI/CD is designed with containerization in mind, offering native support for Docker and Kubernetes. This enables seamless integration of container technologies into your CI/CD pipelines.\n- **Security by design:** Security is a top priority, and GitLab CI incorporates features such as static code analysis and vulnerability scanning to help teams identify and address security issues early in the development process.\n- **GitOps principles:** GitLab CI aligns with [GitOps principles](https://about.gitlab.com/blog/the-ultimate-guide-to-gitops-with-gitlab/), emphasizing version-controlled, declarative configurations for infrastructure and application deployments. 
This approach enhances the reliability and repeatability of deployments.\n\nGet familiar with GitLab CI with this tutorial:\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/WKR-7clknsA?si=T21Fe10Oa0rQ0SGB\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\nWith that understanding of GitLab CI's capabilities, let's explore the migration steps and strategies for Jenkins users looking to leverage the benefits of GitLab CI.\n\n## A recommended step-by-step Jenkins-to-GitLab CI migration\nWhen considering a migration from Jenkins to GitLab CI, we strongly recommend following a well-structured, step-by-step approach to ensure a seamless transition. Here's our recommended process:\n1. **Pipeline assessment:** Start by conducting a comprehensive inventory of all your existing pipelines in Jenkins. This initial step will help you gain a clear understanding of the scope and complexity of the migration.\n2. **Parallel migration:** Begin the migration process by selecting individual pipelines and moving them to GitLab CI one at a time. Continue to maintain the use of Jenkins for your ongoing work during this transition to minimize disruptions.\n3. **Code verification:** We advise beginning with verification checks in CI. Run both the Jenkins and GitLab CI pipelines in parallel. This dual approach allows you to directly compare the two workflows and identify any issues in the new GitLab workflows. During this phase, keep the GitLab workflow as an optional choice while Jenkins remains required.\n4. **Continuous validation:** After running both pipelines in parallel for a full iteration, thoroughly evaluate the outcomes from each pipeline. This evaluation should consider various factors, including status codes, logs, and performance. \n5. 
**GitLab CI transition:** As you gain confidence in the reliability and effectiveness of GitLab CI through the parallel runs, make the transition to the GitLab CI workflow as the required standard while Jenkins continues to operate in the background.\n6. **Jenkins phaseout:** After a second iteration, when you are confident in the performance and stability of GitLab CI, you can begin to remove the Jenkins job from your code verification pipeline. This successful transition will enable you to retire Jenkins from this particular aspect of your CI/CD process.\n\nThis recommended approach ensures that your migration is a gradual evolution, allowing you to identify and address any issues or discrepancies before fully committing to GitLab CI. Running Jenkins and GitLab CI pipelines in parallel provides valuable insights and ensures the effective streamlining of your CI/CD processes.\n\n## Preparing for migration: Training and communication\nTo ensure a smooth and successful migration from Jenkins to GitLab CI, follow these essential steps:\n- **Stakeholder communication:** Start by announcing your migration plans and timelines to all relevant stakeholders. This includes DevOps teams, developers, and QA engineers. 
Transparency in communication is crucial to ensure that everyone understands the objectives and expectations of the migration.\n- **Knowledge-level training:** Conduct knowledge-level training sessions for your teams to promote GitLab CI adoption.\nCover topics such as using GitLab CI, understanding the YAML syntax, and how to create a basic pipeline.\nProvide team members with the knowledge and skills necessary to navigate the new GitLab CI environment effectively.\n- **Hands-on learning:** Encourage hands-on learning by pairing up developers.\nCreate opportunities for them to learn from each other's experiences throughout the migration process.\n\nBy following these instructions for training and communication, you'll build a strong foundation for a successful migration, empowering your teams to adapt and thrive in the new environment.\n\n## 3 Jenkins-to-GitLab CI migration strategies\nThere are different strategies to consider. These three strategies offer flexibility, allowing organizations to choose the path that best aligns with their specific needs and resources. Let's explore these strategies in detail to help you make an informed decision about which one suits your organization best.\n\n### Migration Strategy 1: Using GitLab CI for new projects\nThe first migration strategy involves a gradual transition. While you maintain your existing Jenkins infrastructure for ongoing projects, you introduce GitLab CI for new projects. This approach allows you to harness the modern features of GitLab CI without disrupting your current work.\n\n#### Benefits of Migration Strategy 1\nThe benefits of this approach include the following:\n- New projects can leverage GitLab CI's advanced features right from the start. 
\n- This strategy minimizes the risk of disrupting existing workflows, as your existing Jenkins setup remains intact.\n- Your team can gradually adapt to GitLab CI, building confidence and expertise without the pressure of an immediate full-scale migration.\n\n#### Challenges of Migration Strategy 1\nThe challenges of this approach include the following:\n- Operating two CI/CD platforms simultaneously can introduce complexity, especially in terms of integration and team collaboration.\n- Managing projects on different platforms may require careful coordination to ensure consistency in processes and security practices.\n\nThis strategy offers a smooth and manageable transition by allowing you to harness GitLab CI's strengths for new projects, while your existing Jenkins infrastructure continues to support ongoing work.\n\n### Migration Strategy 2: Migrating only strategic projects\nIn this strategy, you identify specific projects within your organization that stand to benefit the most from the capabilities of GitLab CI. 
Instead of preparing for a wholesale migration, you start by focusing your efforts on migrating these strategically selected projects first.\n\n#### Benefits of Migration Strategy 2\nThe benefits of this approach include the following:\n- By concentrating on key projects, you can realize significant improvements in those areas where GitLab CI aligns with specific needs.\n- This approach reduces the complexity of migrating everything at once, minimizing the potential for disruptions.\n- You can gradually build confidence with GitLab CI and its benefits before considering further migrations.\n\n#### Challenges of Migration Strategy 2\nThe challenges of this approach include the following:\n- Even though you're not migrating all projects, the chosen projects' migration can still be intricate and require careful planning.\n- Ensuring seamless collaboration between projects on different platforms may require additional attention.\n\nThis strategy allows you to maximize the impact of GitLab CI by focusing on strategic areas, minimizing risk, and gradually gaining experience with the new tool.\n\n### Migration Strategy 3: Migrating everything\nThe third strategy is a comprehensive migration where you commit to moving all your CI/CD processes, projects, and workflows to GitLab CI. This approach aims for uniformity and simplification of CI/CD across all projects. This strategy can benefit from taking an iterative approach. Consider starting with new projects, followed by migrating strategic projects, and then leverage your growing knowledge and experience with GitLab CI to complete the migration of remaining projects. 
\n\n#### Benefits of Migration Strategy 3\nThe benefits of this approach include the following:\n- Uniform CI/CD processes across all projects can streamline administration and maintenance, reducing complexity.\n- You can take full advantage of GitLab CI's modern capabilities, from Infrastructure as Code to enhanced security features.\n- As your projects grow, GitLab CI is designed to handle increased demands, ensuring long-term scalability.\n\n#### Challenges of Migration Strategy 3\nThe challenges of this approach include the following:\n- A full-scale migration can be intricate, requiring meticulous planning and implementation.\n- The transition may disrupt ongoing projects and require a significant time investment.\n- Investment in training and potential tool migration expenses should be considered.\n\nOpt for this approach if uniformity and consolidation of CI/CD processes are a high priority, and you have the resources to execute a full migration.\n\nThe migration strategy you select should align with your organization's specific needs and circumstances. In all cases, the ultimate goal is to enhance your development process with modern CI/CD tools like GitLab CI, which offers scalability, infrastructure automation, security, and collaboration features that align with today's development needs.\n\n## Technical insights: How the migration works\nMoving your CI/CD workflows from Jenkins to GitLab CI is a transformative journey, and understanding how it works is vital for a successful transition.\n\n### Understanding the configurations: Jenkinsfile vs. .gitlab-ci.yml\nThe heart of your CI/CD pipeline lies in the configurations defined in your Jenkinsfile (for Jenkins) and .gitlab-ci.yml (for GitLab CI). 
While there are some similarities between these configuration files, there are notable differences as well.\n\n#### Similarities\n- Both files define the stages, jobs, and steps of your CI/CD process.\n- You specify the desired build, test, and deployment steps in both files.\n- Environment variables and settings can be configured in either file.\n\n#### Differences\n- Jenkinsfile uses Groovy for scripting, while .gitlab-ci.yml uses YAML. This change in language affects the way you write and structure your configurations.\n- The process of defining pipelines is more intuitive in .gitlab-ci.yml, with a cleaner, more human-readable syntax.\n- GitLab CI provides a wide range of built-in templates and predefined jobs, simplifying configuration and reducing the need for custom scripting.\n\n### Manually converting the pipeline configuration\nCurrently, migrating your existing Jenkins pipelines to GitLab CI is typically done manually. This means analyzing your Jenkinsfile and re-creating the equivalent configurations in .gitlab-ci.yml. While there are similarities in the concepts and structure, the differences in syntax and the specific capabilities of each platform require careful consideration during the migration.\n\n## Strategic planning for a smooth transition\nMigrating from Jenkins to GitLab CI requires meticulous planning to ensure a seamless transition. It's crucial to assess the disparities between the two systems and evaluate their impact on your workflow, considering aspects like security, cost, time, and capacity.\n\nOnce you've identified these differences and devised your migration strategy, break down the migration into key steps. These include setting up GitLab CI pipelines, securely transferring data from Jenkins to GitLab CI, and integrating GitLab CI into your existing tools and processes. 
\n\n## Case study: A seamless transition for Lockheed Martin\nLet's look at a real-world case study to illustrate the effectiveness of the \"Migrate Everything\" strategy. [Lockheed Martin](https://about.gitlab.com/customers/lockheed-martin/), the world’s largest defense contractor, had been using Jenkins for several years. As their project portfolio expanded, they realized that their Jenkins implementation with a wide variety of DevOps tools was becoming increasingly complex to manage. They were also eager to adopt modern CI/CD capabilities that Jenkins struggled to provide.\n\nIn collaboration with GitLab, Lockheed Martin decided to undertake a comprehensive migration to GitLab CI. Their goals included achieving consistency in their CI/CD processes, simplifying administration and maintenance, and taking full advantage of The GitLab Platform’s robust features.\n\nThe comprehensive migration strategy proved to be a resounding success for Lockheed Martin. With GitLab CI, they not only streamlined their CI/CD processes but achieved remarkable results. **They managed to run CI pipeline builds a staggering 80 times faster, retired thousands of Jenkins servers, and reduced the time spent on system maintenance by a staggering 90%. This monumental shift resulted in a significant increase in efficiency and productivity for Lockheed Martin.**\n\nThis case study showcases how a comprehensive migration strategy can be effective for organizations looking to leverage GitLab capabilities across all their projects.\n\nFor more in-depth insights into Lockheed Martin's successful transition to GitLab and how it streamlined their software development processes, check out [the detailed case study](https://about.gitlab.com/customers/lockheed-martin/).\n\n## GitLab documentation and support\nFor those embarking on this migration journey, GitLab offers documentation to guide you through the process. 
You can find valuable resources in GitLab's [official documentation](https://docs.gitlab.com/ee/ci/migration/jenkins.html).\n\nIn addition to documentation, GitLab's Professional Services team is available to assist organizations in their migrations. They bring expertise and experience to ensure a smooth transition. Whether it's understanding the nuances of Jenkinsfile to .gitlab-ci.yml conversion or optimizing your CI/CD workflows, their support can be invaluable.\n\n> Try GitLab CI/CD today with [a free trial of Ultimate](https://gitlab.com/-/trials/new).\n",[724,277,9,231,703,849],{"slug":2265,"featured":6,"template":680},"jenkins-gitlab-ultimate-guide-to-modernizing-cicd-environment","content:en-us:blog:jenkins-gitlab-ultimate-guide-to-modernizing-cicd-environment.yml","Jenkins Gitlab Ultimate Guide To Modernizing Cicd Environment","en-us/blog/jenkins-gitlab-ultimate-guide-to-modernizing-cicd-environment.yml","en-us/blog/jenkins-gitlab-ultimate-guide-to-modernizing-cicd-environment",{"_path":2271,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":2272,"content":2278,"config":2284,"_id":2286,"_type":14,"title":2287,"_source":16,"_file":2288,"_stem":2289,"_extension":19},"/en-us/blog/learn-advanced-rust-programming-with-a-little-help-from-ai-code-suggestions",{"title":2273,"description":2274,"ogTitle":2273,"ogDescription":2274,"noIndex":6,"ogImage":2275,"ogUrl":2276,"ogSiteName":693,"ogType":694,"canonicalUrls":2276,"schema":2277},"Learn advanced Rust programming with a little help from AI","Use this guided tutorial, along with AI-powered GitLab Duo Code Suggestions, to continue learning advanced Rust programming.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749662439/Blog/Hero%20Images/codewithheart.png","https://about.gitlab.com/blog/learn-advanced-rust-programming-with-a-little-help-from-ai-code-suggestions","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": 
\"Learn advanced Rust programming with a little help from AI\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Michael Friedrich\"}],\n        \"datePublished\": \"2023-10-12\",\n      }",{"title":2273,"description":2274,"authors":2279,"heroImage":2275,"date":2280,"body":2281,"category":10,"tags":2282},[1217],"2023-10-12","When I started learning a new programming language more than 20 years ago, we had access to the Visual Studio 6 MSDN library, installed from 6 CD-ROMs. Algorithms with pen and paper, design pattern books, and MSDN queries to figure out the correct type were often time-consuming. Learning a new programming language changed fundamentally in the era of remote collaboration and artificial intelligence (AI). Now you can spin up a [remote development workspace](https://about.gitlab.com/blog/quick-start-guide-for-gitlab-workspaces/), share your screen, and engage in a group programming session. With the help of [GitLab Duo Code Suggestions](/gitlab-duo/), you always have an intelligent partner at your fingertips. Code Suggestions can learn from your programming style and experience. 
They only need input and context to provide you with the most efficient suggestions.\n\nIn this tutorial, we build on the [getting started blog post](/blog/learning-rust-with-a-little-help-from-ai-code-suggestions-getting-started/) and design and create a simple feed reader application.\n\n- [Preparations](#preparations)\n    - [Code Suggestions](#code-suggestions)\n- [Continue learning Rust](#continue-learning-rust)\n    - [Hello, Reader App](#hello-reader-app)\n    - [Initialize project](#initialize-project)\n    - [Define RSS feed URLs](#define-rss-feed-urls)\n- [Modules](#modules)\n    - [Call the module function in main()](#call-the-module-function-in-main)\n- [Crates](#crates)\n    - [feed-rs: parse XML feed](#feed-rs-parse-xml-feed)\n- [Runtime configuration: Program arguments](#runtime-configuration-program-arguments)\n    - [User input error handling](#user-input-error-handling)\n- [Persistence and data storage](#persistence-and-data-storage)\n- [Optimization](#optimization)\n    - [Asynchronous execution](#asynchronous-execution)\n    - [Spawning threads](#spawning-threads)\n    - [Function scopes, threads, and closures](#function-scopes-threads-and-closures)\n- [Parse feed XML into objects](#parse-feed-xml-into-object-types)\n    - [Map generic feed data types](#map-generic-feed-data-types)\n    - [Error handling with Option::unwrap()](#error-handling-with-option-unwrap)\n- [Benchmarks](#benchmarks)\n    - [Sequential vs. 
Parallel execution benchmark](#sequential-vs-parallel-execution-benchmark)\n    - [CI/CD with Rust caching](#cicd-with-rust-caching)\n- [What is next](#what-is-next)\n    - [Async learning exercises](#async-learning-exercises)\n    - [Share your feedback](#share-your-feedback)\n\n## Preparations\nBefore diving into the source code, make sure to set up [VS Code](/blog/learning-rust-with-a-little-help-from-ai-code-suggestions-getting-started/#vs-code) and [your development environment with Rust](/blog/learning-rust-with-a-little-help-from-ai-code-suggestions-getting-started/#development-environment-for-rust).\n\n### Code Suggestions\nFamiliarize yourself with suggestions before actually verifying the suggestions. GitLab Duo Code Suggestions are provided as you type, so you do not need use specific keyboard shortcuts. To accept a code suggestion, press the `tab` key. Also note that writing new code works more reliably than refactoring existing code. AI is non-deterministic, which means that the same suggestion may not be repeated after deleting the code suggestion. While Code Suggestions is in Beta, we are working on improving the accuracy of generated content overall. Please review the [known limitations](https://docs.gitlab.com/ee/user/project/repository/code_suggestions.html#known-limitations), as this could affect your learning experience.\n\n**Tip:** The latest release of Code Suggestions supports multi-line instructions. You can refine the specifications to your needs to get better suggestions.\n\n```rust\n    // Create a function that iterates over the source array\n    // and fetches the data using HTTP from the RSS feed items.\n    // Store the results in a new hash map.\n    // Print the hash map to the terminal.\n```\n\nThe VS Code extension overlay is shown when offering a suggestion. You can use the `tab` key to accept the suggested line(s), or `cmd cursor right` to accept one word. 
Additionally, the three dots menu allows you to always show the toolbar.\n\n![VS Code GitLab Duo Code Suggestions overlay with instructions](https://about.gitlab.com/images/blogimages/learn-rust-with-ai-code-suggestions-advanced-programming/vs_code_code_suggestions_options_overlay_keep_toolbar.png){: .shadow}\n\n## Continue learning Rust\nNow, let us continue learning Rust, which is one of the [supported languages in Code Suggestions](https://docs.gitlab.com/ee/user/project/repository/code_suggestions.html#supported-languages). [Rust by Example](https://doc.rust-lang.org/rust-by-example/) provides an excellent tutorial for beginners, together with the official [Rust book](https://doc.rust-lang.org/book/). Both resources are referenced throughout this blog post.\n\n### Hello, Reader App\nThere are many ways to create an application and learn Rust. Some of them involve using existing Rust libraries - so-called `Crates`. We will use them a bit further into the blog post. For example, you could create a command-line app that processes images and writes the result to a file. Solving a classic maze or writing a Sudoku solver can also be a fun challenge. Game development is another option. The book [Hands-on Rust](https://hands-on-rust.com/) provides a thorough learning path by creating a dungeon crawler game. My colleague Fatima Sarah Khalid started the [Dragon Realm in C++ with a little help from AI](/blog/building-a-text-adventure-using-cplusplus-and-code-suggestions/) -- check it out, too.\n\nHere is a real use case that helps solve an actual problem: Collecting important information from different sources into RSS feeds for (security) releases, blog posts, and social discussion forums like Hacker News. Often, we want to filter for specific keywords or versions mentioned in the updates. These requirements allow us to formulate a requirements list for our application:\n\n1. Fetch data from different sources (HTTP websites, REST API, RSS feeds). 
RSS feeds in the first iteration.\n1. Parse the data.\n1. Present the data to the user, or write it to disk.\n1. Optimize performance.\n\nThe following example application output will be available after the learning steps in this blog post:\n\n![VS Code Terminal, cargo run with formatted feed entries output](https://about.gitlab.com/images/blogimages/learn-rust-with-ai-code-suggestions-advanced-programming/vs_code_terminal_cargo_run_formatted_output_final.png)\n\nThe application should be modular and build the foundation to add more data types, filters, and hooks to trigger actions at a later point.\n\n### Initialize project\nReminder: `cargo init` in the project root creates the file structure, including the `main()` entrypoint. Therefore, we will learn how to create and use Rust modules in the next step.\n\nCreate a new directory called `learn-rust-ai-app-reader`, change into it and run `cargo init`. This command implicitly runs `git init` to initialize a new Git repository locally. The remaining step is to configure the Git remote repository path, for example, `https://gitlab.com/gitlab-de/use-cases/ai/learn-with-ai/learn-rust-ai-app-reader`. Please adjust the path for your namespace. Pushing the Git repository [automatically creates a new private project in GitLab](https://docs.gitlab.com/ee/user/project/#create-a-new-project-with-git-push).\n\n```shell\nmkdir learn-rust-ai-app-reader\ncd learn-rust-ai-app-reader\n\ncargo init\n\ngit remote add origin https://gitlab.com/gitlab-de/use-cases/ai/learn-with-ai/learn-rust-ai-app-reader.git\ngit push --set-upstream origin main\n```\n\nOpen VS Code from the newly created directory. The `code` CLI will spawn a new VS Code window on macOS.\n\n```shell\ncode .\n```\n\n### Define RSS feed URLs\nAdd a new hashmap to store the RSS feed URLs inside the `src/main.rs` file in the `main()` function. 
You can instruct GitLab Duo Code Suggestions with a multi-line comment to create a [`HashMap`](https://doc.rust-lang.org/stable/std/collections/struct.HashMap.html) object, and initialize it with default values for Hacker News, and TechCrunch. Note: Verify that the URLs are correct when you get suggestions.\n\n```rust\nfn main() {\n    // Define RSS feed URLs in the variable rss_feeds\n    // Use a HashMap\n    // Add Hacker News and TechCrunch\n    // Ensure to use String as type\n\n}\n```\n\nNote that the code comment provides instructions for:\n\n1. The variable name `rss_feeds`.\n2. The `HashMap` type.\n3. Initial seed key/value pairs.\n4. String as type (can be seen with `to_string()` calls).\n\nOne possible suggested path can be as follows:\n\n```rust\nuse std::collections::HashMap;\n\nfn main() {\n    // Define RSS feed URLs in the variable rss_feeds\n    // Use a HashMap\n    // Add Hacker News and TechCrunch\n    // Ensure to use String as type\n    let rss_feeds = HashMap::from([\n        (\"Hacker News\".to_string(), \"https://news.ycombinator.com/rss\".to_string()),\n        (\"TechCrunch\".to_string(), \"https://techcrunch.com/feed/\".to_string()),\n    ]);\n\n}\n```\n\n![VS Code with Code Suggestions for RSS feed URLs for Hacker News and TechCrunch](https://about.gitlab.com/images/blogimages/learn-rust-with-ai-code-suggestions-advanced-programming/vs_code_main_array_rss_feed_urls_suggested.png)\n\nOpen a new terminal in VS Code (cmd shift p - search for `terminal`), and run `cargo build` to build the changes. The error message instructs you to add the `use std::collections::HashMap;` import.\n\nThe next step is to do something with the RSS feed URLs. [The previous blog post](/blog/learning-rust-with-a-little-help-from-ai-code-suggestions-getting-started/) taught us to split code into functions. 
We want to organize the code more modularly for our reader application, and use Rust modules.\n\n## Modules\n[Modules](https://doc.rust-lang.org/rust-by-example/mod.html) help with organizing code. They can also be used to hide functions into the module scope, limiting access to them from the main() scope. In our reader application, we want to fetch the RSS feed content, and parse the XML response. The `main()` caller should only be able to access the `get_feeds()` function, while other functionality is only available in the module.\n\nCreate a new file `feed_reader.rs` in the `src/` directory. Instruct Code Suggestions to create a public module named `feed_reader`, and a public function `get_feeds()` with a String HashMap as input. Important: The file and module names need to be the same, following the [Rust module structure](https://doc.rust-lang.org/book/ch07-02-defining-modules-to-control-scope-and-privacy.html).\n\n![Code Suggestions: Create public module, with function and input types](https://about.gitlab.com/images/blogimages/learn-rust-with-ai-code-suggestions-advanced-programming/code_suggestions_rust_public_module_function_input.png){: .shadow}\n\nInstructing Code Suggestions with the input variable name and type will also import the required `std::collections::HashMap` module. Tip: Experiment with the comments, and refine the variable types to land the best results. 
Passing function parameters as object references is considered best practice in Rust, for example.\n\n```rust\n// Create public module feed_reader\n// Define get_feeds() function which takes rss_feeds as String HashMap reference as input\npub mod feed_reader {\n    use std::collections::HashMap;\n\n    pub fn get_feeds(rss_feeds: &HashMap\u003CString, String>) {\n        // Do something with the RSS feeds\n    }\n}\n```\n\n![Code Suggestions: Public module with `get_feeds()` function, and suggested input variable](https://about.gitlab.com/images/blogimages/learn-rust-with-ai-code-suggestions-advanced-programming/code_suggestions_rust_public_module_function_input.png){: .shadow}\n\nInside the function, continue to instruct Code Suggestions with the following steps:\n\n1. `// Iterate over the RSS feed URLs`\n2. `// Fetch URL content`\n3. `// Parse XML body`\n4. `// Print the result`\n\n![Code Suggestions: Public module with `get_feeds()` function, step 1: Iterate](https://about.gitlab.com/images/blogimages/learn-rust-with-ai-code-suggestions-advanced-programming/code_suggestions_rust_module_function_01_iterate.png){: .shadow}\n\n![Code Suggestions: Public module with `get_feeds()` function, step 2: Fetch URL content](https://about.gitlab.com/images/blogimages/learn-rust-with-ai-code-suggestions-advanced-programming/code_suggestions_rust_module_function_02_fetch_content.png){: .shadow}\n\n![Code Suggestions: Public module with `get_feeds()` function, step 3: Parse XML body](https://about.gitlab.com/images/blogimages/learn-rust-with-ai-code-suggestions-advanced-programming/code_suggestions_rust_module_function_03_parse_body.png){: .shadow}\n\n![Code Suggestions: Public module with `get_feeds()` function, step 4: Print the results](https://about.gitlab.com/images/blogimages/learn-rust-with-ai-code-suggestions-advanced-programming/code_suggestions_rust_module_function_04_print_result.png){: .shadow}\n\nThe following code can be suggested:\n\n```rust\n// Create public 
module feed_reader\n// Define get_feeds() function which takes rss_feeds as String HashMap reference as input\npub mod feed_reader {\n    use std::collections::HashMap;\n\n    pub fn get_feeds(rss_feeds: &HashMap\u003CString, String>) {\n        // Iterate over the RSS feed URLs\n        for (name, url) in rss_feeds {\n            println!(\"{}: {}\", name, url);\n\n            // Fetch URL content\n            let body = reqwest::blocking::get(url).unwrap().text().unwrap();\n\n            // Parse XML body\n            let parsed_body = roxmltree::Document::parse(&body).unwrap();\n\n            // Print the result\n            println!(\"{:#?}\", parsed_body);\n        }\n    }\n}\n```\n\nYou see a new keyword here: [`unwrap()`](https://doc.rust-lang.org/rust-by-example/error/option_unwrap.html). Rust does not support `null` values, and uses the [`Option` type](https://doc.rust-lang.org/rust-by-example/std/option.html) for any value. If you are certain to use a specific wrapped type, for example, `Text` or `String`, you can call the `unwrap()` method to get the value. The `unwrap()` method will panic if the value is `None`.\n\n**Note** Code Suggestions referred to the `reqwest::blocking::get` function for the `// Fetch URL content` comment instruction. The [`reqwest` crate](https://docs.rs/reqwest/latest/reqwest/) name is intentional and not a typo. It provides a convenient, higher-level HTTP client for async and blocking requests.\n\nParsing the XML body is tricky - you might get different results, and the schema is not the same for every RSS feed URL. Let us try to call the `get_feeds()` function, and then work on improving the code.\n\n### Call the module function in main()\n\nThe main() function does not know about the `get_feeds()` function yet, so we need to import its module. In other programming languages, you might have seen the keywords `include` or `import`. The Rust module system is different.\n\nModules are organized in path directories. 
In our example, both source files exist on the same directory level. `feed_reader.rs` is interpreted as crate, containing one module called `feed_reader`, which defines the function `get_feeds()`.\n\n```\nsrc/\n  main.rs\n  feed_reader.rs\n```\n\nIn order to access `get_feeds()` from the `feed_reader.rs` file, we need to [bring module path](https://doc.rust-lang.org/book/ch07-04-bringing-paths-into-scope-with-the-use-keyword.html) into the `main.rs` scope first, and then call the full function path.\n\n```rust\nmod feed_reader;\n\nfn main() {\n\n    feed_reader::feed_reader::get_feeds(&rss_feeds);\n\n```\n\nAlternatively, we can import the full function path with the `use` keyword, and later use the short function name.\n\n```rust\nmod feed_reader;\nuse feed_reader::feed_reader::get_feeds;\n\nfn main() {\n\n    get_feeds(&rss_feeds);\n\n```\n\n**Tip:** I highly recommend reading the [Clear explanation of the Rust module system blog post](https://www.sheshbabu.com/posts/rust-module-system/) to get a better visual understanding.\n\n```diff\n\nfn main() {\n    // ...\n\n    // Print feed_reader get_feeds() output\n    println!(\"{}\", feed_reader::get_feeds(&rss_feeds));\n```\n\n```rust\nuse std::collections::HashMap;\n\nmod feed_reader;\n// Alternative: Import full function path\n//use feed_reader::feed_reader::get_feeds;\n\nfn main() {\n    // Define RSS feed URLs in the variable rss_feeds\n    // Use a HashMap\n    // Add Hacker News and TechCrunch\n    // Ensure to use String as type\n    let rss_feeds = HashMap::from([\n        (\"Hacker News\".to_string(), \"https://news.ycombinator.com/rss\".to_string()),\n        (\"TechCrunch\".to_string(), \"https://techcrunch.com/feed/\".to_string()),\n    ]);\n\n    // Call get_feeds() from feed_reader module\n    feed_reader::feed_reader::get_feeds(&rss_feeds);\n    // Alternative: Imported full path, use short path here.\n    //get_feeds(&rss_feeds);\n}\n```\n\nRun `cargo build` in the terminal again to build the 
code.\n\n```shell\ncargo build\n```\n\nPotential build errors when Code Suggestions refer to common code and libraries for HTTP requests, and XML parsing:\n\n1. Error: `could not find blocking in reqwest`. Solution: Enable the `blocking` feature for the crate in `Config.toml`: `reqwest = { version = \"0.11.20\", features = [\"blocking\"] }`.\n2. Error: `failed to resolve: use of undeclared crate or module reqwest`. Solution: Add the `reqwest` crate.\n3. Error: `failed to resolve: use of undeclared crate or module roxmltree`. Solution: Add the `roxmltree` crate.\n\n```shell\nvim Config.toml\n\nreqwest = { version = \"0.11.20\", features = [\"blocking\"] }\n```\n\n```shell\ncargo add reqwest\ncargo add roxmltree\n```\n\n**Tip:** Copy the error message string, with a leading `Rust \u003Cerror message>` into your preferred browser to check whether a missing crate is available. Usually this search leads to a result on crates.io and you can add the missing dependencies.\n\nWhen the build is successful, run the code with `cargo run` and inspect the Hacker News RSS feed output.\n\n![VS Code terminal, cargo run to fetch Hacker News XML feed](https://about.gitlab.com/images/blogimages/learn-rust-with-ai-code-suggestions-advanced-programming/vs_code_terminal_fetch_rss_feed_output_hacker_news.png){: .shadow}\n\nWhat is next with parsing the XML body into human-readable format? In the next section, we will learn about existing solutions and how Rust crates come into play.\n\n## Crates\nRSS feeds share a common set of protocols and specifications. It feels like reinventing the wheel to parse XML items and understand the lower object structure. 
Recommendation for these types of tasks: Look whether someone else had the same problem already and might have created code to solve the problem.\n\nReusable library code in Rust is organized in so-called [`Crates`](https://doc.rust-lang.org/rust-by-example/crates.html), and made available in packages, and the package registry on crates.io. You can add these dependencies to your project by editing the `Config.toml` in the `[dependencies]` section, or using `cargo add \u003Cname>`.\n\nFor the reader app, we want to use the [feed-rs crate](https://crates.io/crates/feed-rs). Open a new terminal, and run the following command:\n\n```shell\ncargo add feed-rs\n```\n\n![VS Code Terminal Terminal: Add crate, verify in Config.toml](https://about.gitlab.com/images/blogimages/learn-rust-with-ai-code-suggestions-advanced-programming/vs_code_rust_crate_add_feed-rs_explained.png)\n\n### feed-rs: parse XML feed\nNavigate into `src/feed_reader.rs` and modify the part where we parse the XML body. Code Suggestions understands how to call the `feed-rs` crate `parser::parse` function -- there is only one specialty here: `feed-rs` [expects string input as raw bytes](https://docs.rs/feed-rs/latest/feed_rs/parser/fn.parse_with_uri.html) to determine the encoding itself. 
We can provide instructions in the comment to get the expected result though.\n\n```rust\n            // Parse XML body with feed_rs parser, input in bytes\n            let parsed_body = feed_rs::parser::parse(body.as_bytes()).unwrap();\n```\n\n![Code Suggestions: Public module with `get_feeds()` function, step 5: Modify XML parser to feed-rs](https://about.gitlab.com/images/blogimages/learn-rust-with-ai-code-suggestions-advanced-programming/code_suggestions_rust_module_function_05_use_feed_rs_to_parse.png){: .shadow}\n\nThe benefit of using `feed-rs` is not immediately visible until you see the printed output with `cargo run`: All keys and values are mapped to their respective Rust object types, and can be used for further operations.\n\n![VS Code terminal, cargo run to fetch Hacker News XML feed](https://about.gitlab.com/images/blogimages/learn-rust-with-ai-code-suggestions-advanced-programming/vs_code_terminal_fetch_rss_feed_output_hacker_news_feed_rs.png){: .shadow}\n\n## Runtime configuration: Program arguments\nUntil now, we have run the program with hard-coded RSS feed values compiled into the binary. The next step is allowing to configure the RSS feeds at runtime.\n\nRust provides [program arguments](https://doc.rust-lang.org/rust-by-example/std_misc/arg.html) in the standard misc library. [Parsing the arguments](https://doc.rust-lang.org/rust-by-example/std_misc/arg/matching.html) provides a better and faster learning experience than aiming for advanced program argument parsers (for example, the [clap](https://docs.rs/clap/latest/clap/) crate), or moving the program parameters into a configuration file and format ([TOML](https://toml.io/en/), YAML). You are reading these lines after I tried and failed with different routes for the best learning experience. 
This should not stop you from taking the challenge to configure RSS feeds in alternative ways.\n\nAs a boring solution, the command parameters can be passed as `\"name,url\"` string value pairs, and then are split by the `,` character to extract the name and URL values. The comment instructs Code Suggestions to perform these operations and extend the `rss_feeds` HashMap with the new values. Note that the variable might not be mutable, and, therefore, needs to be modified to `let mut rss_feeds`.\n\nNavigate into `src/main.rs` and add the following code to the `main()` function after the `rss_feeds` variable. Start with a comment to define the program arguments, and check the suggested code snippets.\n\n```rust\n    // Program args, format \"name,url\"\n    // Split value by , into name, url and add to rss_feeds\n```\n\n![Code suggestions for program arguments, and splitting name,URL values for the rss_feeds variable](https://about.gitlab.com/images/blogimages/learn-rust-with-ai-code-suggestions-advanced-programming/code_suggestions_rust_program_args_boring_solution.png){: .shadow}\n\nThe full code example can look like the following:\n\n```rust\nfn main() {\n    // Define RSS feed URLs in the variable rss_feeds\n    // Use a HashMap\n    // Add Hacker News and TechCrunch\n    // Ensure to use String as type\n    let mut rss_feeds = HashMap::from([\n        (\"Hacker News\".to_string(), \"https://news.ycombinator.com/rss\".to_string()),\n        (\"TechCrunch\".to_string(), \"https://techcrunch.com/feed/\".to_string()),\n    ]);\n\n    // Program args, format \"name,url\"\n    // Split value by , into name, url and add to rss_feeds\n    for arg in std::env::args().skip(1) {\n        let mut split = arg.split(\",\");\n        let name = split.next().unwrap();\n        let url = split.next().unwrap();\n        rss_feeds.insert(name.to_string(), url.to_string());\n    }\n\n    // Call get_feeds() from feed_reader module\n    
feed_reader::feed_reader::get_feeds(&rss_feeds);\n    // Alternative: Imported full path, use short path here.\n    //get_feeds(&rss_feeds);\n}\n```\n\nYou can pass program arguments directly to the `cargo run` command, preceding the arguments with `--`. Enclose all arguments with double quotes, put the name followed by a comma and the RSS feed URL as argument. Separate all arguments with whitespaces.\n\n```\ncargo build\n\ncargo run -- \"GitLab Blog,https://about.gitlab.com/atom.xml\" \"CNCF,https://www.cncf.io/feed/\"\n```\n\n![VS Code terminal, RSS feed output example for the GitLab blog](https://about.gitlab.com/images/blogimages/learn-rust-with-ai-code-suggestions-advanced-programming/vs_code_terminal_gitlab_blog_rss_feed_example.png){: .shadow}\n\n### User input error handling\nIf the provided user input does not match the program expectation, we need to [throw an error](https://doc.rust-lang.org/rust-by-example/error.html) and help the caller to fix the program arguments. For example, passing a malformed URL format should be treated as a runtime error. Instruct Code Suggestions with a code comment to throw an error if the URL is not valid.\n\n```rust\n    // Ensure that URL contains a valid format, otherwise throw an error\n```\n\nOne possible solution is to check if the `url` variable starts with `http://` or `https://`. If not, throw an error using the [panic! macro](https://doc.rust-lang.org/rust-by-example/std/panic.html). 
The full code example looks like the following:\n\n```rust\n    // Program args, format \"name,url\"\n    // Split value by , into name, url and add to rss_feeds\n    for arg in std::env::args().skip(1) {\n        let mut split = arg.split(\",\");\n        let name = split.next().unwrap();\n        let url = split.next().unwrap();\n\n        // Ensure that URL contains a valid format, otherwise throw an error\n        if !url.starts_with(\"http://\") && !url.starts_with(\"https://\") {\n            panic!(\"Invalid URL format: {}\", url);\n        }\n\n        rss_feeds.insert(name.to_string(), url.to_string());\n    }\n```\n\nTest the error handling with removing a `:` in one of the URL strings. Add the `RUST_BACKTRACE=full` environment variable to get more verbose output when the `panic()` call happens.\n\n```\nRUST_BACKTRACE=full cargo run -- \"GitLab Blog,https://about.gitlab.com/atom.xml\" \"CNCF,https//www.cncf.io/feed/\"\n```\n\n![VS Code Terminal with wrong URL format, panic error backtrace](https://about.gitlab.com/images/blogimages/learn-rust-with-ai-code-suggestions-advanced-programming/vs_code_terminal_url_format_error_panic_backtrace.png){: .shadow}\n\n## Persistence and data storage\nThe boring solution for storing the feed data is to dump the parsed body into a new file. 
Instruct Code Suggestions to use a pattern that includes the RSS feed name, and the current ISO date.\n\n```rust\n    // Parse XML body with feed_rs parser, input in bytes\n    let parsed_body = feed_rs::parser::parse(body.as_bytes()).unwrap();\n\n    // Print the result\n    println!(\"{:#?}\", parsed_body);\n\n    // Dump the parsed body to a file, as name-current-iso-date.xml\n    let now = chrono::offset::Local::now();\n    let filename = format!(\"{}-{}.xml\", name, now.format(\"%Y-%m-%d\"));\n    let mut file = std::fs::File::create(filename).unwrap();\n    file.write_all(body.as_bytes()).unwrap();\n```\n\nA possible suggestion will include using the [chrono crate](https://crates.io/crates/chrono). Add it using `cargo add chrono` and then invoke `cargo build` and `cargo run` again.\n\nThe files are written into the same directory where `cargo run` was executed. If you are executing the binary directly in the `target/debug/` directory, all files will be dumped there.\n\n![VS Code with CNCF RSS feed content file, saved on disk](https://about.gitlab.com/images/blogimages/learn-rust-with-ai-code-suggestions-advanced-programming/vs_code_cncf_rss_feed_saved_on_disk.png)\n\n## Optimization\nThe entries in the `rss_feeds` variable are executed sequentially. Imagine having a list of 100+ URLs configured - this could take a long time to fetch and process. What if we could execute multiple fetch requests in parallel?\n\n### Asynchronous execution\nRust provides [threads](https://doc.rust-lang.org/book/ch16-01-threads.html) for asynchronous execution.\n\nThe simplest solution will be spawning a thread for each RSS feed URL. We will discuss optimization strategies later. 
Before you continue with parallel execution, measure the sequential code execution time by prefixing the `cargo run` command with `time`.\n\n```\ntime cargo run -- \"GitLab Blog,https://about.gitlab.com/atom.xml\" \"CNCF,https://www.cncf.io/feed/\"\n\n0.21s user 0.08s system 10% cpu 2.898 total\n```\n\nNote that this exercise could require more manual code work. It is recommended to persist the sequential working state in a new Git commit and branch `sequential-exec`, to better compare the impact of parallel execution.\n\n```shell\ngit commit -avm \"Sequential execution working\"\ngit checkout -b sequential-exec\ngit push -u origin sequential-exec\n\ngit checkout main\n```\n\n### Spawning threads\nOpen `src/feed_reader.rs` and refactor the `get_feeds()` function. Start with a Git commit for the current state, and then delete the contents of the function scope. Add the following code comments with instructions for Code Suggestions:\n\n1. `// Store threads in vector`: Store thread handles in a vector, so we can wait for them to finish at the end of the function call.\n2. `// Loop over rss_feeds and spawn threads`: Create boilerplate code for iterating over all RSS feeds, and spawn a new thread.\n\nAdd the following `use` statements to work with the `thread` and `time` modules.\n\n```rust\n    use std::thread;\n    use std::time::Duration;\n```\n\nContinue writing the code, and close the for loop. 
Code Suggestions will automatically propose adding the thread handle in the `threads` vector variable, and offer to join the threads at the end of the function.\n\n```rust\n    pub fn get_feeds(rss_feeds: &HashMap\u003CString, String>) {\n\n        // Store threads in vector\n        let mut threads: Vec\u003Cthread::JoinHandle\u003C()>> = Vec::new();\n\n        // Loop over rss_feeds and spawn threads\n        for (name, url) in rss_feeds {\n            let thread_name = name.clone();\n            let thread_url = url.clone();\n            let thread = thread::spawn(move || {\n\n            });\n            threads.push(thread);\n        }\n\n        // Join threads\n        for thread in threads {\n            thread.join().unwrap();\n        }\n    }\n```\n\nAdd the `thread` crate, build and run the code again.\n\n```shell\ncargo add thread\n\ncargo build\n\ncargo run -- \"GitLab Blog,https://about.gitlab.com/atom.xml\" \"CNCF,https://www.cncf.io/feed/\"\n```\n\nAt this stage, no data is processed or printed. Before we continue re-adding the functionality, let us learn about the newly introduced keywords here.\n\n### Function scopes, threads, and closures\nThe suggested code brings new keywords and design patterns to learn. The thread handle is of the type `thread::JoinHandle`, indicating that we can use it to wait for the threads to finish ([join()](https://doc.rust-lang.org/book/ch16-01-threads.html#waiting-for-all-threads-to-finish-using-join-handles)).\n\n`thread::spawn()` spawns a new thread, where we can pass a function object. In this case, a [closure](https://doc.rust-lang.org/book/ch13-01-closures.html) expression is passed as anonymous function. Closure inputs are passed using the `||` syntax. You will recognize the [`move` Closure](https://doc.rust-lang.org/book/ch16-01-threads.html#using-move-closures-with-threads), which moves the function scoped variables into the thread scope. 
This avoids manually specifying which variables need to be passed into the new function/closure scope.\n\nThere is a limitation though: `rss_feeds` is a reference `&`, passed as parameter by the `get_feeds()` function caller. The variable is only valid in the function scope. Use the following code snippet to provoke this error:\n\n```rust\npub fn get_feeds(rss_feeds: &HashMap\u003CString, String>) {\n\n    // Store threads in vector\n    let mut threads: Vec\u003Cthread::JoinHandle\u003C()>> = Vec::new();\n\n    // Loop over rss_feeds and spawn threads\n    for (key, value) in rss_feeds {\n        let thread = thread::spawn(move || {\n            println!(\"{}\", key);\n        });\n    }\n}\n```\n\n![VS Code Terminal, variable scope error with references and thread move closure](https://about.gitlab.com/images/blogimages/learn-rust-with-ai-code-suggestions-advanced-programming/vs_code_terminal_cargo_build_error_function_threads_variable_scopes.png){: .shadow}\n\nAlthough the `key` variable was created in the function scope, it references the `rss_feeds` variable, and therefore, it cannot be moved into the thread scope. 
Any values accessed from the function parameter `rss_feeds` hash map will require a local copy with `clone()`.\n\n![VS Code Terminal, thread spawn with clone](https://about.gitlab.com/images/blogimages/learn-rust-with-ai-code-suggestions-advanced-programming/code_suggestions_rust_thread_spawn_clone.png){: .shadow}\n\n```rust\npub fn get_feeds(rss_feeds: &HashMap\u003CString, String>) {\n\n    // Store threads in vector\n    let mut threads: Vec\u003Cthread::JoinHandle\u003C()>> = Vec::new();\n\n    // Loop over rss_feeds and spawn threads\n    for (name, url) in rss_feeds {\n        let thread_name = name.clone();\n        let thread_url = url.clone();\n        let thread = thread::spawn(move || {\n            // Use thread_name and thread_url as values, see next chapter for instructions.\n```\n\n## Parse feed XML into object types\nThe next step is to repeat the RSS feed parsing steps in the thread closure. Add the following code comments with instructions for Code Suggestions:\n\n1. `// Parse XML body with feed_rs parser, input in bytes` to tell Code Suggestions that we want to fetch the RSS feed URL content, and parse it with the `feed_rs` crate functions.\n2. `// Check feed_type attribute feed_rs::model::FeedType::RSS2 or Atom and print its name`: Extract the feed type by comparing the `feed_type` attribute with the [`feed_rs::model::FeedType`](https://docs.rs/feed-rs/latest/feed_rs/model/enum.FeedType.html). 
This needs more direct instructions for Code Suggestions telling it about the exact Enum values to match against.\n\n![Instruct Code Suggestions to match against specific feed types](https://about.gitlab.com/images/blogimages/learn-rust-with-ai-code-suggestions-advanced-programming/code_suggestions_feed_rs_type_condition.png){: .shadow}\n\n```rust\n            // Parse XML body with feed_rs parser, input in bytes\n            let body = reqwest::blocking::get(thread_url).unwrap().bytes().unwrap();\n            let feed = feed_rs::parser::parse(body.as_ref()).unwrap();\n\n            // Check feed_type attribute feed_rs::model::FeedType::RSS2 or Atom and print its name\n            if feed.feed_type == feed_rs::model::FeedType::RSS2 {\n                println!(\"{} is an RSS2 feed\", thread_name);\n            } else if feed.feed_type == feed_rs::model::FeedType::Atom {\n                println!(\"{} is an Atom feed\", thread_name);\n            }\n```\n\nBuild and run the program again, and verify its output.\n\n```\ntime cargo run -- \"GitLab Blog,https://about.gitlab.com/atom.xml\" \"CNCF,https://www.cncf.io/feed/\"\n\nCNCF is an RSS2 feed\nTechCrunch is an RSS2 feed\nGitLab Blog is an Atom feed\nHacker News is an RSS2 feed\n```\n\nLet us verify this output by opening the feed URLs in the browser, or inspecting the previously downloaded files.\n\nHacker News supports RSS version 2.0, with `channel(title,link,description,item(title,link,pubDate,comments))`. 
TechCrunch and the CNCF blog follow a similar structure.\n```xml\n\u003Crss version=\"2.0\">\u003Cchannel>\u003Ctitle>Hacker News\u003C/title>\u003Clink>https://news.ycombinator.com/\u003C/link>\u003Cdescription>Links for the intellectually curious, ranked by readers.\u003C/description>\u003Citem>\u003Ctitle>Writing a debugger from scratch: Breakpoints\u003C/title>\u003Clink>https://www.timdbg.com/posts/writing-a-debugger-from-scratch-part-5/\u003C/link>\u003CpubDate>Wed, 27 Sep 2023 06:31:25 +0000\u003C/pubDate>\u003Ccomments>https://news.ycombinator.com/item?id=37670938\u003C/comments>\u003Cdescription>\u003C![CDATA[\u003Ca href=\"https://news.ycombinator.com/item?id=37670938\">Comments\u003C/a>]]>\u003C/description>\u003C/item>\u003Citem>\n```\n\nThe GitLab blog uses the [Atom](https://datatracker.ietf.org/doc/html/rfc4287) feed format similar to RSS, but still requires different parsing logic.\n```xml\n\u003C?xml version='1.0' encoding='utf-8' ?>\n\u003Cfeed xmlns='http://www.w3.org/2005/Atom'>\n\u003C!-- / Get release posts -->\n\u003C!-- / Get blog posts -->\n\u003Ctitle>GitLab\u003C/title>\n\u003Cid>https://about.gitlab.com/blog\u003C/id>\n\u003Clink href='https://about.gitlab.com/blog/' />\n\u003Cupdated>2023-09-26T00:00:00+00:00\u003C/updated>\n\u003Cauthor>\n\u003Cname>The GitLab Team\u003C/name>\n\u003C/author>\n\u003Centry>\n\u003Ctitle>Atlassian Server ending: Goodbye disjointed toolchain, hello DevSecOps platform\u003C/title>\n\u003Clink href='https://about.gitlab.com/blog/atlassian-server-ending-move-to-a-single-devsecops-platform/' rel='alternate' />\n\u003Cid>https://about.gitlab.com/blog/atlassian-server-ending-move-to-a-single-devsecops-platform/\u003C/id>\n\u003Cpublished>2023-09-26T00:00:00+00:00\u003C/published>\n\u003Cupdated>2023-09-26T00:00:00+00:00\u003C/updated>\n\u003Cauthor>\n\u003Cname>Dave Steer, Justin Farris\u003C/name>\n\u003C/author>\n```\n\n### Map generic feed data types\nUsing 
[`roxmltree::Document::parse`](https://docs.rs/roxmltree/latest/roxmltree/struct.Document.html) would require us to understand the XML node tree and its specific tag names. Fortunately, [feed_rs::model::Feed](https://docs.rs/feed-rs/latest/feed_rs/model/struct.Feed.html) provides a combined model for RSS and Atom feeds, therefore let us continue using the `feed_rs` crate.\n\n1. Atom: Feed->Feed, Entry->Entry\n2. RSS: Channel->Feed, Item->Entry\n\nIn addition to the mapping above, we need to extract the required attributes, and map their data types. It is helpful to open the [feed_rs::model documentation](https://docs.rs/feed-rs/latest/feed_rs/model/index.html) to understand the structs and their fields and implementations. Otherwise, some suggestions would result in type conversion errors and compilation failures, that are specific to the `feed_rs` implementation.\n\nA [`Feed`](https://docs.rs/feed-rs/latest/feed_rs/model/struct.Feed.html) struct provides the `title`, type `Option\u003CText>` (either a value is set, or nothing). An [`Entry`](https://docs.rs/feed-rs/latest/feed_rs/model/struct.Entry.html) struct provides:\n\n1. `title`: `Option\u003CText>`with [`Text`](https://docs.rs/feed-rs/latest/feed_rs/model/struct.Text.html) and the `content` field as `String`.\n2. `updated`: `Option\u003CDateTime\u003CUtc>>` with [`DateTime`](https://docs.rs/chrono/latest/chrono/struct.DateTime.html) with the [`format()` method](https://docs.rs/chrono/latest/chrono/struct.DateTime.html#method.format).\n3. `summary`: `Option\u003CText>` [`Text`](https://docs.rs/feed-rs/latest/feed_rs/model/struct.Text.html) and the `content` field as `String`.\n4. `links`: `Vec\u003CLink>`, vector with [`Link`](https://docs.rs/feed-rs/latest/feed_rs/model/struct.Link.html) items. The `href` attribute provides the raw URL string.\n\nUse this knowledge to extract the required data from the feed entries. 
Reminder that all `Option` types need to call `unwrap()`, which requires more raw instructions for Code Suggestions.\n\n```rust\n                // https://docs.rs/feed-rs/latest/feed_rs/model/struct.Feed.html\n                // https://docs.rs/feed-rs/latest/feed_rs/model/struct.Entry.html\n                // Loop over all entries, and print\n                // title.unwrap().content\n                // published.unwrap().format\n                // summary.unwrap().content\n                // links href as joined string\n                for entry in feed.entries {\n                    println!(\"Title: {}\", entry.title.unwrap().content);\n                    println!(\"Published: {}\", entry.published.unwrap().format(\"%Y-%m-%d %H:%M:%S\"));\n                    println!(\"Summary: {}\", entry.summary.unwrap().content);\n                    println!(\"Links: {:?}\", entry.links.iter().map(|link| link.href.clone()).collect::\u003CVec\u003CString>>().join(\", \"));\n                    println!();\n                }\n```\n\n![Code suggestions to print feed entry types, with specific requirements](https://about.gitlab.com/images/blogimages/learn-rust-with-ai-code-suggestions-advanced-programming/code_suggestions_print_feed_entries_fields_with_rust_type_specifics.png){: .shadow}\n\n### Error handling with Option unwrap()\nContinue iterating on the multi-line instructions after building and running the program again. Spoiler: `unwrap()` will call the `panic!` macro and crash the program when it encounters empty values. 
This can happen if a field like `summary` is not set in the feed data.\n\n```shell\nGitLab Blog is an Atom feed\nTitle: How the Colmena project uses GitLab to support citizen journalists\nPublished: 2023-09-27 00:00:00\nthread '\u003Cunnamed>' panicked at 'called `Option::unwrap()` on a `None` value', src/feed_reader.rs:40:59\n```\n\nA potential solution is to use [`std::Option::unwrap_or_else`](https://doc.rust-lang.org/std/option/enum.Option.html#method.unwrap_or_else) and set an empty string as default value. The syntax requires a closure that returns an empty `Text` struct instantiation.\n\nSolving the problem required many attempts to find the correct initialization, passing just an empty string did not work with the custom types. I will show you all my endeavors, including the research paths.\n\n```rust\n// Problem: The `summary` attribute is not always initialized. unwrap() will panic! then.\n// Requires use mime; and use feed_rs::model::Text;\n/*\n// 1st attempt: Use unwrap() to extraxt Text from Option\u003CText> type.\nprintln!(\"Summary: {}\", entry.summary.unwrap().content);\n// 2nd attempt. Learned about unwrap_or_else, passing an empty string.\nprintln!(\"Summary: {}\", entry.summary.unwrap_or_else(|| \"\").content);\n// 3rd attempt. summary is of the Text type, pass a new struct instantiation.\nprintln!(\"Summary: {}\", entry.summary.unwrap_or_else(|| Text{}).content);\n// 4th attempt. Struct instantiation requires 3 field values.\nprintln!(\"Summary: {}\", entry.summary.unwrap_or_else(|| Text{\"\", \"\", \"\"}).content);\n// 5th attempt. Struct instantation with public fields requires key: value syntax\nprintln!(\"Summary: {}\", entry.summary.unwrap_or_else(|| Text{content_type: \"\", src: \"\", content: \"\"}).content);\n// 6th attempt. 
Reviewed expected Text types in https://docs.rs/feed-rs/latest/feed_rs/model/struct.Text.html and created Mime and String objects\nprintln!(\"Summary: {}\", entry.summary.unwrap_or_else(|| Text{content_type: mime::TEXT_PLAIN, src: String::new(), content: String::new()}).content);\n// 7th attempt: String and Option\u003CString> cannot be casted automagically. Compiler suggested using `Option::Some()`.\nprintln!(\"Summary: {}\", entry.summary.unwrap_or_else(|| Text{content_type: mime::TEXT_PLAIN, src: Option::Some(), content: String::new()}).content);\n*/\n\n// xth attempt: Solution. Option::Some() requires a new String object.\nprintln!(\"Summary: {}\", entry.summary.unwrap_or_else(|| Text{content_type: mime::TEXT_PLAIN, src: Option::Some(String::new()), content: String::new()}).content);\n```\n\nThis approach did not feel satisfying, since the code line is complicated to read, and required manual work without help from Code Suggestions. Taking a step back, I reviewed what brought me there - if `Option` is `none`, `unwrap()` will throw an error. Maybe there is an easier way to handle this? I asked Code Suggestions in a new comment:\n\n```\n                // xth attempt: Solution. Option::Some() requires a new String object.\n                println!(\"Summary: {}\", entry.summary.unwrap_or_else(|| Text{content_type: mime::TEXT_PLAIN, src: Option::Some(String::new()), content: String::new()}).content);\n\n                // Alternatively, use Option.is_none()\n```\n\n![Code suggestions asked for alternative with Options.is_none](https://about.gitlab.com/images/blogimages/learn-rust-with-ai-code-suggestions-advanced-programming/code_suggestions_after_complex_unwrap_or_else_ask_for_alternative_option.png){: .shadow}\n\nIncreased readability, less CPU cycles wasted on `unwrap()`, and a great learning curve from solving a complex problem to using a boring solution. 
Win-win.\n\nBefore we forget: Re-add storing the XML data on disk to complete the reader app again.\n\n```rust\n                // Dump the parsed body to a file, as name-current-iso-date.xml\n                let file_name = format!(\"{}-{}.xml\", thread_name, chrono::Local::now().format(\"%Y-%m-%d-%H-%M-%S\"));\n                let mut file = std::fs::File::create(file_name).unwrap();\n                file.write_all(body.as_ref()).unwrap();\n```\n\nBuild and run the program to verify the output.\n\n```shell\ncargo build\n\ntime cargo run -- \"GitLab Blog,https://about.gitlab.com/atom.xml\" \"CNCF,https://www.cncf.io/feed/\"\n```\n\n![VS Code Terminal, cargo run with formatted feed entries output](https://about.gitlab.com/images/blogimages/learn-rust-with-ai-code-suggestions-advanced-programming/vs_code_terminal_cargo_run_formatted_output_final.png)\n\n## Benchmarks\n\n### Sequential vs. Parallel execution benchmark\nCompare the execution time benchmarks by creating five samples each.\n\n1. Sequential execution. [Example source code MR](https://gitlab.com/gitlab-de/use-cases/ai/learn-with-ai/learn-rust-ai-app-reader/-/merge_requests/1)\n2. Parallel execution. 
[Example source code MR](https://gitlab.com/gitlab-de/use-cases/ai/learn-with-ai/learn-rust-ai-app-reader/-/merge_requests/3)\n\n```shell\n# Sequential\ngit checkout sequential-exec\n\ntime cargo run -- \"GitLab Blog,https://about.gitlab.com/atom.xml\" \"CNCF,https://www.cncf.io/feed/\"\n\n0.21s user 0.08s system 10% cpu 2.898 total\n0.21s user 0.08s system 11% cpu 2.585 total\n0.21s user 0.09s system 10% cpu 2.946 total\n0.19s user 0.08s system 10% cpu 2.714 total\n0.20s user 0.10s system 10% cpu 2.808 total\n```\n\n```shell\n# Parallel\ngit checkout parallel-exec\n\ntime cargo run -- \"GitLab Blog,https://about.gitlab.com/atom.xml\" \"CNCF,https://www.cncf.io/feed/\"\n\n0.19s user 0.08s system 17% cpu 1.515 total\n0.18s user 0.08s system 16% cpu 1.561 total\n0.18s user 0.07s system 17% cpu 1.414 total\n0.19s user 0.08s system 18% cpu 1.447 total\n0.17s user 0.08s system 16% cpu 1.453 total\n```\n\nThe CPU usage increased for parallel execution of four RSS feed threads, but it nearly halved the total time compared to sequential execution. With that in mind, we can continue learning Rust and optimize the code and functionality.\n\nNote that we are running the debug build through Cargo, and not the optimized released builds yet. There are caveats with parallel execution though: Some HTTP endpoints put rate limits in place, where parallelism could hit these thresholds easier.\n\nThe system executing multiple threads in parallel might get overloaded too – threads require context switching in the Kernel, assigning resources to each thread. While one thread gets computing resources, other threads are put to sleep. If there are too many threads spawned, this might slow down the system, rather than speeding up the operations. 
Solutions include design patterns such as [work queues](https://docs.rs/work-queue/latest/work_queue/), where the caller adds a task into a queue, and a defined number of worker threads pick up the tasks for asynchronous execution.\n\nRust also provides data synchronisation between threads, so-called [channels](https://doc.rust-lang.org/rust-by-example/std_misc/channels.html). To ensure concurrent data access, [mutexes](https://doc.rust-lang.org/std/sync/struct.Mutex.html) are available to provide safe locks.\n\n### CI/CD with Rust caching\nAdd the following CI/CD configuration into the `.gitlab-ci.yml` file. The `run-latest` job calls `cargo run` with RSS feed URL examples, and measures the execution time continuously.\n\n```\nstages:\n  - build\n  - test\n  - run\n\ndefault:\n  image: rust:latest\n  cache:\n    key: ${CI_COMMIT_REF_SLUG}\n    paths:\n      - .cargo/bin\n      - .cargo/registry/index\n      - .cargo/registry/cache\n      - target/debug/deps\n      - target/debug/build\n    policy: pull-push\n\n# Cargo data needs to be in the project directory for being cached.\nvariables:\n  CARGO_HOME: ${CI_PROJECT_DIR}/.cargo\n\nbuild-latest:\n  stage: build\n  script:\n    - cargo build --verbose\n\ntest-latest:\n  stage: build\n  script:\n    - cargo test --verbose\n\nrun-latest:\n  stage: run\n  script:\n    - time cargo run -- \"GitLab Blog,https://about.gitlab.com/atom.xml\" \"CNCF,https://www.cncf.io/feed/\"\n```\n\n![GitLab CI/CD pipelines for Rust, cargo run output](https://about.gitlab.com/images/blogimages/learn-rust-with-ai-code-suggestions-advanced-programming/gitlab_cicd_pipeline_rust_cargo_run_output.png){: .shadow}\n\n## What is next\nThis blog post was challenging to create, with both learning advanced Rust programming techniques myself, and finding a good learning curve with Code Suggestions. 
The latter greatly helps with quickly generating code, not just boilerplate snippets – it understands the local context, and better understands the purpose and scope of the algorithm, the more code you write. After reading this blog post, you know of a few challenges and turnarounds. The example solution code for the reader app is available in [the learn-rust-ai-app-reader project](https://gitlab.com/gitlab-de/use-cases/ai/learn-with-ai/learn-rust-ai-app-reader).\n\nParsing RSS feeds is challenging since it involves data structures, with external HTTP requests and parallel optimizations. As an experienced Rust user, you might have wondered: `Why not use the std::rss crate?` -- It is optimized for advanced asynchronous execution, and does not allow to show and explain the different Rust functionalities, explained in this blog post. As an async exercise, try to rewrite the code using the [`rss` crate](https://docs.rs/rss/latest/rss/).\n\n### Async learning exercises\nThe lessons learned in this blog post also lay the foundation for future exploration with persistent storage and presenting the data. Here are a few ideas where you can continue learning Rust and optimize the reader app:\n\n1. Data storage: Use a database like sqlite, and RSS feed update tracking.\n2. Notifications: Spawn child processes to trigger notifications into Telegram, etc.\n3. Functionality: Extend the reader types to REST APIs\n4. Configuration: Add support for configuration files for RSS feeds, APIs, etc.\n5. Efficiency: Add support for filters, and subscribed tags.\n6. Deployments: Use a webserver, collect Prometheus metrics, and deploy to Kubernetes.\n\nIn a future blog post, we will discuss some of these ideas, and how to implement them. 
Dive into existing RSS feed implementations, and learn how you can refactor the existing code into leveraging more Rust libraries (`crates`).\n\n### Share your feedback\nWhen you use [GitLab Duo](/gitlab-duo/) Code Suggestions, please [share your thoughts in the feedback issue](https://gitlab.com/gitlab-org/gitlab/-/issues/405152).\n",[703,2283,724,746,9],"careers",{"slug":2285,"featured":6,"template":680},"learn-advanced-rust-programming-with-a-little-help-from-ai-code-suggestions","content:en-us:blog:learn-advanced-rust-programming-with-a-little-help-from-ai-code-suggestions.yml","Learn Advanced Rust Programming With A Little Help From Ai Code Suggestions","en-us/blog/learn-advanced-rust-programming-with-a-little-help-from-ai-code-suggestions.yml","en-us/blog/learn-advanced-rust-programming-with-a-little-help-from-ai-code-suggestions",{"_path":2291,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":2292,"content":2297,"config":2301,"_id":2303,"_type":14,"title":2304,"_source":16,"_file":2305,"_stem":2306,"_extension":19},"/en-us/blog/learning-python-with-a-little-help-from-ai-code-suggestions",{"title":2293,"description":2294,"ogTitle":2293,"ogDescription":2294,"noIndex":6,"ogImage":2057,"ogUrl":2295,"ogSiteName":693,"ogType":694,"canonicalUrls":2295,"schema":2296},"Learning Python with a little help from AI","Use this guided tutorial, along with GitLab Duo Code Suggestions, to learn a new programming language.","https://about.gitlab.com/blog/learning-python-with-a-little-help-from-ai-code-suggestions","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Learning Python with a little help from AI\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Michael Friedrich\"}],\n        \"datePublished\": \"2023-11-09\",\n      }",{"title":2293,"description":2294,"authors":2298,"heroImage":2057,"date":1641,"body":2299,"category":10,"tags":2300},[1217],"\nLearning a new programming 
language can help broaden your software development expertise, open career opportunities, or create fun challenges. However, it can be difficult to decide on one specific approach to learning a new language. Artificial intelligence (AI) can help. In this tutorial, you'll learn how to leverage AI-powered GitLab Duo Code Suggestions for a guided experience in learning the Python programming language with a practical hands-on example.\n\n- [Preparations](#preparations)\n  - [VS Code](#vs-code)\n  - [Code Suggestions](#code-suggestions)\n- [Learning a new programming language: Python](#learning-a-new-programming-language-python)\n    - [Development environment for Python](#development-environment-for-python)\n    - [Hello, World](#hello-world)\n- [Start learning Python with a practical example](#start-learning-python-with-a-practical-example)\n    - [Define variables and print them](#define-variables-and-print-them)\n    - [Explore variable types](#explore-variable-types)\n- [File I/O: Read and print a log file](#file-io-read-and-print-a-log-file)\n- [Flow control](#flow-control)\n    - [Loops and lists to collect files](#loops-and-lists-to-collect-files)\n    - [Conditionally collect files](#conditionally-collect-files)\n- [Functions](#functions)\n    - [Start with a simple log format](#start-with-a-simple-log-format)\n    - [String and data structure operations](#string-and-data-structure-operations)\n    - [Parse log files using regular expressions](#parse-log-files-using-regular-expressions)\n    - [Advanced log format: auth.log](#advanced-log-format-authlog)\n    - [Parsing more types: Structured logging](#parsing-more-types-structured-logging)\n- [Printing results and formatting](#printing-results-and-formatting)\n- [Dependency management and continuous verification](#dependency-management-and-continuous-verification)\n    - [Pip and pyenv: Bringing structure into Python](#pip-and-pyenv-bringing-structure-into-python)\n    - [Automation: Configure CI/CD pipeline 
for Python](#automation-configure-cicd-pipeline-for-python)\n- [What is next](#what-is-next)\n    - [Async learning exercises](#async-learning-exercises)\n    - [Share your feedback](#share-your-feedback)\n\n## Preparations \n\nChoose your [preferred and supported IDE](https://docs.gitlab.com/ee/user/project/repository/code_suggestions.html#enable-code-suggestions-in-other-ides-and-editors), and follow the documentation to enable Code Suggestions for [GitLab.com SaaS](https://docs.gitlab.com/ee/user/project/repository/code_suggestions.html#enable-code-suggestions-on-gitlab-saas) or [GitLab self-managed instances](https://docs.gitlab.com/ee/user/project/repository/code_suggestions.html#enable-code-suggestions-on-self-managed-gitlab).\n\nProgramming languages can require installing the language interpreter command-line tools or compilers that generate binaries from source code to build and run the application.\n\n**Tip:** You can also use [GitLab Remote Development workspaces](/blog/quick-start-guide-for-gitlab-workspaces/) to create your own cloud development environments, instead of local development environments. This blog post focuses on using VS Code and the GitLab Web IDE. \n\n### VS Code\n\n[Install VS Code](https://code.visualstudio.com/download) on your client, and open it. Navigate to the `Extensions` menu and search for `gitlab workflow`. Install the [GitLab Workflow extension for VS Code](https://marketplace.visualstudio.com/items?itemName=GitLab.gitlab-workflow). VS Code will also detect the programming languages, and offer to install additional plugins for syntax highlighting and development experience. For example, install the [Python extension](https://marketplace.visualstudio.com/items?itemName=ms-python.python).\n\n### Code Suggestions\n\nFamiliarize yourself with suggestions before actually verifying the suggestions. GitLab Duo Code Suggestions are provided as you type, so you do not need use specific keyboard shortcuts. 
To accept a code suggestion, press the `tab` key. Also note that writing new code works more reliably than refactoring existing code. AI is non-deterministic, which means that the same suggestion may not be repeated after deleting the code suggestion. While Code Suggestions is in Beta, we are working on improving the accuracy of generated content overall. Please review the [known limitations](https://docs.gitlab.com/ee/user/project/repository/code_suggestions.html#known-limitations), as this could affect your learning experience.\n\n**Tip:** The latest release of Code Suggestions supports multiline instructions. You can refine the specifications to your needs to get better suggestions. We will practice this method throughout the blog post.\n\n## Learning a new programming language: Python  \n\nNow, let's dig into learning Python, which is one of the [supported languages in Code Suggestions](https://docs.gitlab.com/ee/user/project/repository/code_suggestions.html#supported-languages). \n\nBefore diving into the source code, make sure to set up your development environment.\n\n### Development environment for Python \n\n1) Create a new project `learn-python-ai` in GitLab, and clone the project into your development environment. All code snippets are available in this [\"Learn Python with AI\" project](https://gitlab.com/gitlab-de/use-cases/ai/learn-with-ai/learn-python-ai).\n\n```shell\ngit clone https://gitlab.com/NAMESPACE/learn-python-ai.git\n\ncd learn-python-ai\n\ngit status\n```\n\n2) Install Python and the build toolchain. Example on macOS using Homebrew:\n\n```\nbrew install python\n```\n\n3) Consider adding a `.gitignore` file for Python, for example this [.gitignore template for Python](https://gitlab.com/gitlab-org/gitlab/-/blob/master/vendor/gitignore/Python.gitignore?ref_type=heads). \n\nYou are all set to learn Python! 
\n\n### Hello, World\n\nStart your learning journey in the [official documentation](https://www.python.org/about/gettingstarted/), and review the linked resources, for example, the [Python tutorial](https://docs.python.org/3/tutorial/index.html). The [library](https://docs.python.org/3/library/index.html) and [language reference](https://docs.python.org/3/reference/index.html) documentation can be helpful, too. \n\n**Tip:** When I touched base with Python in 2005, I did not have many use cases except as a framework to test Windows 2000 drivers. Later, in 2016, I refreshed my knowledge with the book \"Head First Python, 2nd Edition,\" providing great practical examples for the best learning experience – two weeks later, I could explain the differences between Python 2 and 3. You do not need to worry about Python 2 – it has been deprecated some years ago, and we will focus only on Python 3 in this blog post. In August 2023, \"[Head First Python, 3rd Edition](https://www.oreilly.com/library/view/head-first-python/9781492051282/)\" was published. The book provides a great learning resource, along with the exercises shared in this blog post. \n\nCreate a new file `hello.py` in the root directory of the project and start with a comment saying `# Hello world`. Review and accept the suggestion by pressing the `tab` key and save the file (keyboard shortcut: cmd s). \n\n```\n# Hello world\n```\n\nCommit the change to the Git repository. In VS Code, use the keyboard shortcut `ctrl shift G`, add a commit message, and hit `cmd enter` to submit. \n\nUse the command palette (`cmd shift p`) and search for `create terminal` to open a new terminal. Run the code with the Python interpreter. 
On macOS, the binary from Homebrew is called `python3`, other operating systems and distributions might use `python` without the version.\n\n```shell\npython3 hello.py\n```\n\n![Hello World, hello GitLab Duo Code Suggestions](https://about.gitlab.com/images/blogimages/learn-python-with-ai-code-suggestions-getting-started/vs_code_python_code_sugestions_hello_world.png)\n\n**Tip:** Adding code comments in Python starting with the `#` character before you start writing a function or algorithm will help Code Suggestions with more context to provide better suggestions. In the example above, we did that with `# Hello world`, and will continue doing so in the next exercises.\n\nAdd `hello.py` to Git, commit all changes and push them to your GitLab project.\n\n```shell\ngit add hello.py\n\ngit commit -avm \"Initialize Python\"\n\ngit push\n```\n\nThe source code for all exercises in this blog post is available in this [\"Learn Python with AI\" project](https://gitlab.com/gitlab-de/use-cases/ai/learn-with-ai/learn-python-ai).\n\n## Start learning Python with a practical example \n\nThe learning goal in the following sections involves diving into the language datatypes, variables, flow control, and functions. We will also look into file operations, string parsing, and data structure operations for printing the results. The exercises will help build a command-line application that reads different log formats, works with the data, and provides a summary. 
This will be the foundation for future projects that fetch logs from REST APIs, and inspire more ideas such as rendering images, creating a web server, or adding Observability metrics.\n\n![Parsing log files into structured objects, example result after following the exercises](https://about.gitlab.com/images/blogimages/learn-python-with-ai-code-suggestions-getting-started/vs_code_terminal_parsing_logs_and_pretty_print_results.png)\n\nAs an experienced admin, you can put the script into production and use real-world log format exmples. Parsing and analyzing logs in stressful production incidents can be time-consuming. A local CLI tool is sometimes faster than a log management tool.\n\nLet's get started: Create a new file called `log_reader.py` in the directory root, add it to Git, and create a Git commit.\n\n### Define variables and print them\n\nAs a first step, we need to define the log files location, and the expected file suffix. Therefore, let's create two variables and print them. Actually, ask Code Suggestions to do that for you by writing only the code comments and accepting the suggestions. Sometimes, you need to experiment with suggestions and delete already accepted code blocks. 
Do not worry – the quality of the suggestions will improve over time as the model generates better suggestions with more context.\n\n![Define log path and file suffix variables](https://about.gitlab.com/images/blogimages/learn-python-with-ai-code-suggestions-getting-started/vs_code_code_suggestions_log_reader_variables_01.png){: .shadow}\n\n![Print the variables to verify](https://about.gitlab.com/images/blogimages/learn-python-with-ai-code-suggestions-getting-started/vs_code_code_suggestions_log_reader_variables_02.png){: .shadow}\n\n```python\n# Specify the path and file suffix in variables\npath = '/var/log/'\nfile_suffix = '.log'\n\n# Print the variables \n\nprint(path)\nprint(file_suffix)\n```\n\nNavigate into the VS Code terminal and run the Python script:\n\n```shell\npython3 log_reader.py\n```\n\n![VS Code terminal, printing the variables](https://about.gitlab.com/images/blogimages/learn-python-with-ai-code-suggestions-getting-started/vs_code_terminal_print_variables.png)\n\nPython supports many different types in the [standard library](https://docs.python.org/3/library/index.html). Most common types are: Numeric (int, float, complex), Boolean (True, False), and String (str). Data structures include support for lists, tuples, and dictionaries. \n\n### Explore variable types \n\nTo practice different variable types, let's define a limit of log files to read as a variable with the `integer` type.\n\n![Log file variable](https://about.gitlab.com/images/blogimages/learn-python-with-ai-code-suggestions-getting-started/vs_code_code_suggestions_log_reader_variables_03.png){: .shadow}\n\n```python\n# Define log file limit variable \nlog_file_limit = 1024 \n```\n\nCreate a Boolean variable that forces to read all files in the directory, no matter the log file suffix. 
\n\n```python\n# Define boolean variable whether to read all files recursively\nread_all_files_recursively = True\n```\n\n## File I/O: Read and print a log file\n\nCreate a directory called `log-data` in your project tree. You can copy all file examples from the [log-data directory in the example project](https://gitlab.com/gitlab-de/use-cases/ai/learn-with-ai/learn-python-ai/-/tree/main/log-data?ref_type=heads).  \n\nCreate a new file `sample.log` with the following content, or any other two lines that provide a different message at the end.\n\n```\nOct 17 00:00:04 ebpf-chaos systemd[1]: dpkg-db-backup.service: Deactivated successfully.\nOct 17 00:00:04 ebpf-chaos systemd[1]: Finished Daily dpkg database backup service.\n```\n\nInstruct Code Suggestions to read the file `log-data/sample.log` and print the content. \n\n![Code Suggestions: Read log file and print it](https://about.gitlab.com/images/blogimages/learn-python-with-ai-code-suggestions-getting-started/vs_code_code_suggestions_read_log_file_and_print.png){: .shadow}\n\n```python\n# Read the file in log-data/sample.log and print its content\nwith open('log-data/sample.log', 'r') as f:\n    print(f.read())\n```\n\n**Tip:** You will notice the indent here. The `with open() as f:` statement opens a new scope where `f` is available as stream. This flow requires indenting )`tab`) the code block, and perform actions in this scope, calling `f.read()` to read the file contents, and passing the immediate value as parameter into the `print()` function.\n\nNavigate into the terminal, and run the script again with `python3 log_reader.py`. 
You will see the file content shown in the VS Code editor, also printed into the terminal.\n\n![VS Code terminal: Read log file, and print it](https://about.gitlab.com/images/blogimages/learn-python-with-ai-code-suggestions-getting-started/vs_code_terminal_print_logfile_show_file_sample.png)\n\n## Flow control \n\nReading one log file is not enough – we want to analyze all files in a given directory recursively. For the next exercise, we instruct Code Suggestions to create an index of all files. \n\nPrepare the `log-data` directory with more example files from the [log-data directory in the example project](https://gitlab.com/gitlab-de/use-cases/ai/learn-with-ai/learn-python-ai/-/tree/main/log-data?ref_type=heads). The directory tree should look as follows:\n\n```shell\ntree log-data                                                             ─╯\nlog-data\n├── sample.log\n└── var\n    └── log\n        ├── auth.log\n        ├── syslog.log\n        └── syslog_structured.log\n\n3 directories, 4 files\n```\n\n### Loops and lists to collect files \n\nModify the `path` variable to use the value `log-data/`. \n\n```python\n# Specify the path and file suffix in variables\npath = 'log-data/'\nfile_suffix = '.log'\n```\n\nTell Code Suggestions to read all file paths in the directory into a list. After the collection loop, print the list of file paths. 
\n\n```python\n# Read all file paths in the directory into a list\n\n# Print the list of log file paths\n```\n\n![Code Suggestion, collect file paths](https://about.gitlab.com/images/blogimages/learn-python-with-ai-code-suggestions-getting-started/vs_code_code_suggestions_collect_files.png){: .shadow}\n\nAn example of a suggestion could look like this:\n\n```python\n# Read all file paths in the directory into a list\nimport os\n\n# Read all file paths in the directory into a list\nlog_files = []\nfor root, directories, files in os.walk(path):\n    for file in files:\n        if file.endswith(file_suffix):\n            log_files.append(os.path.join(root, file))\n\n# Print the list of log file paths\n\nprint(log_files)\n```\n\nLet's analyze what happens here: The `import os` statement is required to bring the `os` library into the current scope, and we are able to call `os.walk()` later. The `log_files` [list](https://docs.python.org/3/library/stdtypes.html#sequence-types-list-tuple-range) is initialized as an empty list using empty brackets. `os.walk()` expects a path as directory tree to start searching for files. The function returns a generator object, which will walk the directory tree recursively. The returned tuples include the collected files, which we want to access in a loop iteraor, using the `for` loop notation. When the file ends with the defined suffix, its full path is appended to the list. This is an optimized and complex example. Do not worry about understanding everything just yet - we will revisit this later. \n\n### Conditionally collect files \n\nModify the instructions and add the Boolean flag evaluation for `read_all_files_recursively` as an input to the glob() function. 
\n\n```python\n# Read all file paths in the top level directory into a list\n# If read_all_files_recursively is True, read all files recursively\n```\n\n![Code Suggestions, read files recursively conditionally](https://about.gitlab.com/images/blogimages/learn-python-with-ai-code-suggestions-getting-started/vs_code_code_suggestions_read_files_recursively_conditionally.png){: .shadow}\n\n```\n# Read all file paths in the top level directory \nimport os\n\n# Read all file paths in the top level directory into a list\n# If read_all_files_recursively is True, read all files recursively\n\nlog_files = []\nfor file in os.listdir(path):\n    if file.endswith(file_suffix):\n        log_files.append(os.path.join(path, file))\n\nif read_all_files_recursively:\n    for root, directories, files in os.walk(path):\n        for file in files:\n            if file.endswith(file_suffix):               \n                log_files.append(os.path.join(root, file))\n\n# Print log_files\nprint(log_files)\n```\n\nThe result is not optimal yet because it always executes the first loop, and optionally the second loop. This flow leads to duplicated results when the script is executed.\n\n```\npython3 log_reader.py\n\n['log-data/sample.log', 'log-data/sample.log', 'log-data/var/log/auth.log']\n```\n\nExperiment with Code Suggestions instructions to get a solution for the problem. There are different approaches you can take: \n\n1) A potential solution is to wrap the source code into an if-then-else block, and move the `os.listdir()` loop into the else-block. 
\n\n```python\nif read_all_files_recursively:\n    for root, directories, files in os.walk(path):\n        for file in files:\n            if file.endswith(file_suffix):               \n                log_files.append(os.path.join(root, file))\nelse:\n    for file in os.listdir(path):\n        if file.endswith(file_suffix):\n            log_files.append(os.path.join(path, file))  \n\n```\n\n2) Alternatively, do not use `append()` to always add a new list entry, but check if the item exists in the list first. \n\n```python\nfor file in os.listdir(path):\n    if file.endswith(file_suffix):\n        # check if the entry exists in the list already\n        if os.path.isfile(os.path.join(path, file)):\n            log_files.append(os.path.join(path, file))\n\nif read_all_files_recursively:\n    for root, directories, files in os.walk(path):\n        for file in files:\n            if file.endswith(file_suffix):\n                # check if the entry exists in the list already\n                if file not in log_files:\n                    log_files.append(os.path.join(root, file))\n```\n\n3) Or, we could eliminate duplicate entries after collecting all items. Python allows converting lists into [sets](https://docs.python.org/3/library/stdtypes.html#set-types-set-frozenset), which hold unique entries. After applying `set()`, you can again convert the set back into a list. Code Suggestions knows about this possibility, and will help with the comment `# Ensure that only unique file paths are in the list` \n\n![Code Suggestions, converting a list to unique items](https://about.gitlab.com/images/blogimages/learn-python-with-ai-code-suggestions-getting-started/vs_code_code_suggestions_unique_list.png){: .shadow}\n\n```python\n# Ensure that only unique file paths are in the list\n\nlog_files = list(set(log_files))\n```\n\n4) Take a step back and evaluate whether the variable read_all_files_recursively makes sense. 
Maybe the default behavior should just be reading all files recursively?\n\n**Tip for testing different paths in VS Code:** Select the code blocks, and press [`cmd /` on macOS](https://code.visualstudio.com/docs/getstarted/keybindings) to comment out the code. \n\n## Functions \n\nLet's create a function called `parse_log_file` that parses a log file, and returns the extracted data. We will define the expected log format and columns to extract, following the [syslog format specification](https://en.wikipedia.org/wiki/Syslog). There are different log format types and also customized formats by developers that need to be taken into account – exercise for later. \n\n### Start with a simple log format \n\nInspect a running Linux VM, or use the following example log file example for additional implementation.\n\n```\nless /var/log/syslog | grep -v docker \n\nOct 17 00:00:04 ebpf-chaos systemd[1]: Starting Daily dpkg database backup service...\nOct 17 00:00:04 ebpf-chaos systemd[1]: Starting Rotate log files...\nOct 17 00:00:04 ebpf-chaos systemd[1]: dpkg-db-backup.service: Deactivated successfully.\nOct 17 00:00:04 ebpf-chaos systemd[1]: Finished Daily dpkg database backup service.\nOct 17 00:00:04 ebpf-chaos systemd[1]: logrotate.service: Deactivated successfully.\nOct 17 00:00:04 ebpf-chaos systemd[1]: Finished Rotate log files.\nOct 17 00:17:01 ebpf-chaos CRON[727495]: (root) CMD (   cd / && run-parts --report /etc/cron.hourly)\n```\n\nWe can create an algorithm to split each log line by whitespaces, and then join the results again. Let's ask Code Suggestions for help. 
\n\n```python\n# Split log line \"Oct 17 00:00:04 ebpf-chaos systemd[1]: Finished Rotate log files.\" by whitespaces and save in a list\n\nlog_line = \"Oct 17 00:00:04 ebpf-chaos systemd[1]: Finished Rotate log files.\"\nlog_line_split = log_line.split(\" \")\nprint(log_line_split)\n```\n\nRun the script again to verify the result.\n\n```shell\npython3 log_reader.py\n\n['Oct', '17', '00:00:04', 'ebpf-chaos', 'systemd[1]:', 'Finished', 'Rotate', 'log', 'files.']\n```\n\nThe first three items are part of the datetime string, followed by the host, service, and remaining log message items. Let's practice string operations in Python as the next step. \n\n### String and data structure operations\n\nLet's ask Code Suggestions for help with learning to join strings, and perform list operations.\n\n1. Join the first three items with a whitespace again. \n2. Keep host and service. \n3. Join the remaining variable item count into a string, separated with whitespaces, again. \n4. Store the identified column keys, and their respective values in a new data structure: [dictionary](https://docs.python.org/3/library/stdtypes.html#mapping-types-dict). 
\n\n![Code suggestions for list items with string operations](https://about.gitlab.com/images/blogimages/learn-python-with-ai-code-suggestions-getting-started/vs_code_code_suggestions_list_items_string_join_extract.png){: .shadow}\n\n```shell \npython3 log_reader.py\n\n# Array\n['Oct', '17', '00:00:04', 'ebpf-chaos', 'systemd[1]:', 'Finished', 'Rotate', 'log', 'files.']\n\n# Dictionary \n{'datetime': 'Oct 17 00:00:04', 'host': 'ebpf-chaos', 'service': 'systemd[1]:', 'message': ' ebpf-chaos systemd[1]: Finished Rotate log files.'}\n```\n\nA working suggestion can look like the following:\n\n```python\n# Initialize results dictionary with empty values for datetime, host, service, message\n# Loop over log line split \n# Join the first three list items as date string\n# Item 4: host \n# Item 5: service\n# Join the remaining items into a string, separated with whitespaces \n# Print the results after the loop \n\nresults = {'datetime': '', 'host': '', 'service': '', 'message': ''}\n\nfor item in log_line_split:\n\n    if results['datetime'] == '':\n        results['datetime'] = ' '.join(log_line_split[0:3])\n\n    elif results['host'] == '':\n        results['host'] = log_line_split[3]\n\n    elif results['service'] == '':\n        results['service'] = log_line_split[4]\n\n    else:\n        results['message'] += ' ' + item\n\nprint(results)\n\n```\n\nThe suggested algorithm loops over all log line items, and applies the same operation for the first three items. `log_line_split[0:3]` extracts a slice of three items into a new list. Calling `join()` on a separator character and passing the array as an argument joins the items into a string. The algorithm continues to check for not initialized values for host (Item 4) and service (Item 5)and concludes with the remaining list items appended into the message string. To be honest, I would have used a slightly different algorithm, but it is a great learning curve to see other algorithms, and ways to implement them. 
Practice with different instructions, and data structures, and continue printing the data sets. \n\n**Tip:** If you need to terminate a script early, you can use `sys.exit()`. The remaining code will not be executed. \n\n```python\nimport sys \nsys.exit(1)\n```\n\nImagine doing these operations for different log formats, and message types – it can get complicated and error-prone very quickly. Maybe there is another approach. \n\n### Parse log files using regular expressions\n\nThere are different syslog format RFCs – [RFC 3164](https://datatracker.ietf.org/doc/html/rfc3164) is obsolete but still found in the wild as default configuration (matching the pattern above), while [RFC 5424](https://datatracker.ietf.org/doc/html/rfc5424) is more modern, including datetime with timezone information. Parsing this format can be tricky, so let's ask Code Suggestions for advice. \n\nIn some cases, the suggestions include regular expressions. They might not match immediately, making the code more complex to debug, with trial and errors. A good standalone resource to text and explain regular expressions is [regex101.com](https://regex101.com/).  \n\n**Tip:** You can skip diving deep into regular expressions using the following code snippet as a quick cheat. The next step involves instructing Code Suggestions to use these log patterns, and help us extract all valuable columns. \n\n```python\n# Define the syslog log format regex in a dictionary\n# Add entries for RFC3164, RFC5424\nregex_log_pattern = {\n    'rfc3164': '([A-Z][a-z][a-z]\\s{1,2}\\d{1,2}\\s\\d{2}[:]\\d{2}[:]\\d{2})\\s([\\w][\\w\\d\\.@-]*)\\s(.*)$',\n    'rfc5424': '(?:(\\d{4}[-]\\d{2}[-]\\d{2}[T]\\d{2}[:]\\d{2}[:]\\d{2}(?:\\.\\d{1,6})?(?:[+-]\\d{2}[:]\\d{2}|Z)?)|-)\\s(?:([\\w][\\w\\d\\.@-]*)|-)\\s(.*)$;'\n}\n```\n\nWe know what the function should do, and its input parameters – the file name, and a log pattern to match. 
The log lines should be split by this regular expression, returning a key-value dictionary for each log line. The function should return a list of dictionaries. \n\n```python\n# Create a function that parses a log file\n# Input parameter: file path\n# Match log line against regex_log_pattern\n# Return the results as dictionary list: log line, pattern, extracted columns\n```\n\n![Code suggestion based on a multiline comment instruction to get a function that parses a log file based on regex patterns](https://about.gitlab.com/images/blogimages/learn-python-with-ai-code-suggestions-getting-started/vs_code_code_suggestions_log_format_regex_function_instructions_01.png){: .shadow}\n\nRemember the indent for opening a new scope? The same applies for functions in Python. The `def` identifier requires a function name, and a list of parameters, followed by an opening colon. The next lines of code require the indent. VS Code will help with live-linting wrong indent, before the script execution fails, or the CI/CD pipelines. \n\nContinue with Code Suggestions – it might already know that you want to parse all log files, and parse them using the newly created function. 
\n\n![Code suggestion to parse all log files, and print the result set](https://about.gitlab.com/images/blogimages/learn-python-with-ai-code-suggestions-getting-started/vs_code_code_suggestions_log_format_regex_function_instructions_02.png){: .shadow}\n\nA full working example can look like this: \n\n```\nimport os\n\n# Specify the path and file suffix in variables\npath = 'log-data/'\nfile_suffix = '.log'\n\n# Read all file paths in the directory into a list\nlog_files = []\nfor root, directories, files in os.walk(path):\n    for file in files:\n        if file.endswith(file_suffix):\n            log_files.append(os.path.join(root, file))\n\n# Define the syslog log format regex in a dictionary\n# Add entries for RFC3164, RFC5424\nregex_log_pattern = {\n    'rfc3164': '([A-Z][a-z][a-z]\\s{1,2}\\d{1,2}\\s\\d{2}[:]\\d{2}[:]\\d{2})\\s([\\w][\\w\\d\\.@-]*)\\s(.*)$',\n    'rfc5424': '(?:(\\d{4}[-]\\d{2}[-]\\d{2}[T]\\d{2}[:]\\d{2}[:]\\d{2}(?:\\.\\d{1,6})?(?:[+-]\\d{2}[:]\\d{2}|Z)?)|-)\\s(?:([\\w][\\w\\d\\.@-]*)|-)\\s(.*)$;'\n}\n\n# Create a function that parses a log file\n# Input parameter: file path\n# Match log line against regex_log_pattern\n# Return the results as dictionary list: log line, pattern name, extracted columns\nimport re\n\ndef parse_log_file(file_path):\n    # Read the log file\n    with open(file_path, 'r') as f:\n        log_lines = f.readlines()\n\n    # Create a list to store the results\n    results = []\n\n    # Iterate over the log lines\n    for log_line in log_lines:\n        # Match the log line against the regex pattern\n        for pattern_name, pattern in regex_log_pattern.items():\n            match = re.match(pattern, log_line)\n\n            # If the log line matches the pattern, add the results to the list\n            if match:\n                extracted_columns = match.groups()\n                results.append({\n                    'log_line': log_line,\n                    'pattern_name': pattern_name,\n                    
'extracted_columns': extracted_columns,\n                    'source_file': file_path\n                })\n\n    # Return the results\n    return results\n\n# Parse all files and print results\nfor log_file in log_files:\n    results = parse_log_file(log_file)\n    print(results)\n```\n\nLet's unpack what the `parse_log_file()` function does:\n\n1. Opens the file from `file_path` parameter. \n2. Reads all lines into a new variable `log_lines`. \n3. Creates a results list to store all items. \n4. Iterates over the log lines. \n5. Matches against all regex patterns configured in regex_log_pattern. \n6. If a match is found, extracts the matching column values.\n7. Creates a results item, including the values for the keys `log_line`, `pattern_name`, `extracted_colums`, `source_file`. \n8. Appends the results item to the results list.\n9. Returns the results list. \n\nThere are different variations to this – especially for the returned result data structure. For this specific case, log lines come as list already. Adding a dictionary object instead of a raw log line allows function callers to extract the desired information in the next step. Once a working example has been implemented, you can refactor the code later, too. \n\n### Advanced log format: auth.log\n\nParsing the syslog on a Linux distribution might not unveil the necessary data to analyze. On a virtual machine that exposes port 22 (SSH) to the world, the authentication log is much more interesting – plenty of bots and malicious actors testing default password combinations and often brute force attacks.\n\nThe following snippet from `/var/log/auth.log` on one of my private servers shows the authentication log format and the random attempts from bots using different usernames, etc. 
\n\n```\nOct 15 00:00:19 ebpf-chaos sshd[3967944]: Failed password for invalid user ubuntu from 93.254.246.194 port 48840 ssh2\nOct 15 00:00:20 ebpf-chaos sshd[3967916]: Failed password for root from 180.101.88.227 port 44397 ssh2\nOct 15 00:00:21 ebpf-chaos sshd[3967944]: Received disconnect from 93.254.246.194 port 48840:11: Bye Bye [preauth]\nOct 15 00:00:21 ebpf-chaos sshd[3967944]: Disconnected from invalid user ubuntu 93.254.246.194 port 48840 [preauth]\nOct 15 00:00:24 ebpf-chaos sshd[3967916]: Failed password for root from 180.101.88.227 port 44397 ssh2\nOct 15 00:00:25 ebpf-chaos sshd[3967916]: Received disconnect from 180.101.88.227 port 44397:11:  [preauth]\nOct 15 00:00:25 ebpf-chaos sshd[3967916]: Disconnected from authenticating user root 180.101.88.227 port 44397 [preauth]\nOct 15 00:00:25 ebpf-chaos sshd[3967916]: PAM 2 more authentication failures; logname= uid=0 euid=0 tty=ssh ruser= rhost=180.101.88.227  user=root\nOct 15 00:00:25 ebpf-chaos sshd[3967998]: Invalid user teamspeak from 185.218.20.10 port 33436\n```\n\n**Tip for intrusion prevention:** Add a firewall setup, and use [fail2ban](https://en.wikipedia.org/wiki/Fail2ban) to block invalid auth logins. \n\nThe next exercise is to extend the logic to understand the free form log message parts, for example `Failed password for invalid user ubuntu from 93.254.246.194 port 48840 ssh2`. The task is to store the data in an optional dictionary with key value pairs. \n\nCreate a new function that takes the previously parsed log line results as input, and specifically parses the last list item for each line.\n\n1. Count the number of `Failed password` and `Invalid user` messages.\n2. 
Return the results with count, log file, pattern \n\n![Code suggestions for a log file message parser to count auth.log failures](https://about.gitlab.com/images/blogimages/learn-python-with-ai-code-suggestions-getting-started/vs_code_code_suggestions_parse_log_message_auth_log.png){: .shadow}\n\nA working suggestion can look like the following code:\n\n```python\n# Create a function that parses a log file message from the last extracted_columns entry \n# Input: Parsed log lines results list \n# Loop over all log lines in the list, and extract the last list item as message \n# Count failure strings in the message: Failed password, Invalid user \n# Return the results if failure count greater 0: log_file, count, failure string\ndef parse_log_file_message(results):\n    failure_results = []\n\n    # Iterate over the log lines\n    for result in results:\n        # Extract the message from the last list item\n        message = result['extracted_columns'][-1]\n\n        # Count the number of failure strings in the message\n        failure_count = message.count('Failed password') + message.count('Invalid user')\n\n        # If the failure count is greater than 0, add the results to the list\n        if failure_count > 0:\n            failure_results.append({\n                'log_file': result['source_file'],\n                'count': failure_count,\n                'failure_string': message\n            })\n\n    # Return the results\n    return failure_results\n\n# Parse all files and print results\nfor log_file in log_files:\n    results = parse_log_file(log_file)\n    failure_results = parse_log_file_message(results)\n    print(failure_results)\n```\n\nThe algorithm follows the previous implementations: First, create a results array to store matching data. Then, iterate over the already parsed log_lines in the list. Each log line contains the `extracted_columns` key, which holds the free-form message string at the end. 
The next step is to call the string object function `count()` to count how many times a given character sequence is contained in a string. The returned numbers are added up to the `failure_count` variable. If it is greater than zero, the result is added to the results list, including the `log_file`, `count` and `failure_string` key-value pairs. After returning the parsed log message results, loop through all log files, parse them, and print the results again. \n\nExecute the script to inspect the detected matches. Note that the data structure can be optimized in future learning steps.\n\n```\npython3 log_reader.py\n\n[{'log_file': 'log-data/var/log/auth.log', 'count': 1, 'failure_string': 'sshd[3967944]: Failed password for invalid user ubuntu from 93.254.246.194 port 48840 ssh2'}, {'log_file': 'log-data/var/log/auth.log', 'count': 1, 'failure_string': 'sshd[3967916]: Failed password for root from 180.101.88.227 port 44397 ssh2'}, {'log_file': 'log-data/var/log/auth.log', 'count': 1, 'failure_string': 'sshd[3967916]: Failed password for root from 180.101.88.227 port 44397 ssh2'}, {'log_file': 'log-data/var/log/auth.log', 'count': 1, 'failure_string': 'sshd[3967998]: Invalid user teamspeak from 185.218.20.10 port 33436'}, {'log_file': 'log-data/var/log/auth.log', 'count': 1, 'failure_string': 'sshd[3967998]: Failed password for invalid user teamspeak from 185.218.20.10 port 33436 ssh2'}, {'log_file': 'log-data/var/log/auth.log', 'count': 1, 'failure_string': 'sshd[3968077]: Invalid user mcserver from 218.211.33.146 port 50950'}]\n\n```\n\n### Parsing more types: Structured logging\n\nApplication developers can use the structured logging format to help machine parsers to extract the key value pairs. 
Prometheus provides this information in the following structure in syslog:\n\n```\nOct 17 19:00:10 ebpf-chaos prometheus[594]: ts=2023-10-17T19:00:10.425Z caller=compact.go:519 level=info component=tsdb m\nsg=\"write block\" mint=1697558404661 maxt=1697565600000 ulid=01HCZG4ZX51GTH8H7PVBYDF4N6 duration=148.675854ms\nOct 17 19:00:10 ebpf-chaos prometheus[594]: ts=2023-10-17T19:00:10.464Z caller=head.go:1213 level=info component=tsdb msg\n=\"Head GC completed\" caller=truncateMemory duration=6.845245ms\nOct 17 19:00:10 ebpf-chaos prometheus[594]: ts=2023-10-17T19:00:10.467Z caller=checkpoint.go:100 level=info component=tsd\nb msg=\"Creating checkpoint\" from_segment=2308 to_segment=2309 mint=1697565600000\nOct 17 19:00:10 ebpf-chaos prometheus[594]: ts=2023-10-17T19:00:10.517Z caller=head.go:1185 level=info component=tsdb msg\n=\"WAL checkpoint complete\" first=2308 last=2309 duration=50.052621ms\n```\n\nThis format is easier to parse for scripts, because the message part can be split by whitespaces, and the assignment character `=`. Strings that contain whitespaces are guaranteed to be enclosed with quotes. The downside is that not all programming language libraries provide ready-to-use structured logging libraries, making it harder for developers to adopt this format. \n\nPractice following the previous example to parse the `auth.log` format with additional information. 
Tell Code Suggestions that you are expecting structured logging format with key-value pairs, and which returned data structure would be great:\n\n```python\n# Create a function that parses a log file message from the last extracted_columns entry \n# Input: Parsed log lines results list \n# Loop over all log lines in the list, and extract the last list item as message \n# Parse structured logging key-value pairs into a dictionary\n# Return results: log_file, dictionary \n```\n\n![Code suggestions for parsing structured logging format in the log file message part](https://about.gitlab.com/images/blogimages/learn-python-with-ai-code-suggestions-getting-started/vs_code_code_suggestions_propose_structured_logging_message_parser.png){: .shadow}\n\n### Printing results and formatting\n\nMany of the examples used the `print()` statement to print the content on the terminal. Python objects in the standard library support text representation, and for some types it makes more sense (string, numbers), others cannot provide much details (functions, etc.). \n\nYou can also pretty-print almost any data structure (lists, sets, dictionaries) in Python. The JSON library can format data structures in a readable format, and use a given spaces indent to draw the JSON structure on the terminal. Note that we use the `import` statement here to bring libraries into the current scope, and access their methods, for example `json.dumps`. \n\n```python\nimport json \nprint(json.dumps(structured_results, indent=4))\n```\n\n![Parsing log files into structured objects, example result after following the exercises](https://about.gitlab.com/images/blogimages/learn-python-with-ai-code-suggestions-getting-started/vs_code_terminal_parsing_logs_and_pretty_print_results.png)\n\nPractice with modifying the existing source code, and replace the code snippets where appropriate. 
Alternatively, create a new function that implements pretty printing.\n\n```python\n# Create a pretty print function with indent 4 \n```\n\n![Code suggestions for pretty-print function](https://about.gitlab.com/images/blogimages/learn-python-with-ai-code-suggestions-getting-started/vs_code_code_suggestions_pretty_print.png){: .shadow}\n\nThis idea works in a similar fashion with creating your own logger functions...but we have to stop learning and take a break. Before we conclude the first blog post in the learning series, let's ensure that CI/CD and dependencies are set up properly for future exercises and async practice. \n\n## Dependency management and continuous verification  \n\n### Pip and pyenv: Bringing structure into Python \n\nDependencies can be managed in the [`requirements.txt` file](https://pip.pypa.io/en/stable/reference/requirements-file-format/), including optional version dependencies. Using `requirements.txt` file also has the advantage of being the single source of truth for local development environments and running continuous builds with GitLab CI/CD. They can use the same installation command:\n\n```shell\npip install -r requirements.txt\n```\n\nSome Linux distributions do not install the pip package manager by default, for example, Ubuntu/Debian require to install the `python3-pip` package. \n\nYou can manage different virtual environments using [venv](https://docs.python.org/3/library/venv.html). This workflow can be beneficial to install Python dependencies into the virtual environment, instead of globally into the OS path which might break on upgrades. \n\n```shell\npip install virtualenv\nvirtualenv venv\nsource venv/bin/activate \n```\n\n### Automation: Configure CI/CD pipeline for Python\n\nThe [CI/CD pipeline](https://docs.gitlab.com/ee/ci/) should continuously lint, test, and build the code. You can mimic the steps from the local development, and add testing more environments and versions: \n\n1. 
Lint the source code and check for formatting errors. The example uses [Pyflakes](https://pypi.org/project/pyflakes/), a mature linter, and [Ruff](https://docs.astral.sh/ruff/ ), a fast linter written in Rust. \n2. Cache dependencies installed using the pip package manager, following the documentation for [Python caching in GitLab CI/CD](https://docs.gitlab.com/ee/ci/caching/#cache-python-dependencies). This saves time and resources on repeated CI/CD pipeline runs.\n3. Use parallel matrix builds to test different Python versions, based on the available container images on Docker Hub and their tags. \n\n```yaml\nstages:\n  - lint\n  - test\n\ndefault:\n  image: python:latest\n  cache:                      # Pip's cache doesn't store the python packages\n    paths:                    # https://pip.pypa.io/en/stable/topics/caching/\n      - .cache/pip\n  before_script:\n    - python -V               # Print out python version for debugging\n    - pip install virtualenv\n    - virtualenv venv\n    - source venv/bin/activate\n\nvariables:  # Change pip's cache directory to be inside the project directory since we can only cache local items.\n  PIP_CACHE_DIR: \"$CI_PROJECT_DIR/.cache/pip\"\n\n# lint template\n.lint-tmpl:\n  script:\n    - echo \"Linting Python version $VERSION\"\n  parallel:\n    matrix:\n      - VERSION: ['3.9', '3.10', '3.11', '3.12']   # https://hub.docker.com/_/python\n\n# Lint, using Pyflakes: https://pypi.org/project/pyflakes/ \nlint-pyflakes:\n  extends: [.lint-tmpl]\n  script:\n    - pip install -r requirements.txt\n    - find . 
-not -path './venv' -type f -name '*.py' -exec sh -c 'pyflakes {}' \\;\n\n# Lint, using Ruff (Rust): https://docs.astral.sh/ruff/ \nlint-ruff:\n  extends: [.lint-tmpl]\n  script:\n    - pip install -r requirements.txt\n    - ruff .\n```\n\n![GitLab CI/CD Python lint job view, part of matrix builds](https://about.gitlab.com/images/blogimages/learn-python-with-ai-code-suggestions-getting-started/gitlab_cicd_python_lint_job_log_view.png)\n\n## What is next \n\nFun fact: GitLab Duo Code Suggestions also helped writing this blog post in VS Code, knowing about the context. In the screenshot, I just wanted to add a tip about [regex101](https://regex101.com/), and GitLab Duo already knew. \n\n![Writing the GitLab blog post in VS Code with support from GitLab Duo Code Suggestions](https://about.gitlab.com/images/blogimages/learn-python-with-ai-code-suggestions-getting-started/gitlab_duo_code_suggestions_helping_write_the_learning_python_ai_blog_post.png)\n\nIn an upcoming blog, we will look into advanced learning examples with more practical (log) filtering and parallel operations, how to fetch logs from API endpoints (CI/CD job logs for example), and more data analytics and observability. Until then, here are a few recommendations for practicing async.\n\n### Async learning exercises\n\n- Implement the missing `log_file_limit` variable check. \n- Print a summary of the results in Markdown, not only JSON format. \n- Extend the script to accept a search filter as environment variable. Print/count only filtered results. \n- Extend the script to accept a date range. It might require parsing the datetime column in a time object to compare the range. \n- Inspect a GitLab CI/CD pipeline job log, and download the raw format. Extend the log parser to parse this specific format, and print a summary. \n\n### Share your feedback\n\nWhich programming language are you learning or considering learning? 
Start a new topic on our [community](/community/) forum or Discord and share your experience.\n\nWhen you use [GitLab Duo](/gitlab-duo/) Code Suggestions, please share your thoughts and feedback [in the feedback issue](https://gitlab.com/gitlab-org/gitlab/-/issues/405152).\n",[482,724,746,9],{"slug":2302,"featured":6,"template":680},"learning-python-with-a-little-help-from-ai-code-suggestions","content:en-us:blog:learning-python-with-a-little-help-from-ai-code-suggestions.yml","Learning Python With A Little Help From Ai Code Suggestions","en-us/blog/learning-python-with-a-little-help-from-ai-code-suggestions.yml","en-us/blog/learning-python-with-a-little-help-from-ai-code-suggestions",{"_path":2308,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":2309,"content":2313,"config":2318,"_id":2320,"_type":14,"title":2321,"_source":16,"_file":2322,"_stem":2323,"_extension":19},"/en-us/blog/learning-rust-with-a-little-help-from-ai-code-suggestions-getting-started",{"title":2310,"description":2294,"ogTitle":2310,"ogDescription":2294,"noIndex":6,"ogImage":2057,"ogUrl":2311,"ogSiteName":693,"ogType":694,"canonicalUrls":2311,"schema":2312},"Learning Rust with a little help from AI","https://about.gitlab.com/blog/learning-rust-with-a-little-help-from-ai-code-suggestions-getting-started","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Learning Rust with a little help from AI\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Michael Friedrich\"}],\n        \"datePublished\": \"2023-08-10\",\n      }",{"title":2310,"description":2294,"authors":2314,"heroImage":2057,"date":2315,"body":2316,"category":10,"tags":2317},[1217],"2023-08-10","Learning a new programming language can help broaden your software development expertise, open career opportunities, or create fun challenges. However, it can be difficult to decide on one specific approach to learning a new language. 
Artificial intelligence (AI) can help. In this tutorial, you'll learn how to leverage AI-powered GitLab Duo Code Suggestions for a guided experience in learning the Rust programming language.\n\n- [Preparations](#preparations)\n  - [VS Code](#vs-code)\n  - [Code Suggestions](#code-suggestions)\n- [Learning a new programming language: Rust](#learning-a-new-programming-language-rust)\n    - [Development environment for Rust](#development-environment-for-rust)\n    - [Hello, World](#hello-world)\n- [Cargo: Bringing structure into Rust](#cargo-bringing-structure-into-rust)\n- [Automation: Configure CI/CD pipeline for Rust](#automation-configure-cicd-pipeline-for-rust)\n- [Continue learning Rust](#continue-learning-rust)\n    - [Define variables and print them](#define-variables-and-print-them)\n    - [Explore variable types](#explore-variable-types)\n    - [Flow control: Conditions and loops](#flow-control-conditions-and-loops)\n    - [Functions](#functions)\n    - [Testing](#testing)\n- [What is next](#what-is-next)\n    - [Async learning exercises](#async-learning-exercises)\n    - [Share your feedback](#share-your-feedback)\n\n## Preparations \nChoose your [preferred and supported IDE](https://docs.gitlab.com/ee/user/project/repository/code_suggestions.html#enable-code-suggestions-in-other-ides-and-editors), and follow the documentation to enable code suggestions for [GitLab.com SaaS](https://docs.gitlab.com/ee/user/project/repository/code_suggestions.html#enable-code-suggestions-on-gitlab-saas) or [GitLab self-managed instances](https://docs.gitlab.com/ee/user/project/repository/code_suggestions.html#enable-code-suggestions-on-self-managed-gitlab).\n\nProgramming languages can require an install of the language interpreter command-line tools or compilers that generate binaries from source code to build and run the application.\n\nTip: You can also use [GitLab Remote Development workspaces](/blog/quick-start-guide-for-gitlab-workspaces/) to create your own cloud 
development environments, instead of local development environments. This blog post focuses on using VS Code and the GitLab Web IDE. \n\n### VS Code\nOn macOS, you can [install VS Code](https://code.visualstudio.com/download) as a Homebrew cask or manually download and install it. \n\n```shell\nbrew install --cask visual-studio-code \n```\n\nNavigate to the `Extensions` menu and search for `gitlab workflow`. Install the [GitLab workflow extension for VS Code](https://marketplace.visualstudio.com/items?itemName=GitLab.gitlab-workflow). \n\nTip: VS Code will also detect the programming languages, and offer to install additional plugins for syntax highlighting and development experience. \n\n### Code Suggestions\nIt can help to familiarize yourself with suggestions before actually verifying the suggestions. GitLab Code Suggestions are provided as you type, so you do not need use specific keyboard shortcuts. To accept a code suggestion, press the `tab` key. Also note that writing new code works more reliably than refactoring existing code. AI is non-deterministic, which means that the same suggestion may not be repeated after deleting the code suggestion. While Code Suggestions is in Beta, we are working on improving the accuracy of generated content overall. Please review the [known limitations](https://docs.gitlab.com/ee/user/project/repository/code_suggestions.html#known-limitations), as this could affect your learning experience. \n\n## Learning a new programming language: Rust \nNow, let's dig into learning Rust, which is one of the [supported languages in Code Suggestions](https://docs.gitlab.com/ee/user/project/repository/code_suggestions.html#supported-languages). \n\n[Rust by Example](https://doc.rust-lang.org/rust-by-example/) provides a great tutorial for beginners, together with the official [Rust book](https://doc.rust-lang.org/book/). The [Hands-on Rust book](https://hands-on-rust.com/) shows how to build a 2D game as a more practical approach. 
More examples are shared in [this Rust book list](https://github.com/sger/RustBooks). \n\nBefore diving into the source code, make sure to set up your development environment.\n\n### Development environment for Rust\n1) Create a new project `learn-rust-ai` in GitLab, and clone the project into your development environment. All code snippets are available in [this \"Learn Rust with AI\" project](https://gitlab.com/gitlab-de/use-cases/ai/learn-with-ai/learn-rust-ai).\n\n```shell\ngit clone https://gitlab.com/NAMESPACE/learn-rust-ai.git\n\ncd learn-rust-ai\n\ngit status\n```\n\n2) Install Rust and the build toolchain. Fortunately, this is straightforward [following the Rust install documentation](https://www.rust-lang.org/tools/install).\n\nTip for using the generic installer: Download the script and run it after review. \n\n```\n# Download and print the script before running it\ncurl -Lvs https://sh.rustup.rs -o rustup-init.sh\n\n# Run the Rust installer script\nsh rustup-init.sh \n```\n\nExample on macOS using Homebrew:\n\n```\nbrew install rust\n```\n\n1) Optional: Install the [rust-analyzer VS Code extension](https://marketplace.visualstudio.com/items?itemName=rust-lang.rust-analyzer).\n\n2) Each exercise will invite you to compile the code with the [`rustc` command](https://doc.rust-lang.org/rustc/what-is-rustc.html), and later using [`cargo` as build tool and package manager](https://doc.rust-lang.org/cargo/index.html).\n\nYou are all set to learn Rust! \n\n### Hello, World\nWe will start with [Rust by Example](https://doc.rust-lang.org/rust-by-example/), and follow the [Hello, World exercise](https://doc.rust-lang.org/rust-by-example/hello.html).\n\nCreate a new file `hello.rs` in the root directory of the project and start with a comment saying `// Hello world`. 
Next, start writing the `main` function, and verify the code suggestion.\n\n![VS Code hello.rs Rust code suggestion, asking to accept](https://about.gitlab.com/images/blogimages/learn-rust-with-ai-code-suggestions-getting-started/learn_rust_ai_gitlab_code_suggestions_hello_world_suggested.png){: .shadow}\n\nAccept the suggestion by pressing the `tab` key and save the file (keyboard shortcut: cmd s). \n\n```\n// Hello world\n\nfn main() {\n    println!(\"Hello, world!\");\n}\n```\n\n![VS Code hello.rs Rust code suggestion, accepted](https://about.gitlab.com/images/blogimages/learn-rust-with-ai-code-suggestions-getting-started/learn_rust_ai_gitlab_code_suggestions_hello_world_accepted.png){: .shadow}\n\nCommit the change to the Git repository. In VS Code, use the keyboard shortcut `ctrl shift G`, add a commit message, and hit `cmd enter` to submit. \n\nUse the command palette (`cmd shift p`) and search for `create terminal` to open a new terminal. \n\nBuild and run the code.\n\n```shell\nrustc hello.rs\n\n./hello\n```\n\n![hello.rs Rust code suggestion, accepted, compiled, run](https://about.gitlab.com/images/blogimages/learn-rust-with-ai-code-suggestions-getting-started/learn_rust_ai_gitlab_code_suggestions_hello_world_cli_build.png){: .shadow}\n\nTip: Adding [code comments in Rust (`//`)](https://doc.rust-lang.org/reference/comments.html) before you start writing a function or algorithm will help Code Suggestions with more context to provide better suggestions. In the example above, we did that with `// Hello world`, and will continue doing so in the next exercises. \n\n## Cargo: Bringing structure into Rust\n[Cargo](https://doc.rust-lang.org/rust-by-example/cargo.html) is the official Rust package management tool. It is more than that - you can run build and test commands because Cargo understands them as well. 
\n\nYou can initialize a new Cargo configuration in the current directory tree with the following command:\n\n```shell\ncargo init\n```\n\nThe directory tree invites you to add the source code into the `src/` directory, while `Cargo.toml` manages the dependencies and used compiler versions. The `.gitignore` file is also added including best practices. \n\n```shell\ntree\n.\n├── Cargo.toml\n├── README.md\n├── hello\n├── hello.rs\n└── src\n    └── main.rs\n```\n\nTry building the code and running it using `cargo`.\n\n```shell\ncargo build\n\ncargo run\n```\n\nCommit all changes and push them to your GitLab project.\n\n```shell\ngit commit -avm \"Initialize Cargo\"\n\ngit push\n```\n\nAfter exploring Cargo, let's make sure that our code is continuously tested while learning Rust. The next section explains how to set up [GitLab CI/CD](https://about.gitlab.com/topics/ci-cd/) for Rust. \n\n## Automation: Configure CI/CD pipeline for Rust\nThe [CI/CD pipeline](https://docs.gitlab.com/ee/ci/) should run two jobs in two stages: Build and test the code. The default container [image](https://docs.gitlab.com/ee/ci/yaml/#image), `rust:latest`, works in the first iteration. In order to save resources, the CI/CD configuration also supports [caching](https://docs.gitlab.com/ee/ci/caching/) for downloaded dependencies and build objects. The `CARGO_HOME` variable is set to the CI/CD job home directory to ensure everything gets appropriately cached.\n\n```yaml\nstages:\n  - build\n  - test \n\ndefault:\n  image: rust:latest\n  cache:\n    key: ${CI_COMMIT_REF_SLUG}\n    paths:                      \n      - .cargo/bin\n      - .cargo/registry/index\n      - .cargo/registry/cache\n      - target/debug/deps\n      - target/debug/build\n    policy: pull-push\n\n# Cargo data needs to be in the project directory to be cached. 
\nvariables:\n  CARGO_HOME: ${CI_PROJECT_DIR}/.cargo      \n```\n\nThe CI/CD jobs inherit the [`default`](https://docs.gitlab.com/ee/ci/yaml/#default) values, and specify the cargo commands in the [`script` section](https://docs.gitlab.com/ee/ci/yaml/#script).\n\n```yaml\nbuild-latest:\n  stage: build\n  script:\n    - cargo build --verbose\n\ntest-latest:\n  stage: build\n  script:\n    - cargo test --verbose\n```\n\nYou can see an example in [this MR](https://gitlab.com/gitlab-de/use-cases/ai/learn-with-ai/learn-rust-ai/-/merge_requests/1/diffs).\n\n## Continue learning Rust \nMake sure to add new source code into the `src/` directory. \n\n### Define variables and print them\nPractice adding a few more [print](https://doc.rust-lang.org/rust-by-example/hello/print.html) statements into `src/main.rs`, and then build and run the code again.\n\n1) Define a variable called `name` and assign your name as string value.\n\n2) Print the name, including a string prefix saying `Hello, `. \n\n![VS Code main.rs Rust code suggestion, first step in print](https://about.gitlab.com/images/blogimages/learn-rust-with-ai-code-suggestions-getting-started/learn_rust_ai_gitlab_code_suggestions_print_variable_first.png){: .shadow}\n\n![VS Code main.rs Rust code suggestion, second step in print](https://about.gitlab.com/images/blogimages/learn-rust-with-ai-code-suggestions-getting-started/learn_rust_ai_gitlab_code_suggestions_print_variable_second.png){: .shadow}\n\n1) Open a new terminal in VS Code using the command palette (keyboard shortcut `cmd + shift + p`) and search for `terminal`.\n\n2) Build and run the code with the `cargo build` and `cargo run` commands. 
\n\n![VS Code terminal with cargo build and run output](https://about.gitlab.com/images/blogimages/learn-rust-with-ai-code-suggestions-getting-started/learn_rust_ai_gitlab_code_suggestions_print_variable_cargo_build_run_terminal.png){: .shadow}\n\nAn example solution can be found [here](https://gitlab.com/gitlab-de/use-cases/ai/learn-with-ai/learn-rust-ai/-/blob/main/solutions/variable_print.rs). \n\n### Explore variable types \nDefine different variable value types ([primitives](https://doc.rust-lang.org/rust-by-example/primitives.html)) and embed them into the `print` statements. Maybe they feel familiar with other programming languages?\n\nTip: Use code comments to see which code suggestions can be useful to learn. Start with typing `// Integer addition` and see what code suggestions you can add.\n\n![VS Code main.rs Rust code suggestion, primitive types with literals and expressions](https://about.gitlab.com/images/blogimages/learn-rust-with-ai-code-suggestions-getting-started/learn_rust_ai_gitlab_code_suggestions_primitive_types_literals_operators.png)\n\nExperiment with GitLab Duo Code Suggestions. The shown examples are non-deterministic, but you may be able to add additions, subscriptions, multiplications, etc., and the corresponding `println` statements just by accepting code suggestions and continuing the flow with `enter` or completing the code statements. This workflow can create a chain of code suggestions that can help you learn the Rust language. 
\n\n![Literals and expressions, first suggestion](https://about.gitlab.com/images/blogimages/learn-rust-with-ai-code-suggestions-getting-started/learn_rust_ai_gitlab_code_suggestions_literals_expressions_01.png){: .shadow}\n![Literals and expressions, second suggestion](https://about.gitlab.com/images/blogimages/learn-rust-with-ai-code-suggestions-getting-started/learn_rust_ai_gitlab_code_suggestions_literals_expressions_02.png){: .shadow}\n![Literals and expressions, third suggestion](https://about.gitlab.com/images/blogimages/learn-rust-with-ai-code-suggestions-getting-started/learn_rust_ai_gitlab_code_suggestions_literals_expressions_03.png){: .shadow}\n\nAn example solution can be found [here](https://gitlab.com/gitlab-de/use-cases/ai/learn-with-ai/learn-rust-ai/-/blob/main/solutions/literals_expressions.rs). \n\nThe code suggestions are not perfect. Sometimes there are errors that require you to fix the problems. When writing this blog post, I had to fix two missing semicolons at the end of the code lines. The great thing about the Rust compiler is that the error messages tell you exactly where the problem happens with suggestions to fix them. Code Suggestions and the Rust-provided build chain make writing Rust code more efficient. \n\n```rust\nprintln!(\"Hello, {}!\", name)\n\n// Integer subtraction\nlet y = 9 - 4\n```\n\n![Terminal build, errors, Rust compiler help](https://about.gitlab.com/images/blogimages/learn-rust-with-ai-code-suggestions-getting-started/learn_rust_ai_gitlab_code_suggestions_terminal_errors_rust_help.png){: .shadow}\n\nYou can try to provoke the same error by removing a semicolon at the end of a statement and then running `cargo build` in the terminal again. The Rust compiler will also warn you about unused variables to help with better code quality. The screenshot shows warnings for variable definitions, and also a CLI command to fix them. 
\n\n![Terminal build, warnings, Rust compiler help](https://about.gitlab.com/images/blogimages/learn-rust-with-ai-code-suggestions-getting-started/learn_rust_ai_gitlab_code_suggestions_terminal_warnings_rust_help.png){: .shadow}\n\n### Flow control: Conditions and loops \nNext, let's focus on [flow control](https://doc.rust-lang.org/rust-by-example/flow_control.html) with conditions, loops, etc., and how to implement them.\n\n1) Start typing `// Flow control` and see which suggestions are provided.\n\n2) Experiment with the code, and continue defining a boolean variable `v` which is set to true. \n\n```rust\n  // Flow control\n  let v = true;\n\n```\n\n![Conditions, boolean variable](https://about.gitlab.com/images/blogimages/learn-rust-with-ai-code-suggestions-getting-started/learn_rust_ai_gitlab_code_suggestions_flow_control_conditions_01.png){: .shadow}\n![Conditions, boolean variable, if condition](https://about.gitlab.com/images/blogimages/learn-rust-with-ai-code-suggestions-getting-started/learn_rust_ai_gitlab_code_suggestions_flow_control_conditions_02.png){: .shadow}\n\n1) Start typing `// Loops` and experiment with the code suggestions. \n\nLet's assume the loop looks the like following snippet. It does not have a loop counter which gets printed on every loop execution.\n\n```rust\n// Loops\nlet mut count = 0;\n\nloop {\n    count += 1;\n\n    if count == 10 {\n        break;\n    }\n}\n```\n\n2) Start typing `println!` and see which code suggestions are provided, for example `println!(\"Count: {}\", count);`. \n\n![Loops, loop counter print suggestion](https://about.gitlab.com/images/blogimages/learn-rust-with-ai-code-suggestions-getting-started/learn_rust_ai_gitlab_code_suggestions_flow_control_loops_print_counter.png)\n\n3) Apply the suggestions, and execute `cargo build && cargo run` on the terminal again. 
\n\nLet's learn more: Rust supports different loop types, for example [while loops](https://doc.rust-lang.org/rust-by-example/flow_control/while.html), [for loops](https://doc.rust-lang.org/rust-by-example/flow_control/for.html), etc. \n\n1) Type `// While loop` and verify the code suggestions. Repeat the same for `// For loop`.\n\n```rust\n// While loops\nlet mut count = 0;\n\nwhile count \u003C 10 {\n    count += 1;\n    println!(\"Count: {}\", count);\n}\n\n// For loops\nlet a = [10, 2, 3, 4, 5];\n\nfor element in a {\n    println!(\"Element: {}\", element);\n}\n```\n\nThere is more to learn with loops and conditions: Iterate over arrays, lists, maps, slices. Practice with writing comments for `// Maps and sets` and `// Vectors and strings`. \n\n![Vectors, strings](https://about.gitlab.com/images/blogimages/learn-rust-with-ai-code-suggestions-getting-started/learn_rust_ai_gitlab_code_suggestions_flow_control_vectors_strings.png){: .shadow}\n\n```rust\n  // Maps and sets\n  let mut scores = HashMap::new();\n\n  scores.insert(String::from(\"Blue\"), 10);\n  scores.insert(String::from(\"Yellow\"), 50);\n\n  for (key, value ) in &scores {\n      println!(\"{}: {}\", key, value);\n  }\n\n  // Vectors and strings\n  let mut v = Vec::new();\n\n  v.push(1);\n  v.push(2);\n\n  for element in &v {\n      println!(\"Element: {}\", element);\n  }  \n```\n\nThis snippet will fail because the `HashMap` type needs to be imported from `std::collections::HashMap`. Add the following line on top before the main function definition: \n\n```rust\nuse std::collections::HashMap;\n``` \n\n2) Build and run the code with `cargo build && cargo run`. 
\n\nAn example solution is provided [here](https://gitlab.com/gitlab-de/use-cases/ai/learn-with-ai/learn-rust-ai/-/blob/main/solutions/flow_control.rs).\n\n**Async exercise**: \n\n1) Modify the input values for the different data types, and build and run the code again.\n\n2) Add a condition into the loops that print the items only when a specific condition is met (for example, the number is odd or even). \n\n### Functions \n\n[Functions](https://doc.rust-lang.org/rust-by-example/fn.html) help increase code readability and testability with unit tests. Practice creating functions with the following steps: \n\n1) Two functions `isEven` and `isOdd` to evaluate whether a number is even or odd.\n\n```rust\nfn isEven(x: i32) -> bool {\n    x % 2 == 0\n}\n\nfn isOdd(x: i32) -> bool {\n    x % 2 != 0\n}\n```\n\n2) `isPrime` function to check whether a given integer value is a prime number.\n\n```rust\nfn isPrime(x: i32) -> bool {\n    let mut i = 2;\n\n    while i * i \u003C= x {\n        if x % i == 0 {\n            return false;\n        } else {\n            i += 1;\n        }\n    } \n\n    return true\n}\n```\n\n3) Create an array of integer values, loop over it, and call the functions. Let GitLab Code Suggestions guide you with the implementation by starting to type the if conditions followed by the function name. \n\n```rust\n  // Functions\n  let mut integers = vec![1, 2, 3, 4, 5];\n\n  for i in integers.iter() {\n\n      if (isEven(i)) {\n          println!(\"{} is even\", i);\n      }\n\n      if (isOdd(i)) { \n          println!(\"{} is odd\", i);\n      }\n\n      if (isPrime(i)) { \n          println!(\"{} is prime\", i);\n      }\n\n      println!(\"{}\", i);\n  }\n```\n\nNote that passing a reference value to a function may result in an error from the Rust compiler. Follow the suggestions and build the code again. 
\n\n```shell\n$ cargo build && cargo run \n\nerror[E0308]: mismatched types\n   --> src/main.rs:112:21\n    |\n112 |         if (isPrime(i)) { \n    |             ------- ^ expected `i32`, found `&{integer}`\n    |             |\n    |             arguments to this function are incorrect\n    |    \nnote: function defined here\n   --> src/main.rs:136:4\n    |\n136 | fn isPrime(x: i32) -> bool {\n    |    ^^^^^^^ ------\nhelp: consider dereferencing the borrow\n    |\n112 |         if (isPrime(*i)) { \n    |                     +\n```\n\nAn example solution is provided [here](https://gitlab.com/gitlab-de/use-cases/ai/learn-with-ai/learn-rust-ai/-/blob/main/solutions/functions.rs).\n\n**Async exercise**: Create a function `containsString` and test it with an array of string values, and a string to search for, in a loop. The screenshot shows a potential implementation. \n\n![containsString function, and vector with string elements to test, suggesting its usage in the main function](https://about.gitlab.com/images/blogimages/learn-rust-with-ai-code-suggestions-getting-started/learn_rust_ai_gitlab_code_suggestions_function_implemented_then_suggested_in_main.png){: .shadow}\n\n### Testing \nWhile learning programming, adopt [testing](https://doc.rust-lang.org/rust-by-example/testing.html) into your process. This can be unit tests for functions, documentation testing, and integration testing. Practice with testing the previously created functions `isEven`, `isOdd`, and `isPrime`. 
Start by typing `mod tests {` followed by a new line with `use super::*` to implement the example from the [Rust documentation for unit tests](https://doc.rust-lang.org/rust-by-example/testing/unit_testing.html).\n\n```rust\nmod tests {\n    use super::*;\n\n    #[test]\n    fn test_is_even() {\n        assert!(isEven(2)); \n        assert!(!isEven(3));\n    }\n\n    #[test] \n    fn test_is_odd() {\n        assert!(!isOdd(2));\n        assert!(isOdd(3));\n    }\n\n    #[test]\n    fn test_is_prime() { \n        assert!(isPrime(2));\n        assert!(!isPrime(3));\n    }\n}\n```\n\nRun `cargo test` to run the unit tests. Modify the test values to experiment with the results. \n\n```shell\ncargo test\n```\n\n![Function unit tests, cargo test output in the VS Code terminal](https://about.gitlab.com/images/blogimages/learn-rust-with-ai-code-suggestions-getting-started/learn_rust_ai_gitlab_code_suggestions_function_unit_tests_terminal_run.png)\n\nCreate unit tests that fail, and commit and push the changes to GitLab. The CI/CD pipelines will fail in this simulated breakage. The example above needs a fix for the `test_is_prime` test. Commit and push the change to verify that the pipeline passes again. \n\n```diff\n-        assert!(!isPrime(3));\n+        assert!(!isPrime(4));\n```\n\n## What is next \nIn an upcoming blog post, we will look into advanced learning examples with asynchronous operations, services, and external API communication. 
Until then, here are a few recommendations for practicing async.\n\n### Async learning exercises\n- [`std misc`](https://doc.rust-lang.org/rust-by-example/std_misc.html) provides asynchronous operations with threads, channels and file I/O\n- Book: [Hands-on Rust: Effective Learning through 2D Game Development and Play](https://pragprog.com/titles/hwrust/hands-on-rust/)\n- Tutorial: [Are we game yet?](https://arewegameyet.rs/resources/tutorials/)\n- Use case: [Web server with rocket.rs](https://rocket.rs/v0.5-rc/guide/quickstart/#running-examples)\n\nHere are a few more exercises and ideas for additional learning:\n1) The Rust compiler might have created warnings that need to be addressed. Follow the instructions from the `cargo build` commands and check the Git diff. \n\n```\ncargo fix --bin \"learn-rust-ai\"\n\ngit diff \n```\n\n2) [Error handling](https://doc.rust-lang.org/rust-by-example/error.html) is required when failure is detected, and the caller should know. Some errors can be recovered from within the application, others require program termination. \n\n3) The [`std` library](https://doc.rust-lang.org/rust-by-example/std.html) extends primitive types and makes programming more efficient. \n\n### Share your feedback\nWhich programming language are you learning or considering learning? Start a new topic on our [community](/community/) forum or Discord and share your experience.  
\n\nIf you are using Code Suggestions Beta with [GitLab Duo](/gitlab-duo/) already, please share your thoughts and feedback [in this issue](https://gitlab.com/gitlab-org/gitlab/-/issues/405152).\n",[703,2283,724,746,9],{"slug":2319,"featured":6,"template":680},"learning-rust-with-a-little-help-from-ai-code-suggestions-getting-started","content:en-us:blog:learning-rust-with-a-little-help-from-ai-code-suggestions-getting-started.yml","Learning Rust With A Little Help From Ai Code Suggestions Getting Started","en-us/blog/learning-rust-with-a-little-help-from-ai-code-suggestions-getting-started.yml","en-us/blog/learning-rust-with-a-little-help-from-ai-code-suggestions-getting-started",{"_path":2325,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":2326,"content":2332,"config":2338,"_id":2340,"_type":14,"title":2341,"_source":16,"_file":2342,"_stem":2343,"_extension":19},"/en-us/blog/machine-learning-and-devsecops",{"title":2327,"description":2328,"ogTitle":2327,"ogDescription":2328,"noIndex":6,"ogImage":2329,"ogUrl":2330,"ogSiteName":693,"ogType":694,"canonicalUrls":2330,"schema":2331},"Machine learning and DevSecOps: Inside the OctoML/GitLab integration","MLOps and DevSecOps teams can unify their workflows and gain automation and cost efficiencies.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749666915/Blog/Hero%20Images/autodevops.jpg","https://about.gitlab.com/blog/machine-learning-and-devsecops","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Machine learning and DevSecOps: Inside the OctoML/GitLab integration\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Sameer Farooqui, OctoML\"}],\n        \"datePublished\": \"2023-02-23\",\n      }",{"title":2327,"description":2328,"authors":2333,"heroImage":2329,"date":2335,"body":2336,"category":1156,"tags":2337},[2334],"Sameer Farooqui, OctoML","2023-02-23","\n\nMachine learning can be a powerful tool in 
software development, but not if it has to live apart from existing engineering workflows. DevSecOps teams, including MLOps, can now integrate [OctoML CLI](https://gitlab.com/octoml/octoml-public/octoml-cli-tutorials) into [GitLab’s CI/CD Pipelines](https://docs.gitlab.com/ee/ci/pipelines/) to unify workflows and leverage existing deployment and monitoring infrastructure. This integration makes it easier to catch bugs and model performance degradations early in the ML development cycle. \n\nThe OctoML Platform is a machine learning model optimization and deployment service powered by octoml.ai. [Machine learning has grown in popularity](/blog/top-10-ways-machine-learning-may-help-devops/) in DevSecOps, along with AI, because of its ability to learn and model how to perform complex tasks as a human would and then automate those tasks.\n\n## How does CI/CD apply to machine learning?\n\nOnce a machine learning model has been successfully deployed, it can get stale over time and its accuracy could degrade, a situation called “data drift”. Data drift causes newer inferencing data to drift away from the data used to train the model. In the retail industry, this can happen because of seasonality, as an example.\n\nProduction models must be regularly refreshed by retraining their weights with the latest data. Applying CI/CD concepts borrowed from software engineering, the OctoML CI integration makes the deployment process for trained/re-trained models automated and repeatable.\n\n## How OctoML CLI and GitLab work together\n\nNew commits to your inference code repository can run [OctoML CLI](https://github.com/octoml/octoml-cli-tutorials#readme) in your GitLab pipeline to automatically optimize machine learning models for lowest cost per inference and lowest latency, and then deploy the optimized model to your cloud registry. 
For customers looking for more granular packaging formats that integrate with existing containerization systems, OctoML offers [Python wheel packaging](https://app.octoml.ai/docs/deploy.html#python-wheel-deployment) and will soon offer YAML configuration files. To reduce model latency and serving costs, OctoML searches through multiple acceleration engines such as Apache TVM, ONNX Runtime, and TensorRT and then suggests the ideal CPU or GPU hardware type on AWS, Azure, or GCP.\n\n## Choice in cloud deployment targets\n\nUsing OctoML CLI, developers can send any trained model to OctoML’s SaaS platform for cost efficiency and cloud hardware benchmarking. By adapting and optimizing the trained model to leverage hardware intrinsics in CPU and GPUs, OctoML makes inferences run faster in production, thus saving users on cost per inference and improving the user experience of ML applications.\n\n![Cloud workflow](https://about.gitlab.com/images/blogimages/octomlintegration/image1.png){: .shadow}\n\nThe cloud workflow is designed for enterprise and production deployments. 
Here’s how it works:\n\n* The initial push from a developer to the GitLab repository launches a local, shared, or remote runner.\n* The runner will send the updated, trained model first to OctoML’s platform for acceleration and hardware adaptation.\n* Then, the pipeline pushes the accelerated model container to the GitLab Container Registry.\n* Finally, it deploys the container to a managed Kubernetes service in any of the major cloud providers.\n\nModels deployed via the accelerated cloud workflow not only provide end users the lowest latency user experience but also save the organization compute costs at inference time, which can amount to \\[90% of a production machine learning application’s compute costs](https://aws.amazon.com/blogs/machine-learning/reduce-ml-inference-costs-on-amazon-sagemaker-with-hardware-and-software-acceleration/).\n\n## Four required stages for every pipeline\n\nEach pipeline has four stages: setup, package, deploy, and test. Here’s the logical flow:\n\n![Logical flow](https://about.gitlab.com/images/blogimages/octomlintegration/image2.png){: .shadow}\n\n1. common:setup - produces OctoML CLI binary artifact and passes it on to local:package\n2. cloud:package - packages the incoming model into a Docker tarball using the OctoML CLI binary and passes the tarball to the next stage\n3. cloud:deploy - builds a Docker image from the Tarball and deploys the docker container to a remote registry (in our example, we deploy it to AWS via GitLab Container Registry using Flux, but there can be other mechanisms)\n4. cloud:test - run the user-provided test script\n\nWhen a cloud pipeline is executed, the GitLab Pipeline UI will display a corresponding workflow:\n\n![GitLab Pipeline UI](https://about.gitlab.com/images/blogimages/octomlintegration/image3.png){: .shadow}\n\nSimilar to any other GitLab CI/CD job, our [example repository](https://gitlab.com/octoml/octoml-public/octoml-cli-tutorials) has YAML files that define how each stage will execute. 
You can easily clone the repository or code and adapt it to your custom model and inference code:\n\n![example repository](https://about.gitlab.com/images/blogimages/octomlintegration/image4.png){: .shadow}\n\nIn addition to the stage YAML files, OctoML CLI also has its own `octoml.yaml` configuration, which defines the path to your model, hardware type the model should be accelerated for, and the model’s input shapes:\n\n![octoml.yaml config](https://about.gitlab.com/images/blogimages/octomlintegration/image5.png){: .shadow}\n\n## Get started with OctoML CLI and GitLab CI/CD\n\nOctoML CLI and GitLab CI/CD unify your software engineering and machine learning pipelines by allowing ML models to be deployed using the same infrastructure and processes you’re currently using for software applications. Further, our integration makes it seamless to start with local model deployments to test end-to-end inference and move to accelerated cloud deployments with minimal changes to your workflow.\n\n**We’ve [published tutorials](https://gitlab.com/octoml/octoml-public/octoml-cli-tutorials) with an NLP (Bertsquad) and Vision (YOLOv5) model for end-to-end examples. 
So, to get started, download the [OctoML CLI](https://try.octoml.ai/cli/) and [request an acceleration consultation](https://try.octoml.ai/cli/#lp-pom-block-105) to receive a token to OctoML’s SaaS platform.**\n",[231,9,703],{"slug":2339,"featured":6,"template":680},"machine-learning-and-devsecops","content:en-us:blog:machine-learning-and-devsecops.yml","Machine Learning And Devsecops","en-us/blog/machine-learning-and-devsecops.yml","en-us/blog/machine-learning-and-devsecops",{"_path":2345,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":2346,"content":2352,"config":2359,"_id":2361,"_type":14,"title":2362,"_source":16,"_file":2363,"_stem":2364,"_extension":19},"/en-us/blog/machine-learning-on-the-gitlab-devops-platform",{"title":2347,"description":2348,"ogTitle":2347,"ogDescription":2348,"noIndex":6,"ogImage":2349,"ogUrl":2350,"ogSiteName":693,"ogType":694,"canonicalUrls":2350,"schema":2351},"How Comet can streamline machine learning on The GitLab DevOps Platform","Here's a step-by-step look at how to bring ML into software development using Comet on GitLab's DevOps Platform.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749669991/Blog/Hero%20Images/ways-to-encourage-collaboration.jpg","https://about.gitlab.com/blog/machine-learning-on-the-gitlab-devops-platform","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How Comet can streamline machine learning on The GitLab DevOps Platform\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"William Arias\"}],\n        \"datePublished\": \"2021-11-08\",\n      }",{"title":2347,"description":2348,"authors":2353,"heroImage":2349,"date":2355,"body":2356,"category":1032,"tags":2357},[2354],"William Arias","2021-11-08","\n\nBuilding machine learning-powered applications comes with numerous challenges. 
When we talk about these challenges, there is a tendency to overly focus on problems related to the quality of a model’s predictions—things like data drift, changes in model architectures, or inference latency. \n\nWhile these are all problems worthy of deep consideration, an often overlooked challenge in [ML development](/topics/devops/the-role-of-ai-in-devops/) is the process of integrating a model into an existing software application.  \n\nIf you’re tasked with adding an ML feature to a product, you will almost certainly run into an existing codebase that must play nicely with your model. This is, to put it mildly, not an easy task. \n\nML is a highly iterative discipline. Teams often make many changes to their codebase and pipelines in the process of developing a model. Coupling an ML codebase to an application’s dependencies, unit tests, and CI/CD pipelines will significantly reduce the velocity with which ML teams can deliver on a solution, since each change would require running these downstream dependencies before a merge can be approved.  \n\nIn this post, we’re going to demonstrate how you can use [Comet](https://www.comet.ml/site/) with [GitLab’s DevOps platform](/solutions/devops-platform/) to streamline the workflow for your ML and software engineering teams, allowing them to collaborate without getting in each other's way.      \n\n## The challenge for ML teams working with application teams\n\nLet’s say your team is working on improving a feature engineering pipeline. You will likely have to test many combinations of features with some baseline model for the task to see which combinations make an impact on model performance.     \n \nIt is hard to know beforehand which features might be significant, so having to run multiple experiments is inevitable. If your ML code is a part of your application codebase, this would mean having to run your application’s CI/CD pipeline for every feature combination you might be trying. 
\n\nThis will certainly frustrate your Engineering and DevOps teams, since you would be unnecessarily tying up system resources, given that software engineering teams do not need to run their pipelines with the same frequency as ML teams do.  \n\nThe other issue is that despite having to run numerous experiments, only a single set of outputs from these experiments will make it to your production application. Therefore, the rest of the assets produced through these experiments are not relevant to your application code.     \n\nKeeping these two codebases separated will make life a lot easier for everyone – but it also introduces the problem of syncing the latest model between two codebases.     \n\n## Use The GitLab DevOps Platform and Comet for your model development process\n\nWith The GitLab DevOps platform and Comet, we can keep the workflows between ML and engineering teams separated, while enabling cross-team collaboration by preserving the visibility and auditability of the entire model development process across teams.     \n\nWe will use two separate projects to demonstrate this process. One project will contain our application code for a handwritten digit recognizer, while the other will contain all the code relevant to training and evaluating our model.  \n\nWe will adopt a process where discussions, code reviews, and model performance metrics get automatically published and tracked within The GitLab DevOps Platform, increasing the velocity and opportunity for collaboration between data scientists and software engineers for machine learning workflows.\n\n## Project setup\n\nOur project consists of two projects: [comet-model-trainer](https://gitlab.com/tech-marketing/devops-platform/comet-model-trainer) and [ml-ui](https://gitlab.com/tech-marketing/devops-platform/canara-review-apps-testing). 
\n\n![Alt text for your image](https://about.gitlab.com/images/blogimages/cometmodeltrainer.png){: .shadow}\n\nThe **comet-model-trainer** repository contains scripts to train and evaluate a model on the MNIST dataset. We have set up The GitLab DevOps Platform in a way that runs the training and evaluation Pipeline whenever a new merge request is opened with the necessary changes.\n\nThe **ml-ui** repository contains the necessary code to build the frontend of our ML application.\n\nSince the code is integrated with Comet, your ML team can easily track the source code, hyperparameters, metrics, and other details related to the development of the model.  \n\nOnce the training and evaluation steps are completed, we can use Comet to fetch summary metrics from the project as well as metrics from the Candidate model and display them within the merge request; This will allow the ML team to easily review the changes to the model. \n\n![Alt text for your image](https://about.gitlab.com/images/blogimages/buildmodelgraph.png){: .shadow}\n\n![Alt text for your image](https://about.gitlab.com/images/blogimages/summarymetrics.png){: .shadow}\n\nIn our case, the average accuracy of the models in the project is 97%. Our Candidate model achieved an accuracy of 99%, so it looks like it is a good fit to promote to production. The metrics displayed here are completely configurable and can be changed as necessary.        \n\nWhen the merge request is approved, the deployment pipeline is triggered and the model is pushed to Comet’s Model Registry. The Model Registry versions each model and links it back to the Comet Experiment that produced it.  \n![Alt text for your image](https://about.gitlab.com/images/blogimages/OpenComet_SparkVideo.gif){: .shadow}    \n\nOnce the model is pushed to the Model Registry, it is available to the application code. 
When the application team wishes to deploy this new version of the model to their app, they simply have to trigger their specific deployment pipeline.     \n\n## Running the pipeline\n\n### Pipeline outline\n\nWe will run the process outlined below every time a team member creates a merge request to change code in the `build-neural-network`script:\n\n![Alt text for your image](https://about.gitlab.com/images/blogimages/modelapprove.png){: .shadow}\n\nNow, let’s take a look at the yaml config used to define our CI/CD pipelines depicted in the previous diagram:\n\n![Alt text for your image](https://about.gitlab.com/images/blogimages/workflowsbranch.png){: .shadow}\n\n![Alt text for your image](https://about.gitlab.com/images/blogimages/script.png){: .shadow}\n\n![Alt text for your image](https://about.gitlab.com/images/blogimages/registermodel.png){: .shadow}\n\nLet's break down the CI/CD pipeline by describing the gitlab-ci.yml file so you can use it and customize it to your needs.\n\nWe start by instructing our GitLab runners to utilize Python:3.8 to run the jobs specified in the pipeline: \n\n`Image: python:3.8`\n\nThen, we define the job where we want to build and train the neural network:\n\n`Build-neural-network`\n\n### Build-neural-network \n\nIn this step, we start by creating a folder where we will store the artifacts generated by this job, install dependencies using the requirements.txt file, and finally  execute the corresponding Python script that will be in charge of training the neural network. The training runs in the GitLab runner using the Python image defined above, along with its dependencies.\n\nOnce the `build-neural-network` job has finalized successfully, we move to the next job: `write-report-mr`\n\nHere, we use another image created by DVC that will allow us to publish a report right in the merge request opened by the contributor who changed code in the neural network script. 
In this way, we’ve brought software development workflows to the development of ML applications. With the report provided by this job, code and model review can be executed within the merge request view, enabling teams to collaborate not only around the code but also the model performance.\n\nFrom the merge request page, we get access to loss curves and other relevant performance metrics from the model we are training, along with a link to the Comet Experiment UI, where richer details are provided to evaluate the model performance. These details include interactive charts for model metrics, the model hyperparameters, and Confusion Matrices of the test set performance, to name a few. \n\n![Alt text for your image](https://about.gitlab.com/images/blogimages/manualDeploy_SparkVideo.gif){: .shadow}\n\nWhen the team is done with the code and model review,  the merge request gets approved, and the script that generated the model is merged into the main codebase, along with its respective commit and the CI pipeline associated to it. This takes us to the next job: \n\n### Register-model\n\nThis job uses an integration between GitLab and Comet to upload the reviewed and accepted version of the model to the Comet Model Registry. If you recall, the Model Registry is where models intended for production can be logged and versioned. 
In order to run the commands that will register the model, we need to set up these variables: \n\n- COMET_WORKSPACE\n- COMET_PROJECT_NAME \n \nIn order to do that, follow the steps described [here](https://docs.gitlab.com/ee/ci/variables/#add-a-cicd-variable-to-an-instance).\n\nIt is worth noting that the `register-model` job only runs when the merge request gets reviewed and approved, and this behavior is obtained by setting `only: main` at the end of the job.\n\nFinally, we decide to let a team member have final control of the deployment so therefore we define a manual job:\n`Deploy-ml-ui`\n\n![Alt text for your image](https://about.gitlab.com/images/blogimages/deployuiml.png){: .shadow}\n\nWhen triggered, this job will import the model from Comet’s Model Registry and automatically create the necessary containers to build the user interface and deploy to a Kubernetes cluster. \n\n![Alt text for your image](https://about.gitlab.com/images/blogimages/downstream.png){: .shadow}\n\nThis job triggers a downstream pipeline, which means that the UI for this MNIST application resides in a different project. This keeps the codebase for the UI and model training separated but integrated and connected at the moment of deploying the model to a production environment.\n\n![Alt text for your image](https://about.gitlab.com/images/blogimages/multipipeline_SparkVideo.gif){: .shadow}\n\n## Key takeaways\n\nIn this post, we addressed some of the challenges faced by ML and software teams when it comes to collaborating on delivering ML-powered applications. Some of these challenges include:\n\n* The discrepancy in the frequency with which each of these teams need to iterate on their codebases and CI/CD pipelines.\n\n* The fact that only a single set of experiment assets from an ML experimentation pipeline is relevant to the application.\n\n* The challenge of syncing a model or other experiment assets across independent codebases.   
\n\nUsing The GitLab DevOps Platform and Comet, we can start bridging the gap between ML and software engineering teams over the course of a project. \n\nBy having model performance metrics adopted into software development workflows like the one we saw in the issue and merge request, we can keep track of the code changes, discussions, experiments, and models created in the process. All the operations executed by the team are recorded, can be audited, are end-to end-traceable, and (most importantly) reproducible. \n\nWatch a demo of this process:\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/W_DsNl5aAVk\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\n_About Comet:_\nComet is an MLOps Platform that is designed to help data scientists and teams build better models faster! Comet provides tooling to Track, Explain, Manage, and Monitor your models in a single place! 
\n\nLearn more about Comet [here](https://www.comet.ml/site/) and get started for free!\n\n\n\n",[849,2358,231,9],"demo",{"slug":2360,"featured":6,"template":680},"machine-learning-on-the-gitlab-devops-platform","content:en-us:blog:machine-learning-on-the-gitlab-devops-platform.yml","Machine Learning On The Gitlab Devops Platform","en-us/blog/machine-learning-on-the-gitlab-devops-platform.yml","en-us/blog/machine-learning-on-the-gitlab-devops-platform",{"_path":2366,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":2367,"content":2372,"config":2377,"_id":2379,"_type":14,"title":2380,"_source":16,"_file":2381,"_stem":2382,"_extension":19},"/en-us/blog/mastering-gitlab-admin-tasks-with-gitlab-duo-chat",{"title":2368,"description":2369,"ogTitle":2368,"ogDescription":2369,"noIndex":6,"ogImage":737,"ogUrl":2370,"ogSiteName":693,"ogType":694,"canonicalUrls":2370,"schema":2371},"Mastering GitLab admin tasks with GitLab Duo Chat","Learn how to use Chat to streamline administrative tasks on self-managed instances, improving efficiency and problem-solving capabilities.","https://about.gitlab.com/blog/mastering-gitlab-admin-tasks-with-gitlab-duo-chat","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Mastering GitLab admin tasks with GitLab Duo Chat\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"David O'Regan\"}],\n        \"datePublished\": \"2024-08-09\",\n      }",{"title":2368,"description":2369,"authors":2373,"heroImage":737,"date":2374,"body":2375,"category":10,"tags":2376},[1175],"2024-08-09","As a GitLab administrator managing a self-hosted instance, you often face complex challenges that require innovative solutions. Enter [GitLab Duo Chat](https://about.gitlab.com/gitlab-duo/) – your AI-powered assistant that can significantly streamline your administrative tasks. 
In this article, we'll explore how you can leverage GitLab Duo Chat to solve intricate problems efficiently, using a real-world example of updating group memberships across multiple groups.\n\n## The power of GitLab Duo Chat for admins\n\nGitLab Duo Chat is more than just conversational AI; it's a powerful tool that can assist with complex administrative tasks. By providing context-aware suggestions and code snippets, Chat can help you navigate through GitLab's extensive feature set and underlying architecture.\n\n### Case study: Updating group memberships\n\nLet's dive into a scenario where an admin needs to add an administrator user to multiple [groups](https://docs.gitlab.com/ee/user/group/) – in this case, 50,000 groups. This task, while conceptually simple, can be daunting due to its scale.\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/dBd957MK_DE?si=JYTzdRjVQHyB6rpl\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\n## Step-by-step problem-solving with GitLab Duo Chat\n\n* **Initial approach:** Our first instinct might be to use GitLab's API, but with 50,000 groups, this could put significant strain on the system.\n* **Pivoting to the Rails console:** Instead, we can use the Rails console for a more efficient solution. 
Here's where Chat comes in handy.\n* **Crafting the solution:** Let's walk through the process of creating a Ruby script to accomplish our task:\n\n```ruby\n\ndef add_admin_to_groups(parent_group, admin_user)\n     unless parent_group.members.exists?(user_id: admin_user.id)\n       parent_group.add_owner(admin_user)\n       puts \"Added #{admin_user.name} to #{parent_group.name}\"\n     end\n\n    parent_group.descendants.each do |subgroup|\n      add_admin_to_groups(subgroup, admin_user)\n    end\n  end\n\n# Usage\n\nadmin_user = User.find_by(username: 'mr_admin')\nparent_group = Group.find_by(path: 'top_group')\nadd_admin_to_groups(parent_group, admin_user)\n```\n\n* **Iterative refinement:** Through conversation with Chat, we refined this script, addressing issues like method availability and parameter requirements.\n\n### Key takeaways\n\n#### Non-deterministic responses\n\nGitLab Duo Chat may provide different answers to the same question, encouraging exploration of multiple solutions. As an example:\n\nAn admin asks, ***How can I improve CI/CD pipeline performance?***\n\nChat's first response could be:\n***Consider using GitLab CI/CD caching to speed up subsequent pipeline runs. Here's an example .gitlab-ci.yml snippet:***\n\n```\ncache:\n  key: ${CI_COMMIT_REF_SLUG}\n  paths:\n    - node_modules/    \n ```\n\n***This caches the `node_modules` directory, reducing build times for Node.js projects.***\n\nWhen asked again, Chat's second response could be: ***To improve CI/CD performance, try parallelizing your jobs. Here's an example:***\n\n```\ntest:\n  parallel: 3\n```\n\n***This runs your test job across three parallel runners, potentially reducing overall pipeline duration.***\n\n### Error interpretation and debugging\n\nChat can help interpret error messages and suggest fixes. As an example, an admin encounters an error: \"fatal: reference is not a tree: 8a7d460\". They could ask Chat: ***I'm seeing this Git error. 
What does it mean and how can I fix it?***\n\nChat could respond with:\n\n***This error typically occurs when Git can't find a specific commit. It could be due to a corrupted repository or incomplete fetch. Try these steps:***\n* ***Run `git fsck` to check for corruption.***\n* ***If that doesn't work, try git fetch --all --prune to update all remotes.***\n* ***If the issue persists, you might need to re-clone the repository.***\n\n***Always back up your repository before attempting these fixes.***\n\n### Code optimization and refactoring\n\nChat can assist in optimizing and refactoring code for better performance.\n\nAn example: An admin wants to optimize a Gitaly configuration for a large instance. They could ask Chat: ***How can I optimize Gitaly for an instance with 10,000+ repositories?***\n\nChat could respond with: ***For a large instance, consider this optimized Gitaly configuration:***\n\n```\n[concurrency]\nrpc_concurrency = 100\n\n[rate_limiting]\nrpc_rate_limit = 100\n\n[cgroups]\nmountpoint = '/sys/fs/cgroup'\nhierarchy_root = 'gitaly'\nmemory_bytes = 30000000000  # Adjust based on available memory\ncpu_shares = 1024\n```\n***This configuration increases concurrency, implements rate limiting to prevent overload, and uses cgroups (control groups) to manage resource allocation. Adjust values based on your specific hardware and needs.***\n\n## Best practices for using GitLab Duo Chat in admin tasks\n\n### Formulate queries clearly and provide context\n\nWhen using Chat, the quality of the response largely depends on the quality of your query. Clear, detailed questions that provide relevant context will yield more accurate and helpful answers.\n\nPoor query: ***Why is GitLab slow?***\n\nThis query lacks specifics and context, making it difficult for Chat to provide a targeted response. \n\nA better query would be: ***Our GitLab instance with 5,000 users and 3,000 projects is experiencing slow response times, especially during peak hours (9-11 AM EST). 
CPU usage on the application servers spikes to 90%. How can we diagnose and address this?***\n\nThis improved query provides crucial details:\n\n* scale of the instance (5,000 users, 3,000 projects)\n* nature of the problem (slow response times)\n* timing of the issue (peak hours, 9-11 AM EST)\n* observed symptoms (90% CPU spike)\n\nWith this information, Chat can provide more targeted advice.\n\nAn even better query would be: ***We're running GitLab 15.8.3 on a 3-node cluster (8 vCPUs, 32GB RAM each) with a separate PostgreSQL 13 database and Redis 6.2 instance. Our instance hosts 5,000 users and 3,000 projects. We're experiencing slow response times (average 5s, up from our usual 1s) during peak hours (9-11 AM EST), primarily affecting merge request creation and pipeline initiation. CPU usage on the application servers spikes to 90%, while database CPU remains under 60%. Gitaly CPU usage is around 70%. We've already increased Puma workers to 8 per node. What additional diagnostics should we run and what potential solutions should we consider?***\n\nThis query provides an extensive context, including:\n* GitLab version and infrastructure details\n* specific performance metrics (response time increase)\n* affected operations (merge requests, pipelines)\n* resource usage across different components\n* steps already taken to address the issue\n\nBy providing this level of detail, you enable Chat to:\n* understand the full scope of your environment\n* identify potential bottlenecks more accurately\n* suggest relevant diagnostic steps\n* propose solutions tailored to your specific setup\n\nAvoid recommending steps you've already taken.\n\nRemember, while GitLab Duo Chat is powerful, it's not omniscient. The more relevant information you provide, the better it can assist you. 
By following these guidelines, you'll get the most out of your interactions with Chat, leading to more effective problem-solving and administration of your GitLab instance.\n\n### Use GitLab Duo Chat's suggestions as a starting point and refine incrementally\n\nChat is an excellent tool for getting started with complex tasks, but it's most effective when used as part of an iterative process. Begin with a broad question, then use Chat's responses to guide your follow-up questions, gradually refining your understanding and solution.\n\n#### Initial query\n\nAdmin: ***How can I set up Geo replication for disaster recovery?***\n\nChat might respond with a basic setup guide, covering:\n- prerequisites for Geo setup\n- steps to configure the primary node\n- process for adding a secondary node\n- initial replication process\n\nThis provides a foundation, but complex setups like Geo often require more nuanced understanding. Here's how you might refine your queries:\n\n**- Follow-up Query 1**\n\nAdmin: ***How do I handle custom data in Geo replication?***\nThis question addresses a specific concern not covered in the initial setup. \n\n**- Follow-up Query 2**\n\nAdmin: ***What's the best way to test failover without disrupting production?***\n\nThis query focuses on a critical operational concern. \n\n**- Follow-up Query 3**\n\nAdmin: ***Can you help me create a runbook for Geo failover?***\n\nThis final query aims to consolidate the gathered information into a practical guide. The benefits of this incremental approach:\n\n1. By breaking down the complex topic of Geo replication into smaller, focused queries, you gain a more thorough understanding of the subject.\n2. Each follow-up question allows you to address specific concerns relevant to your environment, resulting in a more customized solution.\n3. The progression from setup to testing to creating a runbook ensures that you're not just understanding the theory, but also preparing for real-world implementation.\n4. 
The step-by-step process of refining your queries helps in better retention of the information, as you're actively engaging with the content.\n5. Follow-up questions often reveal aspects of the task you might not have initially considered, leading to a more robust final solution.\n\n#### Best practices for incremental refinement\n\n- Start with broad questions to establish a foundation.\n- Use Chat's responses to identify areas that need more clarification or detail.\n- Ask about specific scenarios or edge cases relevant to your environment.\n- Seek practical advice on implementation, testing, and maintenance.\n- Use your growing understanding to formulate increasingly specific and complex queries.\n- Periodically summarize your understanding and ask Chat to validate or correct it.\n\nRemember, while GitLab Duo Chat is a powerful tool, it's most effective when combined with your own critical thinking and domain knowledge. Use it to guide your learning and decision-making process, but always validate important information and test solutions in a safe environment before applying them to production systems.\n\n### Potential applications for self-managed GitLab instances\n\n#### User management at scale\n\nAdmin: ***We need to onboard 1,000 new users from an acquired company. The data is in a CSV file. How can we automate this process? I am an Admin, can I use a Python script for this?***\n\nChat: ***You can use GitLab's API for bulk user creation. 
Here's a Python script to get you started:***\n\n```python\nimport requests\nimport csv\n\ndef create_user(username, email, name, password):\n    url = 'https://gitlab.example.com/api/v4/users'\n    headers = {'PRIVATE-TOKEN': 'your_access_token'}\n    data = {\n        'username': username,\n        'email': email,\n        'name': name,\n        'password': password,\n        'skip_confirmation': True\n    }\n    response = requests.post(url, headers=headers, data=data)\n    return response.status_code == 201\n\nwith open('new_users.csv', 'r') as f:\n    reader = csv.reader(f)\n    for row in reader:\n        username, email, name, password = row\n        if create_user(username, email, name, password):\n            print(f\"Created user: {username}\")\n        else:\n            print(f\"Failed to create user: {username}\")\n```\n***This script reads user data from a CSV file and creates users via the GitLab API. Remember to handle errors and rate limiting for large batches.***\n\n#### Project and group administration\n\nAdmin: ***We need to implement a standardized project structure across our organization. How can we automate this? Assume I am an admin. Write me a script to do this via the API.***\n\nChat: ***You can use GitLab's API to create a project template and then apply it to new projects. 
Here's a bash script to create a new project with a standard structure:***\n\n```\n#!/bin/bash\n\nTOKEN=\"your_access_token\"\nGROUP_ID=\"your_group_id\"\n\ncreate_project() {\n  curl --header \"PRIVATE-TOKEN: $TOKEN\" \\\n  --data \"name=$1&namespace_id=$GROUP_ID\" \\\n  \"https://gitlab.example.com/api/v4/projects\"\n}\n\nadd_file() {\n  curl --request POST --header \"PRIVATE-TOKEN: $TOKEN\" \\\n  --form \"branch=main\" \\\n  --form \"content=$2\" \\\n  --form \"commit_message=Add $1\" \\\n  \"https://gitlab.example.com/api/v4/projects/$3/repository/files/$1\"\n}\n\nPROJECT_ID=$(create_project \"New-Standardized-Project\" | jq '.id')\n\nadd_file \"README.md\" \"# Welcome to our standardized project\" $PROJECT_ID\nadd_file \".gitlab-ci.yml\" \"stages:\\n  - build\\n  - test\\n  - deploy\" $PROJECT_ID\nadd_file \"CONTRIBUTING.md\" \"## How to contribute to this project\" $PROJECT_ID\n\necho \"Project created with ID: $PROJECT_ID\"\n```\n\n***This script creates a new project with a README, a basic CI/CD configuration, and a CONTRIBUTING guide. You can expand this to include more files or even clone a template repository.***\n\n#### Performance optimization\n\nAdmin: ***Our merge request creation process is slow. How can we diagnose and improve this? Assume I am an admin.***\n\nChat: ***As an instance admin, there are a few potential areas you can investigate to diagnose and improve the merge request creation process:\nDatabase Performance: The provided quotes show some example SQL queries related to merge request metrics. Slow database queries can significantly impact merge request creation performance. You should review the database performance, indexes, and query plans for any queries involved in the merge request creation process.***\n\n- ***Background jobs: Merge request creation may involve background jobs (e.g., for data processing, notifications, etc.). If these jobs are backed up or running slowly, it can impact the overall merge request creation time. 
Check the background job queues and workers.***\n- ***Code optimization: Analyze the code responsible for merge request creation to identify any potential bottlenecks or inefficiencies. This could include things like unnecessary database queries, inefficient algorithms, or lack of caching.***\n- ***Resource utilization: Monitor the resource utilization (CPU, memory, disk I/O) of the GitLab application servers during merge request creation. High resource utilization can lead to performance degradation.***\n- ***Network performance: If your GitLab instance is distributed across multiple nodes or servers, network performance between these components can impact merge request creation times. Check for any network bottlenecks or latency issues.***\n- ***Caching: Ensure that appropriate caching mechanisms are in place and functioning correctly. Inefficient or incorrect caching can lead to unnecessary overhead during merge request creation.***\n- ***Load testing: Perform load testing to simulate real-world scenarios and identify any performance bottlenecks or scalability issues that may arise under high load.***\n- ***Logging and monitoring: Enable detailed logging and monitoring for the merge request creation process. This can help identify specific areas of slowness or bottlenecks.***\n\n***To effectively diagnose and improve the merge request creation process, you'll likely need to combine multiple techniques, such as profiling, code analysis, database optimization, and load testing. Additionally, consulting the GitLab documentation and community resources can provide valuable insights and best practices specific to your GitLab version and configuration.***\n\n#### Troubleshooting complex issues\n\nAdmin: ***We're experiencing intermittent 502 errors on our GitLab instance. How can we troubleshoot this? 
I am an instance admin, how can I check the logs to find answers?***\n\nChat: ***To troubleshoot intermittent 502 errors on your GitLab instance as an admin, you can follow these steps:***\n\n***1. Check the main GitLab logs while reproducing the issue by running:***\n\n```\nsudo gitlab-ctl tail | tee /tmp/\u003Ccase-ID-and-keywords>.log\n```\n\n***2. Press Ctrl+C to stop tailing the logs.***\n\n***3. Run GitLabSOS to gather broader diagnostics about your instance:***\n\n```\nsudo /opt/gitlab/embedded/bin/gitlab-sos\n```\n\n***4. Use the fast-stats tool to analyze performance statistics from the logs:***\n\n```\nsudo /opt/gitlab/embedded/bin/fast-stats /var/log/gitlab/gitlab-rails/production.log\n```\n\n***5. Look for relevant log entries using the correlation ID that is present in most requests.***\n\n***6. Check the Pages logs (gitlab_pages_error.log) for any specific errors related to Pages communication issues with the GitLab API or loopback timeouts if using an AWS Network Load Balancer.***\n\n***7. Ensure your gitlab-secrets.json file is synced across all GitLab Pages instances if using `domain_config_source=auto`.***\n\n***8. If using systemd, configure GitLab Pages to listen on an explicit IP and port to avoid /tmp being cleaned up.***\n\n## Get started with GitLab Duo Chat\n\nGitLab Duo Chat is a powerful ally for administrators of self-managed GitLab instances. By leveraging its capabilities, you can tackle complex tasks more efficiently, learn new techniques, and ultimately become a more effective GitLab administrator.\n\nWe encourage you to experiment with Chat in your administrative workflows. 
Remember to use it responsibly and always verify the solutions it provides.\n\n> [Try GitLab Duo free for 60 days](https://about.gitlab.com/solutions/gitlab-duo-pro/sales/).\n\n### Resources\n- [GitLab Duo documentation](https://docs.gitlab.com/ee/user/gitlab_duo/)\n- [GitLab Rails Console Cheat Sheet](https://docs.gitlab.com/ee/administration/operations/rails_console.html)\n- [GitLab API documentation](https://docs.gitlab.com/ee/api/)\n- [10 best practices for using AI-powered GitLab Duo Chat](https://about.gitlab.com/blog/10-best-practices-for-using-ai-powered-gitlab-duo-chat/)\n- [GitLab Duo Chat 101: Get more done on GitLab with our AI assistant](https://about.gitlab.com/blog/gitlab-duo-chat-101-get-more-done-on-gitlab-with-our-ai-assistant/)\n",[9,724,482,704,725],{"slug":2378,"featured":91,"template":680},"mastering-gitlab-admin-tasks-with-gitlab-duo-chat","content:en-us:blog:mastering-gitlab-admin-tasks-with-gitlab-duo-chat.yml","Mastering Gitlab Admin Tasks With Gitlab Duo Chat","en-us/blog/mastering-gitlab-admin-tasks-with-gitlab-duo-chat.yml","en-us/blog/mastering-gitlab-admin-tasks-with-gitlab-duo-chat",{"_path":2384,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":2385,"content":2391,"config":2396,"_id":2398,"_type":14,"title":2399,"_source":16,"_file":2400,"_stem":2401,"_extension":19},"/en-us/blog/measuring-ai-effectiveness-beyond-developer-productivity-metrics",{"title":2386,"description":2387,"ogTitle":2386,"ogDescription":2387,"noIndex":6,"ogImage":2388,"ogUrl":2389,"ogSiteName":693,"ogType":694,"canonicalUrls":2389,"schema":2390},"Measuring AI effectiveness beyond developer productivity metrics ","AI assistants are here, yet measuring AI's impact on productivity isn’t figured out. 
Here’s why it’s a difficult problem and how GitLab is solving it.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749671994/Blog/Hero%20Images/AdobeStock_700757695.jpg","https://about.gitlab.com/blog/measuring-ai-effectiveness-beyond-developer-productivity-metrics","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Measuring AI effectiveness beyond developer productivity metrics \",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Taylor McCaslin\"}],\n        \"datePublished\": \"2024-02-20\",\n      }",{"title":2386,"description":2387,"authors":2392,"heroImage":2388,"date":2393,"body":2394,"category":10,"tags":2395},[1410],"2024-02-20","AI-powered productivity tools promise to boost productivity by automating repetitive coding and tedious tasks, as well as generating code. How organizations measure the AI impact of these productivity tools has yet to be truly figured out. GitLab is working on a solution: AI Impact is a dashboard grounded in [value stream analytics](https://about.gitlab.com/solutions/value-stream-management/) that will help organizations understand the effect of [GitLab Duo](https://about.gitlab.com/gitlab-duo/), our AI-powered suite of features, on their productivity. AI Impact is the culmination of what we’ve learned at GitLab about measuring the impact of AI, and we wanted to share those lessons with you.\n\n[A report for The Pragmatic Engineer](https://newsletter.getdx.com/p/developer-productivity-metrics-at-top-companies) shows that measuring productivity in general isn’t straightforward, with top engineering teams around the globe all using different metrics. If everyone has a different productivity metric to optimize, how do we even begin to measure the impact of AI productivity tools? 
Welcome to why measuring AI assistant productivity impact is difficult and commonly misses the mark.\n\n>  Follow the progress of our AI Impact dashboard and [share your feedback](https://gitlab.com/groups/gitlab-org/-/epics/12978).\n\n## Flawed productivity metrics\n\nSimplistic productivity metrics like lines of code contributed per day or acceptance rates of AI suggestions fail to capture downstream costs. For instance, GitClear, according to [an Infoworld article](https://www.infoworld.com/article/3712685/is-ai-making-our-code-stupid.html), “analyzed 153 million lines of changed code between January 2020 and December 2023 and now expects that code churn ('the percentage of lines that are reverted or updated less than two weeks after being authored') will double in 2024.\" Thus, simply measuring lines of code risks technical debt pileup and skill atrophy in developers.  \n\n## Indirect impacts are hard to quantify\n\nThe goal of AI developer tools is to remove toil, allowing developers to focus on higher value tasks like system architecture and design. But how much time is really saved this way versus spent reviewing, testing, and maintaining AI-generated code? These second-order productivity impacts are very difficult to accurately attribute directly to AI, which may give you a false sense of value. One solution to this is to choose who gets to use AI productivity tools carefully.\n\n## Focus should be on business outcomes\n\nUltimately, what matters is real-world business outcomes, not developer activity metrics. Tracking lead time, cycle time, production defects, and user satisfaction better indicate where bottlenecks exist. If AI tools generate usable code faster, and quality teams can’t keep up with changes, the end software product may decrease in quality and lead to customer satisfaction problems. Shipping more sounds great until it causes problems that take even more time, money, and effort to resolve. 
Measuring business outcomes is also difficult and these measurements frequently are lagging indicators of problems. Measuring quality defects, security issues, and application performance are all ways to identify business impact sooner. \n\n## The need to balance speed and quality\n\nWhile AI code generation has the potential to accelerate development velocity, it should not come at the cost of overall quality and maintainability. Teams must strike the right balance between velocity and writing maintainable, well-tested code that solves actual business problems. Quality should not be sacrificed purely to maximize productivity metrics. This is when measuring lines of code AI generates or number of AI suggestions developers accept can optimize for the problematic outcomes. More code doesn't necessarily mean higher quality or productivity. More code means more to review, test, and maintain – potentially slowing delivery down.\n\nLet’s look at an example: AI-generated code output is scoped to the area a developer is currently working on. Current AI tools lack the ability to assess the broader architecture of the application (amplified in a microservices architecture). This means that even if the quality of the generated code is good, it may lead to repetition and code bloat because it will be inserted into the area targeted rather than making wider systematic changes. This is problematic in languages that are architected with object-oriented languages that use DRY (don't-repeat-yourself) principles. This is an active area of research and we’re excited to adopt new approaches and technologies to increase the context awareness of our AI features.\n\nAcceptance rate can be particularly misleading, and unfortunately is becoming the primary way AI productivity tools measure success. Developers may accept an AI-generated suggestion but then need to heavily edit or rewrite it. Thus, the initial acceptance gives no indication of whether the suggestion was actually useful. 
Acceptance rate is fundamentally a proxy for AI assistant quality, yet it is misconstrued as a productivity measure. This is especially misleading when all vendors are measuring acceptance rate differently and marketing based on this number. GitLab intentionally does not use this kind of data in our marketing. What we’ve seen in practice is that developers use AI-generated code similar to how an actor uses a cue – they look at the generated code and say, \"oh, right, that's the nudge I needed, I'll take it from here.\" \n\n## Implementation and team dynamics play a key role\n\nHow productivity gains materialize depends on how AI tools are implemented and developer dynamics. If some developers distrust the technology or reviews become lax expecting AI to catch errors, quality may suffer. Additionally, introducing AI tools often necessitates changes to processes like code reviews, testing, and documentation. Productivity could temporarily decline as teams adjust to new workflows before seeing gains. Organizations must ensure that when implementing AI tools, that they allow teams time to figure out how it works and how it fits into their workflows, knowing that this trial-and-error period may lead to reduced productivity metrics before seeing productivity gains. \n\nTo get this balance right, it’s important to define the tasks that are highly accurate and consistent and train the team to use AI for those use cases (at least, at first). We know that AI code generation is useful for producing scaffolding, test generation, and syntax corrections, as well as generating documentation. Have teams start there and they will see better results and learn to use the tool more effectively. Remember you can’t measure AI’s impact in a week. You have to give teams time to find their rhythm with their AI assistants. 
\n\n## Challenges exist, but AI is the future\n\nNow that we’ve talked about the challenges of measuring AI impact and potential risks, we do want to say at GitLab we do believe AI has a huge role to play in the evolution of DevSecOps platforms. That’s why we’re building GitLab Duo. But we are not rushing into productivity measurement by showing acceptance rates, or lines of code generated. We believe these are a step backwards to previous ways of thinking about productivity. Instead we’re looking at the data we have within our unified DevSecOps platform to present a more complete picture of AI Impact.  \n\n## What to measure instead\n\nMeasuring the productivity impacts of AI developer tools requires nuance and a focus on end-to-end outcomes rather than isolated productivity metrics. For these reasons, simple quantitative metrics tend to miss the nuances of measuring productivity with AI developer tools. The key is to combine quantitative data from across the software development lifecycle (SDLC) with qualitative feedback from developers on how AI actually impacts their day-to-day experience and shapes long-term development practices. Only then can we get an accurate picture of the productivity gains these tools can offer. We view AI as an augmentor to DevSecOps adoption, rather than a replacement for doing things the right way. Organizations focusing on building the right muscles in their SDLC practice are the ones best positioned to actually take advantage of any potential gains in developer coding productivity.\n\nSo what metric should we use instead? At GitLab we already have [value stream analytics](https://about.gitlab.com/solutions/value-stream-management/), which examine the end-to-end flow of work from idea to production to determine where bottlenecks exist. Value stream analytics isn’t a single measurement, it’s the ongoing tracking of metrics like lead time, cycle time, deployment frequency, and production defects. 
This keeps the focus on business outcomes rather than developer activity. By taking a holistic view across code quality, collaboration, downstream costs, and developer experience, teams can steer these technologies to augment (rather than replace) human abilities over the long run. \n\n## Introducing GitLab's AI Impact approach\n\nGitLab has the whole picture being a unified DevSecOps platform that spans the entire SDLC. We built [Value Stream Management](https://about.gitlab.com/solutions/value-stream-management/) to empower teams with metrics and insights to ship better software faster. Blending GitLab [Value Stream Analytics](https://about.gitlab.com/solutions/value-stream-management/) and [DORA metrics](https://about.gitlab.com/solutions/value-stream-management/dora/), and GitLab Duo usage data, we can provide organizations with the complete picture of how AI is impacting their SDLC. We’re calling this dashboard AI Impact, and it’s coming in an upcoming release to measure GitLab Duo’s impact on productivity. Follow our progress and [share your feedback](https://gitlab.com/groups/gitlab-org/-/epics/12978). \n\n_Disclaimer: This blog contains information related to upcoming products, features, and functionality. It is important to note that the information in this blog post is for informational purposes only. Please do not rely on this information for purchasing or planning purposes. As with all projects, the items mentioned in this blog and linked pages are subject to change or delay. 
The development, release, and timing of any products, features, or functionality remain at the sole discretion of GitLab._",[9,482,703,725],{"slug":2397,"featured":91,"template":680},"measuring-ai-effectiveness-beyond-developer-productivity-metrics","content:en-us:blog:measuring-ai-effectiveness-beyond-developer-productivity-metrics.yml","Measuring Ai Effectiveness Beyond Developer Productivity Metrics","en-us/blog/measuring-ai-effectiveness-beyond-developer-productivity-metrics.yml","en-us/blog/measuring-ai-effectiveness-beyond-developer-productivity-metrics",{"_path":2403,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":2404,"content":2409,"config":2414,"_id":2416,"_type":14,"title":2417,"_source":16,"_file":2418,"_stem":2419,"_extension":19},"/en-us/blog/meet-gitlab-duo-the-suite-of-ai-capabilities",{"title":2405,"description":2406,"ogTitle":2405,"ogDescription":2406,"noIndex":6,"ogImage":1522,"ogUrl":2407,"ogSiteName":693,"ogType":694,"canonicalUrls":2407,"schema":2408},"Meet GitLab Duo, the suite of AI capabilities powering your workflows","Learn about GitLab Duo, an expanding toolbox of features integrated directly into the GitLab platform to assist DevSecOps teams.","https://about.gitlab.com/blog/meet-gitlab-duo-the-suite-of-ai-capabilities","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Meet GitLab Duo, the suite of AI capabilities powering your workflows\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"David DeSanto, Chief Product Officer, GitLab\"}],\n        \"datePublished\": \"2023-06-22\",\n      }",{"title":2405,"description":2406,"authors":2410,"heroImage":1522,"date":2411,"body":2412,"category":10,"tags":2413},[1486],"2023-06-22","\nHave you ever wanted a real-time partner alongside you to help as you develop code, improve operations, and secure your software? 
[GitLab Duo](https://about.gitlab.com/gitlab-duo/), available now, is a powerful set of AI capabilities within GitLab’s DevSecOps Platform that does just that – suggesting code, explaining vulnerabilities, forecasting value streams, and much more.\n\nThe name GitLab Duo is rooted in You + GitLab AI = the AI dynamic duo. GitLab Duo goes beyond just being an AI pair programmer: It is an expanding toolbox of features integrated into the DevSecOps Platform to help teams across the entire software development environment become more efficient. GitLab Duo is your go-to for planning refinement, security risk resolution, CI/CD pipeline health, and analytics charting. \n\nGitLab Duo is a customer-centric approach focused on privacy first, where customers know their intellectual property is secured.\n\n## GitLab Duo capabilities\nGitLab Duo includes:\n\n- **Code Suggestions** helps developers create new code and update existing code, reducing cognitive load, improving efficiency, and allowing them to spend more time adding unique value to their applications.\n- **Explain this Code** uses AI to examine code, both within a merge request and in the repository view, and provides a natural language explanation, helping to enable all teams to understand the code being merged.\n- **Explain this Vulnerability** helps developers write more secure code by providing a natural language description of the vulnerability and the steps to resolve it.\n- **Generate detailed descriptions of epics, issues, and tasks** helps ensure all teams are aligned and can achieve their common goals faster.\n- **Summarize Issue Comments** helps get everyone up to speed quickly in epics, issues, and tasks.  \n- **Chat** enables users to ask configuration questions and receive natural language explanations, as well as links to GitLab Docs. \n\nThroughout the year, all the GitLab Duo capabilities will become available via GitLab Duo Chat. 
As humans, we gravitate towards conversational chat, which is why we expect GitLab Duo Chat to become a de facto choice for how users interact with GitLab AI capabilities. We will be adding new capabilities to GitLab Duo, including assisting users with generating planning descriptions, using natural language for CI configuration and chart generation, refactoring vulnerabilities from code, suggesting a fix for failed tests, helping resolve CI/CD pipeline failures, summarizing vulnerability reports, and assisting with merge request reviews.\n\nAt GitLab, we believe everyone can contribute. By bringing GitLab Duo capabilities to every persona who uses GitLab, everyone can benefit from AI-powered workflows and organizations can ship secure software faster.\n\n## The impact of GitLab Duo on workflow efficiency\nOur goal is to help you achieve a 10x improvement in workflow efficiency by tapping into all of the [DevSecOps Platform’s AI capabilities](https://about.gitlab.com/blog/ai-ml-in-devsecops-series/). 
With GitLab 16, business leaders get an enterprise-grade solution that delivers greater efficiency by reducing tool sprawl and gives teams greater visibility into their workflows.\n\nTo learn more about the exciting features and capabilities of GitLab Duo, [watch the replay of our GitLab 16 event](https://about.gitlab.com/sixteen/).\n\nStay tuned for more updates, and get ready to experience a new era of AI-powered DevSecOps workflows with [GitLab Duo](https://about.gitlab.com/gitlab-duo/).\n",[1135,704,9,703],{"slug":2415,"featured":6,"template":680},"meet-gitlab-duo-the-suite-of-ai-capabilities","content:en-us:blog:meet-gitlab-duo-the-suite-of-ai-capabilities.yml","Meet Gitlab Duo The Suite Of Ai Capabilities","en-us/blog/meet-gitlab-duo-the-suite-of-ai-capabilities.yml","en-us/blog/meet-gitlab-duo-the-suite-of-ai-capabilities",{"_path":2421,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":2422,"content":2427,"config":2432,"_id":2434,"_type":14,"title":2435,"_source":16,"_file":2436,"_stem":2437,"_extension":19},"/en-us/blog/meet-gitlab-duo-workflow-the-future-of-ai-driven-development",{"title":2423,"description":2424,"ogTitle":2423,"ogDescription":2424,"noIndex":6,"ogImage":799,"ogUrl":2425,"ogSiteName":693,"ogType":694,"canonicalUrls":2425,"schema":2426},"Meet GitLab Duo Workflow - the future of AI-driven development","Workflow, our autonomous AI agent, will transform the way teams build and ship software – our first bold step towards AI-driven DevSecOps.","https://about.gitlab.com/blog/meet-gitlab-duo-workflow-the-future-of-ai-driven-development","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Meet GitLab Duo Workflow - the future of AI-driven development\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"David DeSanto, Chief Product Officer, GitLab\"}],\n        \"datePublished\": \"2024-06-27\",\n      
}",{"title":2423,"description":2424,"authors":2428,"heroImage":799,"date":2429,"body":2430,"category":10,"tags":2431},[1486],"2024-06-27","Imagine if software wrote itself. It seems like a distant future, but with ongoing advancements in large language models and GitLab’s unified AI-powered DevSecOps platform, that future is quickly coming into focus. During our [GitLab 17 launch event](https://about.gitlab.com/seventeen/), we announced GitLab Duo Workflow, an autonomous AI agent that will transform how teams build, secure, deploy, and monitor software.\n\nGitLab Duo Workflow moves beyond the current landscape of reactive, prompt-based AI assistants by creating an autonomous team member actively contributing to optimize every aspect of your software development lifecycle. Workflow distinguishes itself by leveraging GitLab’s unified data store, which seamlessly connects all relevant data, projects, repositories, and documentation. This allows Workflow to be an intelligent, always-on agent that constantly monitors your projects, anticipates potential production issues, automatically identifies and resolves vulnerabilities, optimizes your applications for peak performance, and streamlines onboarding by rapidly building customized remote development environments.\n\nAI is transforming how secure software is created, maintained, updated, deployed, and monitored, enabling organizations to ship more software than ever before. GitLab Duo Workflow represents our first bold step towards AI-driven DevSecOps. We aim to empower developers to focus on high-level problem-solving, innovation, and value creation, while [GitLab Duo](https://about.gitlab.com/gitlab-duo/) handles repetitive tasks and optimization behind the scenes.\n\n## The vision for GitLab Duo Workflow\nWith GitLab Duo Workflow, we are laser-focused on tackling several key use cases to automate and optimize the software development process from end to end.\n### 1. 
Development automated\n\nStraight from the IDE, GitLab Duo Workflow helps plan and prioritize tasks tailored to individual projects and defined organizational processes. Using the requirements from a particular work item (whether an epic, issue, or task), Workflow produces an implementation plan that developers can review and refine. Then, Workflow works through the plan, generating or rewriting code to accomplish and meet the defined requirements. Workflow accomplishes this by operating within a [GitLab Remote Development workspace](https://about.gitlab.com/blog/quick-start-guide-for-gitlab-workspaces/), allowing it to safely and securely evaluate, write, and test code changes. This also ensures that Workflow produces code that both meets the requirements as well as passes all CI pipeline tests, including security scans. If the pipeline fails, Workflow automatically addresses issues and troubleshoots as needed, ensuring only high-quality code that meets your organization's standards is created and committed to your projects.\n\nOnce ready, Workflow will automatically create a merge request outlining its code changes and go through your merge request approval processes, including engaging with the code reviewer or maintainer. You can even ask Workflow to review your code and have it comment on your merge requests just as a human code reviewer would today. Even better, Workflow will implement those suggestions for you if you'd like it to. And this is only the start.\n\n### 2. Intelligent continuous improvement\nGitLab Duo Workflow will analyze your codebase in real time, suggesting architectural optimizations for enhanced efficiency, performance, and cost savings. Furthermore, it will proactively identify opportunities for code refactoring to improve scalability and address technical debt by suggesting changes to developers or automatically implementing them in a sandbox environment. 
Additionally, Workflow will dynamically manage cloud resources to prevent overprovisioning and ensure your applications always meet their performance targets.\n\n### 3. Proactive security and compliance\nSecurity and compliance are top priorities for any organization. GitLab Duo Workflow will automatically ask developers to apply patches, refactor insecure code, and adapt to emerging threats in real time. Moreover, Workflow will continuously assess security risks associated with your applications and production environments and assist you with implementing mitigating controls.\n\n### 4. Self-optimizing performance\nGitLab Duo Workflow will incorporate sophisticated feedback loops for continuous learning and improvement. By analyzing data from monitoring tools, user interactions, and business outcomes, it will continuously refine its view of your codebase to ensure that your application architectures are always aligned with your business needs. As we see with all AI, Workflow will constantly improve, catching and fixing its own mistakes as it learns to be a partner in your organization.\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://player.vimeo.com/video/967982166?badge=0&amp;autopause=0&amp;player_id=0&amp;app_id=58479\" frameborder=\"0\" allowfullscreen=\"true\" title=\"GitLab Duo Workflow the future of AI-driven DevSecOps\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\n## The future of AI is now\nGitLab Duo Workflow represents an exciting leap forward, transitioning from AI that requires constant human prompting to AI that drives development workflows and processes, seeking human guidance when needed. With GitLab's unified, AI-driven interface spanning the DevSecOps lifecycle, organizations will be able to create a new generation of AI-powered applications with unparalleled speed, efficiency, and innovation, all while maintaining the highest standards of security and compliance. 
No tradeoffs.\n\nStay tuned for more updates and insights as we continue to push the boundaries of what's possible with AI in software development. Together, let's embrace the future of AI-driven DevSecOps and unlock the full potential of your teams and organizations.\n\n> If you are curious about AI-driven DevSecOps and want to be part of this journey, including access to the pre-release program, please [sign up for our GitLab Duo Workflow waitlist](https://forms.gle/5ppRuNVb8LwSPNVJA).",[9,482,703,1135,746],{"slug":2433,"featured":91,"template":680},"meet-gitlab-duo-workflow-the-future-of-ai-driven-development","content:en-us:blog:meet-gitlab-duo-workflow-the-future-of-ai-driven-development.yml","Meet Gitlab Duo Workflow The Future Of Ai Driven Development","en-us/blog/meet-gitlab-duo-workflow-the-future-of-ai-driven-development.yml","en-us/blog/meet-gitlab-duo-workflow-the-future-of-ai-driven-development",{"_path":2439,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":2440,"content":2445,"config":2449,"_id":2451,"_type":14,"title":2452,"_source":16,"_file":2453,"_stem":2454,"_extension":19},"/en-us/blog/merge-request-changes-summary-ai",{"title":2441,"description":2442,"ogTitle":2441,"ogDescription":2442,"noIndex":6,"ogImage":820,"ogUrl":2443,"ogSiteName":693,"ogType":694,"canonicalUrls":2443,"schema":2444},"ML experiment: Summarize merge request changes","Learn how GitLab is experimenting with ML-powered merge request changes summarization in this sixth installment of our ongoing AI/ML in DevSecOps series.","https://about.gitlab.com/blog/merge-request-changes-summary-ai","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"ML experiment: Summarize merge request changes\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Kai Armstrong\"}],\n        \"datePublished\": \"2023-04-20\",\n      
}",{"title":2441,"description":2442,"authors":2446,"heroImage":820,"date":1987,"body":2447,"category":10,"tags":2448},[1448],"\n\n\u003Ci>This blog post is part of an ongoing series about GitLab's journey to [build and integrate AI/ML into our DevSecOps platform](/blog/ai-ml-in-devsecops-series/). The series starts here: [What the ML is up with DevSecOps and AI?](/blog/what-the-ml-ai/). Throughout the series, we'll feature blogs from our product, engineering, and UX teams to showcase how we're infusing AI/ML into GitLab.\u003C/i>\n\nMerge requests are the central point of collaboration for code changes in GitLab. They often contain a variety of changes across many files and services within a project. Often, merge requests communicate the intent of the change as it relates to an issue being resolved, but they might not describe what was changed to achieve that. As review cycles progress, the current state of the merge request can become out of sync with the realities of the proposed changes and keeping people informed. We believe that we can leverage AI and large language models (LLMs) to help provide relevant summaries of a merge request and its proposed changes, so reviewers and authors can spend more time discussing changes and less time keeping descriptions updated.\n\nIn a rapid prototype, [Kerri Miller](https://gitlab.com/kerrizor), Staff Backend Engineer for our [Code Review Group](https://handbook.gitlab.com/handbook/product/categories/#code-review-group), used AI to summarize the merge request changes directly within the [merge request](https://docs.gitlab.com/ee/user/project/merge_requests/). She developed a `/summarize_diff` quick action to post a summary of changes into a comment:\n\n![Merge request summary via AI](https://about.gitlab.com/images/blogimages/merge-request-changes-summary-ai.gif){: .shadow}\n\n## Iterating on AI/ML features\nWhile just an experiment today, we are iterating on how to effectively bring features like this to our customers. 
We're starting with providing complete summaries of what changes a merge request makes, and are beginning to look at more targeted flows to enhance the review cycle experience. Current areas we're investigating include providing:\n\n- Summaries of what's changed between each review cycle in a merge request.\n- Summaries of review feedback to merge request authors.\n\nThis experiment is just the start of the ways we're looking to infuse GitLab with AI/ML capabilities to help GitLab users become more efficient and effective at their jobs. We are [looking across the software development lifecycle](/blog/what-the-ml-ai/) for painful and time-consuming tasks that are ideal for AI-assisted features. We'll continue to share these demos throughout this blog series.\n\nInterested in using these AI-generated features? [Join our waitlist](https://forms.gle/9eeUkPJauKsbLaoz5) and share your ideas.\n\nContinue reading our ongoing series, \"[AI/ML in DevSecOps](/blog/ai-ml-in-devsecops-series/)\".\n\n_Disclaimer: This blog contains information related to upcoming products, features, and functionality. It is important to note that the information in this blog post is for informational purposes only. Please do not rely on this information for purchasing or planning purposes. As with all projects, the items mentioned in this blog and linked pages are subject to change or delay. 
The development, release, and timing of any products, features, or functionality remain at the sole discretion of GitLab._\n",[849,725,704,9],{"slug":2450,"featured":6,"template":680},"merge-request-changes-summary-ai","content:en-us:blog:merge-request-changes-summary-ai.yml","Merge Request Changes Summary Ai","en-us/blog/merge-request-changes-summary-ai.yml","en-us/blog/merge-request-changes-summary-ai",{"_path":2456,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":2457,"content":2462,"config":2467,"_id":2469,"_type":14,"title":2470,"_source":16,"_file":2471,"_stem":2472,"_extension":19},"/en-us/blog/merge-request-suggest-a-test",{"title":2458,"description":2459,"ogTitle":2458,"ogDescription":2459,"noIndex":6,"ogImage":1349,"ogUrl":2460,"ogSiteName":693,"ogType":694,"canonicalUrls":2460,"schema":2461},"ML experiment: Generate tests for code changes","Learn how GitLab is experimenting with ML-powered test suggestions in this latest installment of our ongoing 'AI/ML in DevSecOps' series.","https://about.gitlab.com/blog/merge-request-suggest-a-test","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"ML experiment: Generate tests for code changes\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Kai Armstrong\"}],\n        \"datePublished\": \"2023-04-27\",\n      }",{"title":2458,"description":2459,"authors":2463,"heroImage":1349,"date":2464,"body":2465,"category":10,"tags":2466},[1448],"2023-04-27","\n\n\u003Ci>This blog post is part of an ongoing series about GitLab's journey to [build and integrate AI/ML into our DevSecOps platform](/blog/ai-ml-in-devsecops-series/). The series starts here: [What the ML is up with DevSecOps and AI?](/blog/what-the-ml-ai/). 
Throughout the series, we'll feature blogs from our product, engineering, and UX teams to showcase how we're infusing AI/ML into GitLab.\u003C/i>\n\nProposing changes and new features via merge requests is great, but what about the tests? Sometimes, tests can be the hardest part of any code change you make. Maybe you're not sure how to start writing the tests? Maybe the test doesn't cover all the scenarios that need to be tested? Maybe you just want to get a second opinion on the tests that were written? We believe that we can use generative AI and large language models (LLMs) to help provide relevant test coverage for the proposed changes, so reviewers and authors can have confidence in the quality of code changes being submitted.\n\nIn a rapid prototype, [Phil Hughes](https://gitlab.com/iamphill), Staff Frontend Engineer for our [Code Review Group](https://handbook.gitlab.com/handbook/product/categories/#code-review-group), used AI to generate suggested test coverage for changes directly in the [merge request](https://docs.gitlab.com/ee/user/project/merge_requests/). He added a new option on merge request files to provide suggested tests in a sidebar:\n\n![Merge request test generation AI](https://about.gitlab.com/images/blogimages/merge-request-generate-tests-ai.gif){: .shadow}\n\n## Iterating on AI/ML features\n\nWhile just an experiment today, we are iterating on how to effectively bring features like this to our customers. We're beginning by generating these test suggestions, and seeking ways to incorporate them into the review flow. We're exploring ideas like:\n\n- Automatic detection of missing tests, with suggestions to add coverage\n- Automated review of the proposed tests in the merge request, for appropriateness and completeness\n\nThis experiment is just the start of the ways we're infusing GitLab with AI/ML capabilities to help GitLab users become more efficient and effective at their jobs. 
We are [looking across the software development lifecycle](/blog/what-the-ml-ai/) for painful and time-consuming tasks that are ideal for AI-assisted features. We'll continue to share these demos throughout this blog series.\n\nInterested in using these AI-generated features? [Join our waitlist](https://forms.gle/9eeUkPJauKsbLaoz5) and share your ideas.\n\nContinue reading our \"[AI/ML in DevSecOps](https://about.gitlab.com/blog/ai-ml-in-devsecops-series/)\" series.\n\n_Disclaimer: This blog contains information related to upcoming products, features, and functionality. It is important to note that the information in this blog post is for informational purposes only. Please do not rely on this information for purchasing or planning purposes. As with all projects, the items mentioned in this blog and linked pages are subject to change or delay. The development, release, and timing of any products, features, or functionality remain at the sole discretion of GitLab._\n",[703,725,704,9],{"slug":2468,"featured":6,"template":680},"merge-request-suggest-a-test","content:en-us:blog:merge-request-suggest-a-test.yml","Merge Request Suggest A Test","en-us/blog/merge-request-suggest-a-test.yml","en-us/blog/merge-request-suggest-a-test",{"_path":2474,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":2475,"content":2480,"config":2485,"_id":2487,"_type":14,"title":2488,"_source":16,"_file":2489,"_stem":2490,"_extension":19},"/en-us/blog/ml-experiment-sql",{"title":2476,"description":2477,"ogTitle":2476,"ogDescription":2477,"noIndex":6,"ogImage":820,"ogUrl":2478,"ogSiteName":693,"ogType":694,"canonicalUrls":2478,"schema":2479},"ML experiment: Writing SQL is about to get a lot easier","Learn how GitLab is experimenting with ML-powered product features in this third installment of our ongoing AI/ML in DevSecOps series.","https://about.gitlab.com/blog/ml-experiment-sql","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n   
     \"headline\": \"ML experiment: Writing SQL is about to get a lot easier\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Taylor McCaslin\"}],\n        \"datePublished\": \"2023-03-30\",\n      }",{"title":2476,"description":2477,"authors":2481,"heroImage":820,"date":2482,"body":2483,"category":10,"tags":2484},[1410],"2023-03-30","\n\n\u003Ci>This blog post is part of an ongoing series about GitLab's journey to [build and integrate AI/ML into our DevSecOps platform](/blog/ai-ml-in-devsecops-series/). The series starts here: [What the ML is up with DevSecOps and AI?](/blog/what-the-ml-ai/). Throughout the series, we'll feature blogs from our product, engineering, and UX teams to showcase how we're infusing AI/ML into GitLab.\u003C/i>\n\nSQL, the structured query language, has long been the backbone of data analysis and manipulation. But let's face it, not everyone is an SQL wizard. For many, writing even a simple SQL query can be a daunting task, let alone tackling more advanced queries. Even experienced data analysts spend lots of time and effort writing and debugging complex queries just to answer simple business intelligence questions.\n\nWith the recent advancements in AI and natural language processing, it's now possible for AI models to generate SQL code from simple English language queries. This means that even people without a deep understanding of SQL can generate complex queries to analyze their data. This technology not only improves accessibility but can also save valuable time and effort for data analysts.\n\n## AI-assisted SQL generation\nAt GitLab, we’re experimenting with AI-assisted SQL generation in our [Product Analytics group](https://docs.gitlab.com/ee/user/product_analytics/). This area is focused on helping users understand and gain insights from usage patterns. 
GitLab Product Analytics can track events within your project applications, which enables you to explore your data and generate dashboards with interactive graphs and charts. You can use our visual designer or YAML to define them, and we envision it becoming even easier with AI assistance. You can learn more about our Product Analytics plans in our [sneak peek blog post](/blog/introducing-product-analytics-in-gitlab/).\n\nIn a simple experiment, our own [Tim Zallmann](https://gitlab.com/timzallmann), Senior Director of Engineering, prototyped leveraging AI-generated queries from simple natural language parsing. The results quickly showcase how powerful using natural language can be to help generate the SQL to populate the Product Analytics dashboards. \n\n![Animated gif image of SQL generation](https://about.gitlab.com/images/blogimages/sql-query-generation-lg.gif){: .shadow}\n\nAbove, you can see an example of how we're using natural language to generate SQL queries to power dashboard charts and graphs. You can watch the full demo in the video below. \n\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube-nocookie.com/embed/q3HZy0P0ugw\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\n## Iterating on AI/ML features\nWhile just an experiment today, we are iterating on how to effectively bring features like this to our customers. This experiment is just the start of many ways we’re looking to infuse GitLab with AI/ML capabilities to help GitLab users become more efficient and effective at their jobs. We are [looking across the software development lifecycle](/blog/what-the-ml-ai/) for painful and time-consuming tasks that are ideal for AI Assisted features. We’ll be sharing more of these demos in this blog series.\n\nInterested in using these AI-generated features? 
[Join our waitlist](https://forms.gle/9eeUkPJauKsbLaoz5) and share your ideas.\n\nContinue reading our ongoing series, \"[AI/ML in DevSecOps](/blog/ai-ml-in-devsecops-series/)\".\n\n_Disclaimer: This blog contains information related to upcoming products, features, and functionality. It is important to note that the information in this blog post is for informational purposes only. Please do not rely on this information for purchasing or planning purposes. As with all projects, the items mentioned in this blog and linked pages are subject to change or delay. The development, release, and timing of any products, features, or functionality remain at the sole discretion of GitLab._\n",[849,725,704,9],{"slug":2486,"featured":6,"template":680},"ml-experiment-sql","content:en-us:blog:ml-experiment-sql.yml","Ml Experiment Sql","en-us/blog/ml-experiment-sql.yml","en-us/blog/ml-experiment-sql",{"_path":2492,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":2493,"content":2498,"config":2503,"_id":2505,"_type":14,"title":2506,"_source":16,"_file":2507,"_stem":2508,"_extension":19},"/en-us/blog/modern-software-development-problems-require-modern-ai-powered-devsecops",{"title":2494,"description":2495,"ogTitle":2494,"ogDescription":2495,"noIndex":6,"ogImage":820,"ogUrl":2496,"ogSiteName":693,"ogType":694,"canonicalUrls":2496,"schema":2497},"Modern software development problems require modern AI-powered DevSecOps","Learn how applying AI can help solve common development challenges.","https://about.gitlab.com/blog/modern-software-development-problems-require-modern-ai-powered-devsecops","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Modern software development problems require modern AI-powered DevSecOps\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"William Arias\"}],\n        \"datePublished\": \"2023-09-07\",\n      
}",{"title":2494,"description":2495,"authors":2499,"heroImage":820,"date":2500,"body":2501,"category":10,"tags":2502},[2354],"2023-09-07","\nThe landscape of software development is already being impacted by the integration of AI tools. GitLab is making that impact a positive one. Infusing AI throughout the software development lifecycle lowers the barrier for everyone to contribute. Let's take a look how organizations can evolve the idea _\"every company is a software company\"_ into _\"every company should be an AI-first company\"_ and how this transformation can happen by embracing AI-powered DevSecOps.\n\nAn effective way to demonstrate how AI can be a game-changer for software development teams is to walk through a detailed scenario. In this example, you'll see how applying [GitLab Duo](https://about.gitlab.com/gitlab-duo/), the suite of AI capabilities powering workflows, to a dilemma that many organizations face – how to automate complex customer-facing tools – can improve the experience for DevSecOps teams and customers alike.\n\n## Challenge: Keeping customer Q&A systems from going stale\nImagine a large financial services firm has an extensive FAQ section on its website but keeping that information up to date and easy to navigate has become increasingly difficult. Also, the FAQs don't cover queries that are less frequent but should be easy to respond to.\n\n![FAQ](https://about.gitlab.com/images/blogimages/2023-07-24-modern-problems-modern-devsecops/faq-improved.gif)\n\nTo resolve this, the company proposed building an AI-enabled Q&A system that not only answers common queries but also other finance-related questions. The process to build the AI-enabled system described above can look like this:\n\n![process](https://about.gitlab.com/images/blogimages/2023-07-24-modern-problems-modern-devsecops/process-demo.png)\n\n### Problem definition and collaboration\nThe journey begins with a GitLab issue. 
This is where the team collaborates, discusses ideas, and brainstorms to define the problem and scope of work. Collaborating on solving the Q&A system problem results in a lengthy issue thread where it can be challenging to quickly understand the status of the work. Anyone new to the issue or even current participants may need to spend considerable time reading comments to get an overview of the problem status and the decisions that have been made. AI-driven text summarization simplifies understanding of the comments, allowing developers and business teams to align on the problem and requirements.\n\n![AI-driven text summary](https://about.gitlab.com/images/blogimages/2023-07-24-modern-problems-modern-devsecops/summary-hd.png)\n\nText summarization applied to issue comments \n{: .note.text-center}\n\nGitLab provides an AI-generated summary right in the issue where all the collaboration took place, thus helping development teams to get an overview of the main points and takeaways. The AI-generated summary enables developers and other team members to quickly extract and review key points from the issue without having to read through long conversations.\n\n### Solution development\nThe proposed solution involves leveraging a large language model (LLM) to build a Q&A system. The AI-generated summary allows developers to quickly break down the requirements and store them in GitLab using [Requirements management](https://docs.gitlab.com/ee/user/project/requirements/).  
The requirements are stored as easy-to-understand items with their own criteria of success and expected behavior as depicted in the figure below:\n\n![requirements](https://about.gitlab.com/images/blogimages/2023-07-24-modern-problems-modern-devsecops/requirements.png)\n\nAI Q&A system requirements in GitLab \n{: .note.text-center}\n\nHaving clear requirements is key to starting implementation of a successful solution, creating a [merge request](https://docs.gitlab.com/ee/user/project/merge_requests/), and iterating on the solution to the problem.\n\nWhile working towards a solution, GitLab Duo Code Suggestions can help developers focus. Leveraging generative AI, Code Suggestions provides relevant suggestions reducing time spent on repetitive or common code. This frees up developers to focus their efforts on complex problems that require deeper understanding of the codebase and business logic. \n\n![code-suggestions](https://about.gitlab.com/images/blogimages/2023-07-24-modern-problems-modern-devsecops/code-suggestion.png)\n\nGenerated code speeds up development\n{: .note.text-center}\n\nDevelopers get code suggestions inline in their integrated development environment, or IDE, as they type or after prompting the model with code comments. The suggestions are predicted based on the code written before the cursor and the file extension of the script.\n\n### Efficient code review\nLet's consider that, until this point, developers working on the AI-enabled Q&A system have committed code contributions to build the solution. And it is also evident that infusing AI into the software development lifecycle has allowed the team to move faster. To move faster without compromising quality, however, it’s important to carry out code reviews on all the contributions done so far by the development team. Code reviews help developers share knowledge and maintain high-quality software. 
Companies like the one in this scenario might have a large number of developers, and identifying suitable reviewers can be challenging and time consuming. To resolve this, the company can leverage [GitLab Duo Suggested Reviewers](https://about.gitlab.com/gitlab-duo/). This feature employs machine learning to suggest the most relevant code reviewers, streamlining and speeding up the review process. \n\n![suggested-reviewers](https://about.gitlab.com/images/blogimages/2023-07-24-modern-problems-modern-devsecops/suggested-review-sa.png)\n\nSuggested reviewers appear in the reviewer dropdown\n{: .note.text-center}\n\nThe machine learning algorithm that powers Suggested Reviewers learns from the project’s contribution graph data; the suggestions it generates are contextual to the changes made in the code.   \n\n### Code review summarization and test generation\nAI-driven text summarization helps in understanding suggested code changes derived from the code review at a glance. In our scenario, once the appropriate reviewer has been chosen, this person can go through the code and submit comments, propose changes, or identify potential edge cases that may have been overlooked. Getting a quick summary of all the suggested code changes before diving into the details can potentially speed up software delivery.\n\n![mr-summary](https://about.gitlab.com/images/blogimages/2023-07-24-modern-problems-modern-devsecops/MR-Summary.png)\n\nCode Review summary\n{: .note.text-center}\n\nAs part of the review, it is important to make sure that all the functions have been tested. 
If there was the case of an overlooked test, once again it is possible to leverage generative AI within GitLab to create [test cases](https://docs.gitlab.com/ee/ci/test_cases/).\n\n![suggest-test-cases](https://about.gitlab.com/images/blogimages/2023-07-24-modern-problems-modern-devsecops/suggest-test-cases.png)\n\nTest cases generated by AI\n{: .note.text-center}\n\nIn the figure below, we see the test code generated by AI. This provides helpful assertions that can also serve as an inspiration for discovering edge cases that might have not been considered before.\n\n![test cases](https://about.gitlab.com/images/blogimages/2023-07-24-modern-problems-modern-devsecops/test-cases-created.png)\n\nSuggested test cases\n{: .note.text-center}\n\nThis solidifies the quality of the project codebase, closing the loop of efficient code reviews.\n\n### Minimizing context switching\nAllowing developers to obtain guidance and answers to GitLab-related questions without leaving the platform can be achieved by interacting with GitLab Duo Chat functionality.This AI-powered feature reduces context switching and enables developers or other team members to ask open questions and request information using natural language. Let's assume that a developer wasn't exactly sure how to leverage AI to generate test cases like the ones requested during the code review. Using GitLab Duo Chat, the developer can ask how to achieve exactly that without having to leave the platform, as depicted in the figure below:\n\n![GitLab Duo Chat](https://about.gitlab.com/images/blogimages/2023-07-24-modern-problems-modern-devsecops/chat.png)\n\nGitLab Duo Chat\n{: .note.text-center}\n\nInteracting with the platform using natural language provides a convenient way to achieve results and get things done while minimizing context switching, improving the software delivery experience and speed. 
Now, speaking of speed, let's dive into the realm of continuous integration/continuous delivery and deployment (CI/CD).\n\n### GitLab CI/CD \nThe AI-infused DevSecOps approach described so far aligns and enhances CI/CD methodologies. The financial services company in this scenario uses these continuous methodologies to deliver software faster since these are proven ways to automatically build, test, secure, and deploy software.\n\n![chatbot-pipeline](https://about.gitlab.com/images/blogimages/2023-07-24-modern-problems-modern-devsecops/pipelines.png)\n\nGitLab CI/CD pipeline\n{: .note.text-center}\n\nThe figure above depicts the CI/CD pipeline that is triggered and used in the development of our LLM Financial Q&A system. The jobs it executes are:\n\n- Build: creates the container with its respective code and dependencies\n- Test: unit testing and security scanners, which help ensure that no code vulnerabilities are being introduced \n- Deploy: puts the solution in the hands of end users\n\nOnce the solution is deployed, end users can ask questions to it, and the FAQ problem can be considered solved.\n\n![chatbot](https://about.gitlab.com/images/blogimages/2023-07-24-modern-problems-modern-devsecops/chatbot.gif)\n\nThe AI touchpoints explored in this scenario and that led to the faster development of our AI-powered Q&A system are seamlessly integrated within DevSecOps methodologies, enhancing the development and operations experience.\n\n![devsecops-ml](https://about.gitlab.com/images/blogimages/2023-07-24-modern-problems-modern-devsecops/devsecops-embedded.png)\n\nUltimately, this increases the likelihood of a successful and secure deployment of the AI-powered Q&A system.\n\n### Recap\nGoing back to the process overview, in the following table we see how the different AI touchpoints are used throughout the software development lifecycle.\n\n|  | What AI-powered DevSecOps can do for you | Feature |\n| ------ | ------ | ------ 
|\n|![brainstorming](https://about.gitlab.com/images/blogimages/2023-07-24-modern-problems-modern-devsecops/brainstorm.png) | AI generates a summary of the discussions and provide the most relevant information for your team to move forward in the solution development   |[Summarize issues](https://docs.gitlab.com/ee/user/ai_features.html#summarize-issue-discussions)       |\n|![requirements](https://about.gitlab.com/images/blogimages/2023-07-24-modern-problems-modern-devsecops/solution-requirements.png)       | The team agrees on a chatbot and break down the application requirements and set criteria it must fulfill       |  [Requirements management](https://docs.gitlab.com/ee/user/project/requirements/)       |\n|![develop](https://about.gitlab.com/images/blogimages/2023-07-24-modern-problems-modern-devsecops/develop.png)        | Developers leverage Code Suggestions to speed up code and generate tests       |   [Code Suggestions](https://docs.gitlab.com/ee/user/project/repository/code_suggestions.html)     |\n|![reviewer](https://about.gitlab.com/images/blogimages/2023-07-24-modern-problems-modern-devsecops/suggested-reviewer.png)        | Quickly find the most suitable person to review code changes and move on in the delivery process       |  [Suggested Reviewers](https://docs.gitlab.com/ee/user/project/merge_requests/reviews/data_usage.html#:~:text=Suggested%20Reviewers%20is%20the%20first,and%20GitLab%20user%20account%20metadata.)      
|\n|![code-review](https://about.gitlab.com/images/blogimages/2023-07-24-modern-problems-modern-devsecops/review.png)        |  Summarize Merge Request, which includes code summary, helps contributors understand the most important changes that need to be addressed      | [Summarize Merge Request](https://docs.gitlab.com/ee/user/ai_features.html#summarize-merge-request-changes)      |\n|![pipeline](https://about.gitlab.com/images/blogimages/2023-07-24-modern-problems-modern-devsecops/buildtest.png)        | CI/CD and integrated security       |   [CI/CD testing](https://docs.gitlab.com/ee/ci/testing/)    |\n|![deploy](https://about.gitlab.com/images/blogimages/2023-07-24-modern-problems-modern-devsecops/deploy.png)        | Kubernetes agent, and multi-cloud deployments      |  [Kubernetes integration](https://docs.gitlab.com/ee/user/clusters/agent/)      |\n\nWatch the video walkthrough of the financial services scenario:\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/LifJdU3Qagw?si=fpfvuhJQ_jYmYrT2\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\nAs organizations embark on their AI journey, more and better software will have to be created. Drawing insights from DevSecOps combined with AI-powered workflows provides a solid foundation to enjoy the benefits of AI, unlock new use cases, and provide robust solutions that learn from data. 
Ultimately, AI enables organizations to enhance software development practices and to tackle modern problems with modern solutions.\n\nGet started with GitLab Duo today with this [free trial of GitLab Ultimate](https://gitlab.com/-/trials/new?glm_content=default-saas-trial&glm_source=localhost%2Fsolutions%2Fai%2F).\n",[703,482,9],{"slug":2504,"featured":6,"template":680},"modern-software-development-problems-require-modern-ai-powered-devsecops","content:en-us:blog:modern-software-development-problems-require-modern-ai-powered-devsecops.yml","Modern Software Development Problems Require Modern Ai Powered Devsecops","en-us/blog/modern-software-development-problems-require-modern-ai-powered-devsecops.yml","en-us/blog/modern-software-development-problems-require-modern-ai-powered-devsecops",{"_path":2510,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":2511,"content":2517,"config":2522,"_id":2524,"_type":14,"title":2525,"_source":16,"_file":2526,"_stem":2527,"_extension":19},"/en-us/blog/modernizing-a-simple-c-application-to-java-with-gitlab-duo",{"title":2512,"description":2513,"ogTitle":2512,"ogDescription":2513,"noIndex":6,"ogImage":2514,"ogUrl":2515,"ogSiteName":693,"ogType":694,"canonicalUrls":2515,"schema":2516},"Modernizing a simple C++ application to Java with GitLab Duo","Learn how to refactor code from memory unsafe languages to memory safe languages with the help of GitLab's AI capabilities, saving time and effort on application modernization.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749659507/Blog/Hero%20Images/AdobeStock_623844718.jpg","https://about.gitlab.com/blog/modernizing-a-simple-c-application-to-java-with-gitlab-duo","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Modernizing a simple C++ application to Java with GitLab Duo\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Cesar Saavedra\"}],\n        \"datePublished\": 
\"2024-12-11\",\n      }",{"title":2512,"description":2513,"authors":2518,"heroImage":2514,"date":2519,"body":2520,"category":10,"tags":2521},[720],"2024-12-11","Memory unsafe languages are those that do not handle any memory management on behalf of the developer. For example, when programming in C or C++, if you need memory during runtime, you will need to allocate and deallocate the memory yourself, running the risk of ending up with memory leaks in cases when you inadvertently forget to deallocate it. Other languages like Ada and FORTRAN provide some memory management but may not prevent memory leaks. Many organizations, including those in the public sector, have applications that have been developed using languages that are memory unsafe and are often looking to modernize these to a memory safe language, such as Java, Python, JavaScript, or Golang.\n\nThis tutorial focuses on a specific example of modernizing a simple C++ application to Java by refactoring it with the help of [GitLab Duo](https://about.gitlab.com/gitlab-duo/), our suite of AI capabilities, and shows how much time and effort you can save in the migration.\n\n## Understanding the simple C++ application\n\nLet’s make the assumption that we have been tasked with the migration of a C++ application to a memory safe language, namely Java. The C++ application can be found in the following project (thank you to [@sugaroverflow](https://gitlab.com/sugaroverflow) for contributing this sample application):\n\n[https://gitlab.com/gitlab-da/use-cases/ai/ai-applications/refactor-to-java/air-quality-application](https://gitlab.com/gitlab-da/use-cases/ai/ai-applications/refactor-to-java/air-quality-application)\n\nSince this is the first time we are seeing this application, let’s invoke GitLab Duo Code explanation to better understand what it does. We open file `main.cpp` in Visual Studio Code and select the entirety of this file. 
We then right-click and select **GitLab Duo Chat > Explain selected snippet** from the popup menu.\n\n![duo-code-explanation-menu-option](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749675546/Blog/Content%20Images/code-explanation-menu-option.png)\n\nThe GitLab Duo Chat window opens up and the slash command `/explain` is executed for the selected code. Chat returns a very thorough and detailed description and explanation in natural language form of what each function does in the file as well as examples on how to run the compiled program.\n\n![code-explanation-text](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749675547/Blog/Content%20Images/code-explanation-text.png)\n\nIn short, the simple C++ application takes a U.S. zip code as input and returns the air quality index for that zip code.\n\n## Compiling and running the C++ application\n\nTo further understand this simple C++ application, we proceed to compile and run it. We could have asked Chat how to do this, however, the project has a README file that provides the commands to compile the project, so we go ahead and use those by entering them in the Terminal window of VS Code.\n\n![compile-command](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749675547/Blog/Content%20Images/compile-command.png)\n\nAfter the compilation finishes, we change directory to the `build` subdirectory in the project, which is where the compilation process places the executable file for this application. 
Then, we run the executable by entering the following command:\n\n`./air_quality_app 32836`\n\nAnd we see the response as follows:\n\n`Air Quality Index (AQI) for Zip Code 32836: 2 (Fair)`\n\n![cplus-plus-app-execution-output](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749675547/Blog/Content%20Images/cplus-plus-app-execution-output.png)\n\nThis confirms to us that the application was successfully compiled and it’s executing appropriately.\n\n## Refactoring the application to Java\n\nLet’s start migrating this C++ application to Java. We take advantage of GitLab Duo Chat and its refactoring capabilities by using the slack command `/refactor`. We qualify the slash command with specific instructions on what to do for the refactoring. We enter the following command in the Chat input field:\n\n> /refactor this entire application to Java. Provide its associated pom.xml to build and run the Java application. Also, provide the directory structure showing where all the resulting files should reside for the Java application.\n\n![refactor-chat-output](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749675547/Blog/Content%20Images/refactor-chat-output.png)\n\nChat returns a set of Java files that basically refactor the entire C++ application to the memory safe language. 
In addition and per the prompt, Chat returns the pom.xml file, needed by [maven](https://docs.gitlab.com/ee/api/packages/maven.html) for the building and execution of the refactored application as well as its directory structure, indicating where each generated file should reside.\n\nWe copy and save all the generated files to our local directory.\n\n## Creating the Java project\n\nIn VS Code, we now proceed to open an empty project in which we will set up the directory structure of the new Java application and its contents.\n\nWe create all the previously generated Java files in their corresponding directories in the new project and paste their contents in each.\n\n![java-files-created](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749675547/Blog/Content%20Images/java-files-created.png)\n\nLastly, we save all the files to our local disk.\n\n## Asking for help to build and run the Java application\n\nAt this point, we have an entire Java application that has been refactored from C++. Now, we need to build it but we don’t quite remember what maven command we need to use to accomplish this.\n\nSo we ask GitLab Duo Chat about this. 
We enter the following prompt in the Chat input field:\n\n> How do you build and run this application using maven?\n\n![maven-info-output](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749675547/Blog/Content%20Images/maven-info-output.png)\n\nChat returns with a thorough explanation on how to do this, including examples of the maven command to build and run the newly created Java application.\n\n## Building and running the Java application\n\nGitLab Duo Chat understands the application and environment context and responds that we first need to create an environment variable called `API_KEY` before we can run the application.\n\nIt also provides the maven command to execute to build the application, which we enter in the Terminal window:\n\n```unset\nmvn clean package\n``` \n\n![java-build-output](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749675547/Blog/Content%20Images/java-build-output.png)\n\nOnce the build finishes successfully, we copy the generated command to run the application from the Chat window and paste it in the Terminal window:\n\n```unset\njava -jar target/air-quality-checker-1.0-SNAPSHOT-jar-with-dependencies.jar 90210\n```\n\n![java-app-execution-output](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749675547/Blog/Content%20Images/java-app-execution-output.png)\n\nThe application successfully executes and returns the string:\n\n```unset\nAir Quality Index (AQI) for Zip Code 90210: 2 (Fair)\n```\n\nWe have confirmed that the modernized version of the application, now refactored in Java, runs just like its original C++ version.\n\n## Watch this tutorial in action\n\nWe have seen that by leveraging the power of GitLab Duo in your modernization activities, you can save a great deal of time and effort, freeing you to spend more time innovating and creating value to your organization.\n\nHere is a video to show you, in action, the tutorial you just read:\n\n\u003C!-- blank line -->\n\u003Cfigure 
class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/LJ7GOr_P0xs?si=_ZjF75DAXEQnY2Mn\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\n> #### Want to get started with GitLab Duo? [Start a free, 60-day trial today!](https://about.gitlab.com/solutions/gitlab-duo-pro/sales/)\n\n## Learn more\n- [Refactor code into modern languages with AI-powered GitLab Duo](https://about.gitlab.com/blog/refactor-code-into-modern-languages-with-ai-powered-gitlab-duo/)\n- [Secure by Design principles meet DevSecOps innovation in GitLab 17](https://about.gitlab.com/blog/secure-by-design-principles-meet-devsecops-innovation-in-gitlab-17/)\n- [How to secure memory-safe vs. manually managed languages](https://about.gitlab.com/blog/memory-safe-vs-unsafe/)\n",[9,482,724,184],{"slug":2523,"featured":6,"template":680},"modernizing-a-simple-c-application-to-java-with-gitlab-duo","content:en-us:blog:modernizing-a-simple-c-application-to-java-with-gitlab-duo.yml","Modernizing A Simple C Application To Java With Gitlab Duo","en-us/blog/modernizing-a-simple-c-application-to-java-with-gitlab-duo.yml","en-us/blog/modernizing-a-simple-c-application-to-java-with-gitlab-duo",{"_path":2529,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":2530,"content":2536,"config":2542,"_id":2544,"_type":14,"title":2545,"_source":16,"_file":2546,"_stem":2547,"_extension":19},"/en-us/blog/navigating-the-ai-frontier-lessons-from-the-cutting-edge",{"title":2531,"description":2532,"ogTitle":2531,"ogDescription":2532,"noIndex":6,"ogImage":2533,"ogUrl":2534,"ogSiteName":693,"ogType":694,"canonicalUrls":2534,"schema":2535},"Navigating the AI frontier: Lessons from the cutting edge","Discover key insights on AI development, from rapid prototyping to production, evaluation frameworks, and emerging industry 
trends.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098682/Blog/Hero%20Images/Blog/Hero%20Images/duo-blog-post_1Cy89R1pY8OMwyrgSB525O_1750098682075.png","https://about.gitlab.com/blog/navigating-the-ai-frontier-lessons-from-the-cutting-edge","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Navigating the AI frontier: Lessons from the cutting edge\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Michelle Gill\"},{\"@type\":\"Person\",\"name\":\"David O'Regan\"}],\n        \"datePublished\": \"2024-09-10\",\n      }",{"title":2531,"description":2532,"authors":2537,"heroImage":2533,"date":2539,"body":2540,"category":10,"tags":2541},[2538,1175],"Michelle Gill","2024-09-10","As AI continues to evolve at a breakneck pace, developers and organizations are grappling with how to effectively integrate it into their workflows and products. At GitLab, we're always looking to stay at the forefront of these developments to better serve our community. Recently, our team attended the [AI Engineer World's Fair](https://www.ai.engineer/worldsfair), which provided valuable insights into the current state of [AI development](https://about.gitlab.com/gitlab-duo/). Here's what we learned.\n\n## Insights on AI development\n\n### The traditional development lifecycle is gone\n\nThe advent of AI has dramatically altered the traditional software development lifecycle. While AI prototypes can be created in minutes, advancing them to production takes significantly longer than before. This shift requires a rethink of our development approach, including:\n\n- streamlining processes where possible\n- adding new steps to accommodate AI-specific requirements\n- embracing rapid iteration and rewriting over multiple cycles\n\n### Speed is key\n\nIn the fast-paced world of AI, speed of iteration is crucial. 
Some key takeaways:\n\n- If productization takes more than three months, the product may be outdated by the time it's ready.\n- Aim to try more things faster than competitors.\n- Reduce iteration time and accelerate collaboration.\n- Implement systems that de-risk getting things wrong and allow for fearless changes.\n\n### AI development requires new, practical methods\n\nAI demands new ways of approaching development, including the following:\n\n1. Prioritize user experience early\n- Frontload user testing and prepare for evaluations early in the design phase.\n- Validate product needs with the best available model.\n- Consider trade-offs between achievability and value when selecting use cases.\n\n2. Take an iterative approach\n\n- Identify your base model (choose the best available).\n- Start your prompt template (involve domain experts and product managers).\n- Identify your data selection strategy.\n- Iterate on components separately and evaluate each step.\n\n3. Develop prompt engineering best practices\n\n- Standardize around a single query language.\n- Ask the large language model (LLM) to write code to solve problems rather than solving them directly.\n- Automate what you can to reduce degrees of freedom.\n- Use code for deterministic tasks outside the AI chain.\n- Break problems into smaller, more manageable pieces.\n- Leverage technical writers and domain experts for prompt crafting.\n\n### Evaluations need more time\n\nThe \"Great Eval Problem\" has flipped the traditional development timeline, with evaluations now taking a majority of the time. With LLMs, we have replaced the need for sophisticated model development approaches with an API call to a third party. However, we still require a significant amount of time to evaluate the responses. 
To address this:\n\n- Incorporate evaluations at every level using multiple techniques.\n- Evaluate different aspects at different stages (local, pre-production, production).\n- Focus on end-to-end evaluations that measure end-user value.\n- Consider user-centric evaluations and let features \"die with UX\" if necessary.\n\n### The customer PoV must be top of mind\n\nCustomer-centric considerations should be your focus. Here's how:\n\n- Maintain developer \"flow\" and reduce context switching.\n- Provide transparency into AI inputs and outputs.\n- Ease users into natural language interactions.\n- Consider human-in-the-loop approaches for complex tasks.\n\n### AI engineer roles are changing, pay attention\n\nAs the industry matures, the role of AI engineers is becoming more defined:\n- Requires production experience and product development competency.\n- Requires engaging with various personas (ML engineers, software engineers, domain experts).\n- Demands strong data intuition and the ability to extract meaning from data.\n\n## Looking ahead\n\nThe AI landscape continues to evolve rapidly. Some trends to watch:\n\n- unification of prompts across models\n- advancements in evaluation and prompt generation tools\n- the rise of \"slop\" (unrequested and unreviewed AI-generated content)\n- movement towards inline code completion and autonomous agents\n- improvements in fine-tuning, RAG workflows, and managed agents\n\nAs we navigate this exciting and rapidly changing field, it's crucial to stay informed, adapt quickly, and always keep the end user in mind. At GitLab, we're committed to incorporating these insights into our development processes and sharing our learnings with the community.\n\nWe encourage our developers and the wider community to explore these concepts further and contribute to the ongoing dialogue around AI development best practices. 
Together, we can shape the future of AI-driven software development.\n\n> ## Learn more about AI and DevSecOps\n> - Visit our [GitLab Duo site](https://about.gitlab.com/gitlab-duo/)\n> - Read our [\"Developing GitLab Duo\"](https://about.gitlab.com/blog/developing-gitlab-duo-series/) behind-the-scenes series\n> - Bookmark our [AI/ML blog page](https://about.gitlab.com/blog/categories/ai-ml/)\n",[9],{"slug":2543,"featured":6,"template":680},"navigating-the-ai-frontier-lessons-from-the-cutting-edge","content:en-us:blog:navigating-the-ai-frontier-lessons-from-the-cutting-edge.yml","Navigating The Ai Frontier Lessons From The Cutting Edge","en-us/blog/navigating-the-ai-frontier-lessons-from-the-cutting-edge.yml","en-us/blog/navigating-the-ai-frontier-lessons-from-the-cutting-edge",{"_path":2549,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":2550,"content":2555,"config":2561,"_id":2563,"_type":14,"title":2564,"_source":16,"_file":2565,"_stem":2566,"_extension":19},"/en-us/blog/new-report-on-ai-assisted-tools-points-to-rising-stakes-for-devsecops",{"title":2551,"description":2552,"ogTitle":2551,"ogDescription":2552,"noIndex":6,"ogImage":2057,"ogUrl":2553,"ogSiteName":693,"ogType":694,"canonicalUrls":2553,"schema":2554},"New report on AI-assisted tools points to rising stakes for DevSecOps","Read the key findings from the \"Omdia Market Radar: AI-Assisted Software Development, 2023-24\" report, including the state of AI-based code assistants.","https://about.gitlab.com/blog/new-report-on-ai-assisted-tools-points-to-rising-stakes-for-devsecops","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"New report on AI-assisted tools points to rising stakes for DevSecOps\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Rusty Weston, Guest Contributor\"}],\n        \"datePublished\": \"2024-02-14\",\n      
}",{"title":2551,"description":2552,"authors":2556,"heroImage":2057,"date":2558,"body":2559,"category":10,"tags":2560},[2557],"Rusty Weston, Guest Contributor","2024-02-14","Small wonder that the buzz about deploying generative AI and large language models (LLMs) for code completion and code generation has focused almost exclusively on developer productivity. It's a significant milestone — but it’s not the entire story. Less widely understood is what AI-assisted tools can do for development teams and, more broadly, for organizational competitiveness. Combining AI-powered tools and integrated development environments (IDEs) doesn’t just pump up developer efficiency, it transforms the entire software development lifecycle (SDLC) while adding “layers” of safety enhancements. \n\nDevSecOps teams see firsthand that AI-assisted software tools help reduce software testing bottlenecks and improve security as they streamline workflows. In this new era, DevSecOps can simultaneously shorten the software development cycle, enforce security standards, and enhance output. In short, the right tools make organizations more competitive. \n\nJust as LLM quality improvements amplify the value of generative AI, the new class of AI-powered development tools must offer privacy- and transparency- controls to harness these models effectively. Utilizing rigorous controls, DevSecOps gains efficiencies and improves team collaboration while reducing AI adoption's security and compliance risks.\n\n## An analyst take on what matters  \nOne of the key findings of a new report called “[Omdia Market Radar: AI-Assisted Software Development, 2023–24](https://learn.gitlab.com/devsecops-plat-ai/analyst-omdia-ai)” is that “the use of AI-based code assistants has reached a level of proficiency such that enterprises not using this technology will be at a disadvantage.”\n\n> [Read the Omdia Market Radar report](https://learn.gitlab.com/devsecops-plat-ai/analyst-omdia-ai). 
\n\nFew may have anticipated the development community’s swift integration of AI-powered application development. Until recently, it’s been a gradual build. According to Omdia, “The application of AI to code assistance has been ongoing for the last decade with a focus on assisting professional developers.” After years of development, the report emphasizes that “this technology is now a permanent part of the landscape.”\n\nOmdia’s finding also tracks with the [GitLab 2023 Global DevSecOps Report: The State of AI in Software Development](https://about.gitlab.com/developer-survey/previous/2023/), which featured input from 1,000 global leaders in development, IT operations, and security. Today, nearly one-in-four DevSecOps teams have adopted AI tools, and another two-thirds plan to use AI in software development. In the GitLab report, more than half (55%) of teams heralded the promise of improved efficiency. At the same time, two in five respondents expressed concerns about whether AI-generated code may introduce security vulnerabilities.\n\n## Advocating a layered approach \n\nGiven potential risks such as LLM inaccuracy,  including widely documented [hallucinations](https://www.fastcompany.com/91006321/how-ai-companies-are-trying-to-solve-the-llm-hallucination-problem), Omdia cautions brands that “careless use of LLM output could harm and tarnish” their reputation. “To increase the accuracy of this technology and ensure that developers can use this technology safely and without violating license rules in the data used to train the models, there is a need to add layers on top of the foundation model.”  \n\nBy layers, Omdia emphasizes the value of “safety and enhancement” safeguards and filters. 
These layers create a “major differentiator” for AI-assisted development tools because they manage “training data licensing rules, the quality and accuracy of the generated output, and the prevention of insecure code.” The report's authors caution that “generated outputs need to be carefully evaluated” to ensure they are “safe and of high quality.”\n\nIn effect, the safeguards and filters in AI-assisted software development establish a “defense-in-depth” strategy for coding. That’s a concept in which “[attacks missed by one technology are caught by another](https://csrc.nist.gov/glossary/term/defense_in_depth),” which can also apply to any elevated digital risk, such as reputational harm. \n\n## A new perspective on GitLab Duo\n\nOmdia highlighted [GitLab Duo](https://about.gitlab.com/gitlab-duo/), the company’s suite of AI capabilities, as one of the products it considers “suitable for enterprise-grade application development,” noting that its “AI assistance is integrated throughout the SDLC pipeline.” \n\nAmong the report highlights:\n- “GitLab places an emphasis on respecting user privacy and being transparent in how it operates. In its selection of AI technology, it is agnostic to the models adopted and will use what it considers the best model for each use case.”  \n- “When GitLab looked at where developers were spending their time, it was only 25% on coding, and 75% was taken up by other necessary tasks: planning, onboarding, testing, documentation, and security. 
Therefore, GitLab applies AI to all these tasks, not just code generation assistance.”\u2028\n- “To ensure privacy, GitLab does not let its AI retain user data in any way and does not use client code to train its models.”\n- GitLab’s AI gateway is model agnostic, and “GitLab uses models from Google and [Anthropic](https://about.gitlab.com/blog/gitlab-uses-anthropic-for-smart-safe-ai-assisted-code-generation/) to power GitLab Duo.”\u2028\n- Beyond code suggestions, developers “can ask Duo Explanation using natural language to explain what the code does.”\n\n## GitLab Duo introduces stronger controls\n\nFor DevSecOps teams, there’s no tradeoff between efficiency and security. Both are essential. GitLab Duo includes vital features such as Duo Code Suggestions and Chat, which enable AI-powered code completion, code generation, and chat, improving collaboration between developers, security, and operations teams. \n\nWith GitLab Duo, customer privacy is never subjected to tradeoffs. All customer code stays secret — it’s never applied to model training or fine-tuning. These practices are core to GitLab’s privacy- and transparency-first approach to team collaboration and security and reduce AI adoption's compliance risks. \n\nThe Omdia report notes that “software developers face greater complexity and hurdles today in producing code.” As a result, “There is a need to build in application security, including enforcing standards and triaging security vulnerabilities.” The report finds that GitLab has “security guardrails consistently applied throughout.” \n\nAdopters need tools that can help them tap AI’s benefits without introducing vulnerabilities or undermining compliance standards in ways that jeopardize trust with customers, partners, employees, and other critical stakeholders. 
DevSecOps teams seek tools to reduce the time, stress, and complexity of the entire application lifecycle.\n\n> [Read the Omdia Market Radar report](https://learn.gitlab.com/devsecops-plat-ai/analyst-omdia-ai).\n\n_Rusty Weston is an award-winning data-driven storyteller, editor, researcher, and writer. He formerly served as Editor of InformationWeek.com, Managing Editor at Yahoo!, and Vice President and Managing Editor for the Ogilvy content team._ \n",[9,703,1135],{"slug":2562,"featured":91,"template":680},"new-report-on-ai-assisted-tools-points-to-rising-stakes-for-devsecops","content:en-us:blog:new-report-on-ai-assisted-tools-points-to-rising-stakes-for-devsecops.yml","New Report On Ai Assisted Tools Points To Rising Stakes For Devsecops","en-us/blog/new-report-on-ai-assisted-tools-points-to-rising-stakes-for-devsecops.yml","en-us/blog/new-report-on-ai-assisted-tools-points-to-rising-stakes-for-devsecops",{"_path":2568,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":2569,"content":2575,"config":2582,"_id":2584,"_type":14,"title":2585,"_source":16,"_file":2586,"_stem":2587,"_extension":19},"/en-us/blog/pipelines-as-code",{"title":2570,"description":2571,"ogTitle":2570,"ogDescription":2571,"noIndex":6,"ogImage":2572,"ogUrl":2573,"ogSiteName":693,"ogType":694,"canonicalUrls":2573,"schema":2574},"Pipelines-as-Code: How to improve speed from idea to production","Pipelines-as-Code streamline automatic building, testing, and deploying of applications using prebuilt pipelines and infrastructure components. 
Here's how it works.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749663397/Blog/Hero%20Images/logoforblogpost.jpg","https://about.gitlab.com/blog/pipelines-as-code","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Pipelines-as-Code: How to improve speed from idea to production\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Robert Williams\"}],\n        \"datePublished\": \"2022-01-18\",\n      }",{"title":2570,"description":2571,"authors":2576,"heroImage":2572,"date":2578,"body":2579,"category":1032,"tags":2580},[2577],"Robert Williams","2022-01-18","\nToday’s DevOps platform-centric world is moving steadily towards an \"Everything-as-Code\" mentality. Add in cloud native, and it's clearly even more important to standardize how you define your DevOps processes.\n\n## Why ‘as-Code’?\n\nThanks to faster iteration, cloud native computing, and [microservices-based architectures]\n(https://about.gitlab.com/topics/microservices/), as-Code technologies have become the de-facto standard for a lot of different parts of the software development lifecycle. \n\nThe need to release faster requires a single spot for teams to collaborate on any kind of change – code, infrastructure, configuration, networking, or testing. And to implement that change quickly we need to be able to see and review it before it goes into production. \n\nAs-Code solutions are at the core of cloud native technologies such as Kubernetes, where you utilize YAML or JSON formats to configure and manage. Here are the key advantages of 'as-Code':\n\n- auditability\n- scalability\n- efficiency\n- collaboration\n\nThese benefits come into play with every piece of technology that moves into as-Code; we have seen it time and again as DevOps processes mature and we automate each piece of the software development lifecycle. 
Here are the critical 'as-Code' stages: \n\n### Build-as-Code\n\nOne of the first steps when building a new pipeline is to implement a way to build your application automatically. Containerization is one of the most common ways: You define your build steps as a Dockerfile and then you have automated the build of the application.\n\n### Test-as-Code\n\nAs our deployment frequency and team size scales, the need for test cases to be automated scales as well. So we automate, we write unit tests and test scripts to execute unit tests, and then we ensure the changes can be continuously integrated safely, without introducing unplanned bugs.\n\n### Security-as-Code\n\nTo ensure software gets to market quickly, security must be included in your testing process. The testing has to happen either through tools integrated with each individual project, or implemented as code, creating job templates for security scanners that can be ingested by projects as required. These steps enable teams to quickly become compliant with various security frameworks (like PCI-DSS, HIPAA,,or ISO) as they become relevant for the project.\n\n### Deployment-as-Code\n\nDeployments need to be standardized so they are predictable every time. To ensure successful peer review, production and development environment deployments need to be the same, and there's an added bonus of a quality gate between them. Through scripting and implementation of Deployment-as-Code, we end up with the ability to continuously deploy code and continuously deliver value.\n\n## Why Pipelines-as-Code?\n\nPipelines are the center of the CI/CD workflow – they're the automation heart that powers all of the benefits of as-Code technologies. Once you have the Build-as-Code, Test-as-Code, Deployment-as-Code, Infrastructure-as-Code, and Configuration-as-Code, you have all the parts needed to ensure that you can reliably and predictably take your application into production environments. 
But, to move changes in with agility, you need to take all those parts and string them together into a pipeline.\n\nThe technology behind Pipelines-as-Code makes it possible to create centralized repositories for your organization's pipelines. Pipelines-as-Code can be set up to fit all boxes for varied languages and use cases (like [Auto DevOps](https://docs.gitlab.com/ee/topics/autodevops/)) or with a [number of options](https://gitlab.com/gitlab-org/gitlab/-/tree/master/lib/gitlab/ci/templates) so that developers can pick base pipelines to fit their use case. It's important to have a baseline that conforms to the organization's standards because that always increases the speed to production.\n\nThe entire team can collaborate on changes to each part of the workflow. Version history can be easily maintained in the same version control system as everything else that touches the DevOps lifecycle.\n\nThe benefits of as-Code technology reach a pinnacle with Pipelines-as-Code, so teams gain increases in efficiency, scalability, auditability, and collaboration. Pipelines-as-Code are at the center of automated GitOps, DevOps, and SecOps workflows.\n",[9,2581,849],"cloud native",{"slug":2583,"featured":6,"template":680},"pipelines-as-code","content:en-us:blog:pipelines-as-code.yml","Pipelines As Code","en-us/blog/pipelines-as-code.yml","en-us/blog/pipelines-as-code",{"_path":2589,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":2590,"content":2596,"config":2601,"_id":2603,"_type":14,"title":2604,"_source":16,"_file":2605,"_stem":2606,"_extension":19},"/en-us/blog/quick-vulnerability-remediation-with-gitlab-advanced-sast-duo-ai",{"title":2591,"description":2592,"ogTitle":2591,"ogDescription":2592,"noIndex":6,"ogImage":2593,"ogUrl":2594,"ogSiteName":693,"ogType":694,"canonicalUrls":2594,"schema":2595},"Quick vulnerability remediation with GitLab Advanced SAST + Duo AI ","Shorten your mean time to remediation by pairing Advanced SAST and artificial intelligence. 
This detailed demo shows you how.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098458/Blog/Hero%20Images/Blog/Hero%20Images/blog-image-template-1800x945_24mPf16vAPHORs3d9y62q_1750098458538.png","https://about.gitlab.com/blog/quick-vulnerability-remediation-with-gitlab-advanced-sast-duo-ai","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Quick vulnerability remediation with GitLab Advanced SAST + Duo AI \",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Fernando Diaz\"}],\n        \"datePublished\": \"2024-10-22\",\n      }",{"title":2591,"description":2592,"authors":2597,"heroImage":2593,"date":2598,"body":2599,"category":10,"tags":2600},[1391],"2024-10-22","With GitLab 17.4, we’ve made [GitLab Advanced SAST generally available](https://about.gitlab.com/blog/gitlab-advanced-sast-is-now-generally-available/). [GitLab Advanced SAST](https://docs.gitlab.com/ee/user/application_security/sast/gitlab_advanced_sast.html) is a static application security testing scanner designed to discover vulnerabilities by performing cross-function and cross-file taint analysis. By following the paths user inputs take, the analyzer identifies potential points where untrusted data can influence the execution of your application in unsafe ways, ensuring the vulnerabilities are detected even when they span multiple functions and files.\n\nGitLab Advanced SAST can be used together with [GitLab Duo Vulnerability Explanation](https://docs.gitlab.com/ee/user/application_security/vulnerabilities/#explaining-a-vulnerability) in order to reduce the mean time to remediation (MTTR). 
GitLab Duo can provide practical, AI-powered examples of how threat actors can exploit vulnerabilities and offer light-weight remediation guidance, which can be used with cross-file analysis to enhance application security (AppSec) efficiency.\n\nThis tutorial will show you how to:\n* enable GitLab Advanced SAST\n* read results from the scanner\n* review the code flow of a vulnerability\n* use GitLab AI to quickly remediate the vulnerability\n\n## Enable GitLab Advanced SAST\n\nFollow the instructions below to enable GitLab Advanced SAST. You can also view this video to get started:\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/xDa1MHOcyn8?si=5SYuKgP-BdBryqcU\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\n## Run GitLab Advanced SAST on each code commit\n\nBefore using Advanced SAST, the following prerequisites must be met:\n\n- GitLab Ultimate Subscription ([free 30-day trial](https://gitlab.com/-/trials/new?glm_content=default-saas-trial&glm_source=about.gitlab.com%2F))\n- GitLab SaaS or GitLab Self-managed (running Version 17.4)\n\nTo enable the GitLab Advanced SAST scanner:\n\n- On the left sidebar, select **Search** or **Go to** and find your project.\n- Add or edit the `.gitlab-ci.yml` to include the following:\n    - Test stage\n    - `Jobs/SAST.gitlab-ci.yml` template\n    - `GITLAB_ADVANCED_SAST_ENABLED` variable set to true\n- Apply the change.\n\nYour newly merged `.gitlab-ci.yml` should contain the following:\n\n```yaml\nstages:\n  - test\n\ninclude:\n  - template: Jobs/SAST.gitlab-ci.yml\n\nvariables:\n  GITLAB_ADVANCED_SAST_ENABLED: 'true'\n```\n\nThis will now run the `gitlab-advances-sast` job within the test stage of your application along with all the other jobs you have defined. 
Advanced SAST will replace the semgrep SAST scanner for the [supported programming languages](https://docs.gitlab.com/ee/user/application_security/sast/gitlab_advanced_sast.html#supported-languages).\n\n![Running `gitlab-advances-sast` job within the test stage of your application](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098466/Blog/Content%20Images/Blog/Content%20Images/1_aHR0cHM6_1750098466629.png)\n\n\u003Ccenter>\u003Ci>GitLab Advanced SAST job in pipeline\u003C/i>\u003C/center>\n\n\u003Cbr>\u003C/br>\n\n**Note:** You can fully configure the job as you would any job in GitLab. For more information, see the [CI/CD YAML syntax documentation](https://docs.gitlab.com/ee/ci/yaml/).\n\n## Remediate vulnerabilities in merge request (pre-production)\n\nJust like our previous SAST scanner, Advanced SAST allows you to scan source code in the diff of a feature branch. This allows us to address any incoming vulnerabilities before they make it into production. Here we can see the scanner results for the diff within a merge request:\n\n![Advanced SAST scanner results for the diff within a merge request](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098467/Blog/Content%20Images/Blog/Content%20Images/2_aHR0cHM6_1750098466630.png)\n\nWhen selecting a newly detected vulnerability, we get the following details to assist with remediation:\n\n- **Status:** The status of the vulnerability (Needs triage, Confirmed, Dismissed, Resolved)\n- **Description:** Detailed information on the detected vulnerability\n- **Detection time:** Time vulnerability was detected\n- **Location:** Line of code where vulnerability is detected\n- **Severity:** Severity of vulnerability from CVE database\n- **Training:** Gamified training from our partners\n- **Solutions:** Information on how to remediate or resolve a vulnerability\n- **Identifiers:** Relevant links showcasing detailed description, exploitation, and remediation\n\n![Merge request with vulnerability 
insights](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098467/Blog/Content%20Images/Blog/Content%20Images/MR_with_vulnerability_insights_aHR0cHM6_1750098466632.png)\n\n\u003Ccenter>\u003Ci>Merge request with vulnerability insights\u003C/i>\u003C/center>\n\n\u003Cbr>\u003C/br> \nVulnerabilities detected within an MR are actionable, meaning they can be dismissed or an issue can be created and populated with relevant vulnerability information.\n\nDismissing an issue saves AppSec teams time, because they can see relevant developer information when reviewing an MR. Creating a confidential issue allows developers and AppSec teams to further collaborate on resolving a vulnerability where a fix is not straightforward. Confidential issues have limited permissions and can be used with confidential merge requests to prevent possible malicious actors from exploiting.\n\nTo further support separation of duties and prevent vulnerable code from making it into production, you can require approval from certain people (for example, the security team) in order to merge vulnerable code.\n\n![GitLab security policies in action](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098467/Blog/Content%20Images/Blog/Content%20Images/security_policies_in_action_aHR0cHM6_1750098466634.png)\n\n\u003Ccenter>\u003Ci>Security policies in action\u003C/i>\u003C/center>\n\n\u003Cbr>\u003C/br>\n\n**Note:** Learn more about Security Policies and how to implement them in the [Security Policy documentation](https://docs.gitlab.com/ee/user/application_security/policies/).\n\n## Manage vulnerabilities in production\n\nWhile preventing vulnerabilities from making it into production is crucial for application security, it is equally as important to manage vulnerabilities in production. 
When security scanners are run on a default or production-level branch, a [vulnerability report](https://docs.gitlab.com/ee/user/application_security/vulnerability_report/) will be populated with the latest vulnerability data which can be used to triage and manage vulnerabilities.\n\n![GitLab Vulnerability Report sorted by Advanced SAST](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098467/Blog/Content%20Images/Blog/Content%20Images/5_aHR0cHM6_1750098466636.png)\n\n\u003Ccenter>\u003Ci>GitLab Vulnerability Report sorted by Advanced SAST\u003C/i>\u003C/center>\n\u003Cbr>\u003C/br>\n\nWhen selecting a vulnerability you get similar vulnerability details as seen in a merge request, making for a single source of truth for developers and AppSec teams.\n\n![Vulnerability page with vulnerability insights](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098467/Blog/Content%20Images/Blog/Content%20Images/Vuln_page_with_vulnerability_insights_aHR0cHM6_1750098466637.png)\n\n\u003Ccenter>\u003Ci>Vulnerability page with vulnerability insights\u003C/i>\u003C/center>\n\n\u003Cbr>\u003C/br>\n\nAppSec teams can triage a vulnerability by changing its status and adding relevant details on the status change. Issues can be created to track the progress of a fix. 
From here, a developer can be assigned.\n\n## Examine vulnerable code flow\n\nFor vulnerabilities detected with Advanced SAST, we can see a \"Code flow\" tab on the Vulnerability page.\n\n![Advanced SAST - image 7](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098467/Blog/Content%20Images/Blog/Content%20Images/7_aHR0cHM6_1750098466638.png)\n\n\u003Ccenter>\u003Ci>GitLab Advanced SAST code flow\u003C/i>\u003C/center>\n\u003Cbr>\u003C/br>\n\nIn this example, you can see that a vulnerability is traced across multiple functions, giving deeper insight into the best practices we should put in place to not only resolve the vulnerability, but prevent similar vulnerabilities in the future.\n\n## Use GitLab Duo Vulnerability Explanation\n\nGitLab Duo can help you mitigate or remediate a vulnerability by using a large language model to:\n\n- Summarize the vulnerability\n- Help developers and security analysts understand the vulnerability\n- Show how the vulnerability can be exploited\n- Provide a suggested remediation or mitigation\n\nTo use Vulnerability Explanation, the following is required:\n\n- GitLab Ultimate subscription\n- GitLab Duo Enterprise seat\n- GitLab Duo must be enabled for your group or instance\n\nFrom the vulnerability report, you can select a SAST vulnerability and go to its Vulnerability page. 
From the Vulnerability page, you can do any of the following to explain the vulnerability:\n\n- Select the text below the vulnerability description\n- You can use AI by asking GitLab Duo Chat to explain this vulnerability and offer a suggested fix.\n- In the upper right, from the \"Resolve with merge request\" dropdown list, select **Explain Vulnerability**, then select **Explain vulnerability**.\n- Open GitLab Duo Chat and use the explain a vulnerability command: `/vulnerability_explain`.\n\nThen the vulnerable code will be processed by Anthropic’s Claude 3 Haiku model and provide the following data:\n\n![GitLab Duo Vulnerability Explanation](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098467/Blog/Content%20Images/Blog/Content%20Images/vuln_explain_2_aHR0cHM6_1750098466640.png)\n\n## Putting it all together\n\nNow, let's put it all together with a concrete example. I will use the [OWASP Juice Shop](https://owasp.org/www-project-juice-shop/) as my demo application and run GitLab Advanced SAST to detect a vulnerability in production. Then I will use the vulnerability code flow and GitLab Duo to investigate vulnerability exploitation, and remediation. 
You can [follow along with this demo](https://gitlab.com/gitlab-da/tutorials/security-and-governance/owasp/juice-shop) and see this workflow in action by watching:\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/H1S43oM44k0?si=2LYorTjByOHbCAko\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\nThe detection and remediation workflow is as follows:\n\n- Enable GitLab Advanced SAST and run it on the project’s default branch.\n- Open the Vulnerability Report and sort by **Tool:GitLab Advanced SAST**.\n- Select the **Improper neutralization of special elements in data query logic** vulnerability found in `Basket.ts`.\n- Use the vulnerability code flow to understand the vulnerable paths.\n- Run **Explain this vulnerability** to see exploit information.\n- Run the application locally to attempt exploitation.\n- Change vulnerability status to \"Confirmed\" and provide relevant info.\n- Determine remediation path using all relevant data:\n    - Vulnerability page insights, Code Flow, Vulnerability Explanation results\n- Create a new branch and apply remediation.\n- Run the remediated application locally and try to exploit again.\n- Create a merge request with the fix.\n- Code change will be tested using CI to assure we don’t break the application.\n- Validate and merge MR.\n- Test exploit in deployed environment.\n- Change vulnerability status to \"Resolved\" on the Vulnerability page.\n\n**Note:** There are many ways to triage and remediate vulnerabilities, make sure to follow best practices set by your organization.\n\n# Useful links\n\nTo learn more about GitLab and how you can get started with enhancing your organization’s application security posture, check out the following resources.\n\n* [GitLab Ultimate](https://about.gitlab.com/pricing/ultimate/) \n* [GitLab Duo](https://about.gitlab.com/gitlab-duo/)  \n* [GitLab Security and Compliance 
Solutions](https://about.gitlab.com/solutions/security-compliance/)  \n* [GitLab Software Supply Chain Security Solutions](https://about.gitlab.com/solutions/supply-chain/)  \n* [GitLab Continuous Software Compliance](https://about.gitlab.com/solutions/continuous-software-compliance/)  \n* [JuiceShop Demo Application](https://gitlab.com/gitlab-da/tutorials/security-and-governance/owasp/juice-shop)  \n* [GitLab AppSec documentation](https://docs.gitlab.com/ee/user/application_security/)  \n* [Advanced SAST  documentation](https://docs.gitlab.com/ee/user/application_security/sast/gitlab_advanced_sast.html)  \n* [Explain this Vulnerability documentation](https://docs.gitlab.com/ee/user/application_security/vulnerabilities/#explaining-a-vulnerability)  \n* [Code Flow documentation](https://docs.gitlab.com/ee/user/application_security/vulnerabilities/#vulnerability-code-flow)  \n* [Security Policy documentation](https://docs.gitlab.com/ee/user/application_security/policies/) \n* [OWASP Juice Shop documentation](https://owasp.org/www-project-juice-shop/)\n",[9,678,724,704,482],{"slug":2602,"featured":91,"template":680},"quick-vulnerability-remediation-with-gitlab-advanced-sast-duo-ai","content:en-us:blog:quick-vulnerability-remediation-with-gitlab-advanced-sast-duo-ai.yml","Quick Vulnerability Remediation With Gitlab Advanced Sast Duo Ai","en-us/blog/quick-vulnerability-remediation-with-gitlab-advanced-sast-duo-ai.yml","en-us/blog/quick-vulnerability-remediation-with-gitlab-advanced-sast-duo-ai",{"_path":2608,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":2609,"content":2615,"config":2619,"_id":2621,"_type":14,"title":2622,"_source":16,"_file":2623,"_stem":2624,"_extension":19},"/en-us/blog/quickly-resolve-broken-ci-cd-pipelines-with-ai",{"title":2610,"description":2611,"ogTitle":2610,"ogDescription":2611,"noIndex":6,"ogImage":2612,"ogUrl":2613,"ogSiteName":693,"ogType":694,"canonicalUrls":2613,"schema":2614},"Quickly resolve broken CI/CD pipelines with AI","When 
your CI/CD pipeline fails, it leads to delays, decreased productivity, and stress. AI-powered Root Cause Analysis makes problem-solving faster and smarter.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097355/Blog/Hero%20Images/Blog/Hero%20Images/blog-image-template-1800x945%20%2811%29_78Dav6FR9EGjhebHWuBVan_1750097355230.png","https://about.gitlab.com/blog/quickly-resolve-broken-ci-cd-pipelines-with-ai","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Quickly resolve broken CI/CD pipelines with AI\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Itzik Gan Baruch\"}],\n        \"datePublished\": \"2024-12-03\",\n      }",{"title":2610,"description":2611,"authors":2616,"heroImage":2612,"date":1831,"body":2617,"category":10,"tags":2618},[1735],"CI/CD pipelines are the backbone of efficiency in software development. They help teams test, build, and deploy code quickly. But when these pipelines break, everything slows down — deadlines get missed, and developers are left frustrated as they work to fix things and keep projects on track.\n\n![CI/CD pipeline with multiple failed jobs](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097362/Blog/Content%20Images/Blog/Content%20Images/image3_aHR0cHM6_1750097362772.png)\n\n\u003Ccenter>\u003Ci>CI/CD pipeline with multiple failed jobs\u003C/i>\u003C/center>\u003Cbr>\u003C/br>\n\n**So, why do pipelines break in the first place?** Let’s break it down.\n\n## Reasons for pipeline failures\n\nA pipeline failure occurs when the automated workflow in your [CI/CD pipeline](https://about.gitlab.com/topics/ci-cd/cicd-pipeline/) — a series of steps that can include building, testing, and deploying code — does not execute as expected and ends with an error message. 
This failure can prevent code from being properly built, tested, or deployed, causing delays in software delivery and requiring troubleshooting to resolve. \n\nPipeline failures can happen for a variety of reasons. Some common causes include:\n- Syntax errors: A small mistake in the code, like a missing semicolon or incorrect variable name, can cause the pipeline to fail.\n- Failed tests: Unit or integration tests might fail due to broken code, incorrect configurations, or mismatched dependencies.\n- Misconfigurations: Incorrect pipeline settings or environment configurations can lead to failed builds or deployments.\n\nThere are also more complex issues that add to the challenge:\n- Infrastructure-as-Code ([IaC](https://about.gitlab.com/topics/gitops/infrastructure-as-code/)) issues: Problems in provisioning cloud infrastructure, such as errors in Terraform scripts or CloudFormation templates, can prevent a successful deployment.\n- Kubernetes and GitOps challenges: Misconfigurations in [Kubernetes clusters](https://about.gitlab.com/blog/kubernetes-the-container-orchestration-solution/) or issues with [GitOps](https://about.gitlab.com/topics/gitops/) workflows (e.g., syncing Kubernetes states with Git repositories) can cause pipeline failures that are difficult to diagnose.\n- Long, messy stack traces: When an error occurs deep in the system, stack traces can become long and hard to decipher, especially when they span multiple components or services.\n\nThese challenges make troubleshooting more difficult and time-consuming, as finding the root cause often involves sifting through complex logs, reviewing configuration files, and testing different solutions.\n\n## The real impact of failed pipelines\n\nWhen a pipeline fails, it doesn’t just delay your deployment — it brings stress and frustration. Developers are forced to pause their work and dive into troubleshooting, which often leads to a chain reaction of disruptions. 
This makes it harder to meet deadlines and increases the pressure on the entire team. But why is manual troubleshooting so stressful?\n\n### Manual troubleshooting \n\nThe time it takes to fix a broken pipeline varies. It depends on things like:\n- How well the developer knows the project\n- How experienced they are with similar issues\n- Their overall problem-solving skills\n\nManually digging through logs to figure out what went wrong is a tough and tedious process. Logs can come from all over the place, including application errors and system messages, and they’re often messy and hard to interpret. And on top of that, fixing the pipeline usually requires a lot of jumping back and forth between tasks, adding more time to the process.\n\nThis is where [GitLab Duo](https://about.gitlab.com/gitlab-duo/) comes in. GitLab Duo can sift through all that messy data and spot issues much faster, simplifying the process so you don’t need to be an expert to figure out what went wrong. With AI, fixing your pipelines becomes faster, easier, and much less stressful.\n\n\u003Cimg src=\"https://res.cloudinary.com/about-gitlab-com/image/upload/v1752176104/Blog/zxvvu7p9vc3qpmwl32ya.png\" alt=\"broken pipeline\">\n\n\u003Cimg src=\"https://res.cloudinary.com/about-gitlab-com/image/upload/v1752176108/Blog/bpx6dqilfhltzboyp8k8.png\" alt=\"fix suggestions for broken pipelines\">\n\n## GitLab Duo Root Cause Analysis with generative AI\n\nWhen your CI/CD pipeline breaks, you don’t have to spend hours manually troubleshooting. Enter [GitLab Duo’s Root Cause Analysis (RCA)](https://docs.gitlab.com/ee/user/gitlab_duo/#root-cause-analysis). This AI-powered tool quickly identifies the exact cause of the failure and suggests fixes — right within the DevSecOps platform. 
No matter how long or complicated your stack traces are, RCA analyzes all the data, breaks it down, and gives you clear, actionable insights.\n\n**It tells you exactly what caused the error, provides steps to fix it, and even pinpoints the specific files and lines of code that need attention.** And, to make it even easier, it suggests code fixes to get everything back on track. This makes troubleshooting a lot faster and more straightforward.\n\n\u003Cimg src=\"https://res.cloudinary.com/about-gitlab-com/image/upload/v1752176111/Blog/nmagby9hoksskogve53m.png\" alt=\"root cause of failure\">\n\n\u003Cimg src=\"https://res.cloudinary.com/about-gitlab-com/image/upload/v1752176115/Blog/dndis1cedwbmbnj33q3v.png\" alt=\"example fix\">\n\n## Keep the conversation going with follow-up questions\n\nWith GitLab Duo RCA, you don’t just get answers — you can ask follow-up questions to dig deeper. Want to explore alternative solutions? No problem. You can add [more context](https://docs.gitlab.com/ee/user/gitlab_duo_chat/index.html#the-context-chat-is-aware-of) by referencing other files, issues, or epics in your repo. For example, you could open your `.gitlab-ci.yml` file in the IDE and ask the chat, “Based on this file, and the analyzed CI/CD pipeline, how would you propose to optimize the pipeline?” \n\n## Privacy first – everything stays in GitLab\nOne of the key benefits of GitLab Duo RCA is that it works right out of the box within GitLab. You won’t have to switch tools or go hunting for external help. Plus, your [logs and sensitive data stay secure](https://about.gitlab.com/privacy/) - there’s no need to send them off to external AI solutions. 
RCA is seamlessly integrated within GitLab, offering valuable insights without ever compromising privacy.\n\n![broken pipelines - image 6](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097363/Blog/Content%20Images/Blog/Content%20Images/image2_aHR0cHM6_1750097362773.png)\n\n## Get started today\n\nWant to see how AI can supercharge your development process, making it smoother and faster? Dive into our GitLab Duo Enterprise product tour below and discover how GitLab Duo’s AI-powered insights can transform every stage of your development journey — from planning and coding to troubleshooting and deployment. Click the image below to start the tour!\n\n[![GitLab Duo Enterprise tour](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097363/Blog/Content%20Images/Blog/Content%20Images/Screenshot_2024-12-02_at_12.41.10_PM_aHR0cHM6_1750097362774.png)](https://gitlab.navattic.com/duo-enterprise)\n\n> [Start a free, 60-day trial of GitLab Duo today!](https://about.gitlab.com/solutions/gitlab-duo-pro/sales/)",[9,482,724,704],{"slug":2620,"featured":6,"template":680},"quickly-resolve-broken-ci-cd-pipelines-with-ai","content:en-us:blog:quickly-resolve-broken-ci-cd-pipelines-with-ai.yml","Quickly Resolve Broken Ci Cd Pipelines With Ai","en-us/blog/quickly-resolve-broken-ci-cd-pipelines-with-ai.yml","en-us/blog/quickly-resolve-broken-ci-cd-pipelines-with-ai",{"_path":2626,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":2627,"content":2633,"config":2637,"_id":2639,"_type":14,"title":2640,"_source":16,"_file":2641,"_stem":2642,"_extension":19},"/en-us/blog/rail-m-is-an-imperfectly-good-start-for-ai-model-licenses",{"title":2628,"description":2629,"ogTitle":2628,"ogDescription":2629,"noIndex":6,"ogImage":2630,"ogUrl":2631,"ogSiteName":693,"ogType":694,"canonicalUrls":2631,"schema":2632},"RAIL-M is an imperfectly good start for AI model licenses","\"GitLab, Inc. is dedicated to open source and AI. 
This is our take on a model license relevant to open source and AI communities: the BigScience Open RAIL-M license.\"","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749671836/Blog/Hero%20Images/railmimage.jpg","https://about.gitlab.com/blog/rail-m-is-an-imperfectly-good-start-for-ai-model-licenses","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"RAIL-M is an imperfectly good start for AI model licenses\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Robin Schulman\"}],\n        \"datePublished\": \"2023-07-25\",\n      }",{"title":2628,"description":2629,"authors":2634,"heroImage":2630,"date":1910,"body":2635,"category":10,"tags":2636},[2221],"GitLab, Inc. is dedicated to open source – we believe in it, use it, and give back to it, and we have an [open core](https://about.gitlab.com/company/stewardship/) business model. We also care deeply about artificial intelligence (AI) – we recently announced that we are investing heavily in AI by [infusing it into every phase of our comprehensive DevSecOps platform](https://about.gitlab.com/solutions/ai/).\n\nWe were thus very interested to see [Responsible AI Licenses'](https://www.licenses.ai/) recent release of a model license relevant to both the open source and AI communities: the BigScience Open RAIL-M license ([RAIL-M](https://www.licenses.ai/blog/2022/8/26/bigscience-open-rail-m-license)).\n\nWe see RAIL-M as an exciting but flawed development in the AI model licensing space. 
Its authors’ intentions are admirable and important, but in practical terms, RAIL-M still has room for improvement.\n\n### What is RAIL-M?\nRAIL-M is part of the Open Responsible AI Licenses (Open RAIL) [family](https://www.licenses.ai/blog/2022/8/18/naming-convention-of-responsible-ai-licenses), which is a collection of AI licenses that aim to promote responsible use by imposing behavioral use restrictions on the model’s licensees and downstream users. The Open RAIL family is not alone – it’s within a new wave of licenses (see, for example, the [TII Falcon LLM License](https://huggingface.co/tiiuae/falcon-40b/blob/main/LICENSE.txt)) spawned by the public’s recent interest in AI. RAIL-M specifically applies these use restrictions to the model (the “M” in RAIL-M stands for “model”).\n\nThis blog discusses RAIL-M specifically, and does not consider the other licenses in the Open RAIL family.\n\n### RAIL-M puts ethics at the forefront\nIn recent months, much ink has been spilled over the novel ethical dilemmas that AI presents. Technologists, journalists, and companies alike have sounded the alarm on the various societal harms that AI could exacerbate (see, for example, OpenAI, Google Deepmind, and other AI companies’ [recent open letter](https://www.nytimes.com/2023/05/30/technology/ai-threat-warning.html) declaring that AI poses a “risk of extinction”). Regulators are taking notice. Recently, OpenAI’s CEO Sam Altman testified in a Senate hearing on AI’s risks, and a key committee of European Parliament lawmakers [approved the EU AI Act](https://www.europarl.europa.eu/news/en/press-room/20230505IPR84904/ai-act-a-step-closer-to-the-first-rules-on-artificial-intelligence), which aims to mitigate AI’s potential harms.\n\nRAIL-M places these ethical considerations front and center. 
Its restrictions prohibit the AI model’s licensees and their downstream users from engaging in potentially harmful uses such as applying the model in a way that violates applicable law, to provide medical advice, or to harass or defraud others.\n\nThese provisions’ practical implications are, admittedly, still a bit unclear. Regardless, these use-based restrictions will, at the very least, deter some from applying the model in harmful ways, and help push ethical considerations to the forefront of today’s fast-paced AI landscape. In the words of the Organisation for Economic Co-operation and Development ([OECD](https://oecd.ai/en/catalogue/tools/bigscience-openrail-m-license)): “OpenRAILs are a vehicle towards the consolidation of an informed and respectful culture of sharing AI artifacts acknowledging their limitations and the values held by the licensors of the model.”\n\n### In practice, RAIL-M isn’t perfect\nFirst, describing RAIL-M as an “open” license – as RAIL-M’s authors have in its title – is misleading. RAIL-M’s authors conflate royalty-free access and flexible use and re-distribution with truly “open” licenses. The Open Source Initiative ([OSI](https://opensource.org/osd/)) defines “open source” as software that, among other qualities, “must not restrict anyone from making use of the program in a specific field of endeavor.” RAIL-M’s use-based restrictions – which include prohibitions on providing medical advice, and generating information to be used for the administration of justice or law enforcement – prevent it from being a truly “open” license.\n\nSecond, regulators such as those in the EU will likely pass laws imposing certain use restrictions on AI tools in the near future. 
RAIL-M doesn’t cover how its own use-based requirements will interact with AI-related laws, which may present an issue if, for example, a RAIL-M restriction conflicts with one of these new regulations.\n\nFinally, commentators, including [Kyle Mitchell](https://writing.kemitchell.com/2023/01/26/Open-RAIL-M-Unclear) and [Luis Villa](https://blog.tidelift.com/evaluating-the-rail-license-family), have also expressed concerns that some of RAIL-M’s requirements may be too vague to comply with.\n\n### A net benefit to the AI community\nRAIL-M isn’t perfect. However, setting aside its practical flaws, RAIL-M’s release is still an important signal both to and from the AI community that AI ethics matter and must be considered even (and perhaps especially) when offering free, publicly-available models. To again quote [the OECD](https://oecd.ai/en/catalogue/tools/bigscience-openrail-m-license): “[l]icenses [like those in the Open RAIL family] … should not be conceived as burdensome legal technical mechanisms, but rather as a communication instrument among AI communities bringing stakeholders together by sharing common messages on how the licensed artifact can be used.”\n\nRAIL-M, and the Open RAIL family as a whole, will likely encourage the AI community – both AI model maintainers and perhaps even proprietary model creators – to consider, and work to mitigate, their models’ potential harms and abuses. 
We’ll be interested to see where it goes.\n\n_Cover image by [Google DeepMind](https://unsplash.com/@deepmind?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText) on [Unsplash](https://unsplash.com/photos/ZJKE4XVlKIA?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText)_\n",[9,1036],{"slug":2638,"featured":6,"template":680},"rail-m-is-an-imperfectly-good-start-for-ai-model-licenses","content:en-us:blog:rail-m-is-an-imperfectly-good-start-for-ai-model-licenses.yml","Rail M Is An Imperfectly Good Start For Ai Model Licenses","en-us/blog/rail-m-is-an-imperfectly-good-start-for-ai-model-licenses.yml","en-us/blog/rail-m-is-an-imperfectly-good-start-for-ai-model-licenses",{"_path":2644,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":2645,"content":2650,"config":2655,"_id":2657,"_type":14,"title":2658,"_source":16,"_file":2659,"_stem":2660,"_extension":19},"/en-us/blog/refactor-code-into-modern-languages-with-ai-powered-gitlab-duo",{"title":2646,"description":2647,"ogTitle":2646,"ogDescription":2647,"noIndex":6,"ogImage":903,"ogUrl":2648,"ogSiteName":693,"ogType":694,"canonicalUrls":2648,"schema":2649},"Refactor code into modern languages with AI-powered GitLab Duo ","This detailed tutorial helps developers use AI to modernize code by switching to a new programming language and gain knowledge about new features in the same language.","https://about.gitlab.com/blog/refactor-code-into-modern-languages-with-ai-powered-gitlab-duo","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Refactor code into modern languages with AI-powered GitLab Duo \",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Michael Friedrich\"}],\n        \"datePublished\": \"2024-08-26\",\n      }",{"title":2646,"description":2647,"authors":2651,"heroImage":903,"date":2652,"body":2653,"category":10,"tags":2654},[1217],"2024-08-26","Whether you are tasked with modernizing the code 
base or framework by switching to a new programming language, or you need knowledge about new language features in the same language, AI-powered [GitLab Duo](https://about.gitlab.com/gitlab-duo/) can help. Learn how to approach code refactoring challenges with best practices using examples from the past 20 years of my coding career. \n\nThe prompts and examples in this article are shown in different IDEs: VS Code and JetBrains IDEs (IntelliJ IDEA, PyCharm, and CLion) with the [GitLab Duo extensions/plugins](https://docs.gitlab.com/ee/user/project/repository/code_suggestions/supported_extensions.html) installed. The development environment uses GitLab.com, including updates to Anthropic Claude 3.5 as Large Language Model (LLM) for GitLab Duo [Code Suggestions](https://docs.gitlab.com/ee/user/gitlab_duo/#code-suggestions) and [Chat](https://docs.gitlab.com/ee/user/gitlab_duo/#gitlab-duo-chat). Spoiler: They are even more powerful and efficient.\n\nYou can navigate into each section of the article, or read top-down. 
The source code and challenges with exercises are provided for self-learning, too.\n\n- [Refactor code to modern programming language standards](#refactor-code-to-modern-programming-language-standards)\n    - [Generate Java 7 and refactor to Java 8](#generate-java-7-and-refactor-to-java-8)\n    - [Refactor across C++ standards](#refactor-across-c%2B%2B-standards)\n        - [Migration: Refactor C++03 into C++14](#migration-refactor-c%2B%2B03-into-c%2B%2B14)\n        - [Downgrade: Refactor C++23 to C++11](#downgrade-refactor-c%2B%2B23-to-c%2B%2B11)\n    - [Explain and refactor COBOL](#explain-and-refactor-cobol)\n- [Refactor a language into another language](#refactor-a-language-into-another-language)\n    - [Refactor C to Rust](#refactor-c-to-rust)\n    - [Refactor Perl to Python](#refactor-perl-to-python)\n- [More refactoring hands-on](#more-refactoring-hands-on)\n    - [Refactor JavaScript](#refactor-javascript)\n    - [Refactor Bash to ZSH or SH](#refactor-bash-to-zsh-or-sh)\n    - [More use cases and tutorials](#more-use-cases-and-tutorials)\n- [Key takeaways](#key-takeaways)\n\n## Refactor code to modern programming language standards\n\nThe next sections show how to refactor, explain, and understand the language specific differences in a defined programming language. \n\n### Generate Java 7 and refactor to Java 8 \n\nA customer wanted to know if GitLab Duo supports Java 7. While testing the Code Suggestions and Chat prompts to generate Java 7 specific code, I also thought of migration help. Anthropic Claude 3.5 is capable of understanding the language-specific differences, and so is GitLab Duo.\n\nThe following example shows a prompt to generate Java 7 code with Code Suggestions.\n\n```java\n// Generate a new class for a File Manager. 
Use Java 7 features only.\nimport java.io.*;\nimport java.nio.file.*;\nimport java.util.*;\n\npublic class FileManager {\n    private Path rootDirectory;\n\n    public FileManager(String rootPath) throws IOException {\n        this.rootDirectory = Paths.get(rootPath);\n        if (!Files.exists(rootDirectory)) {\n            Files.createDirectories(rootDirectory);\n        }\n    }\n\n    public void createFile(String fileName, String content) throws IOException {\n        Path filePath = rootDirectory.resolve(fileName);\n        Files.write(filePath, content.getBytes(), StandardOpenOption.CREATE, StandardOpenOption.TRUNCATE_EXISTING);\n    }\n\n    public String readFile(String fileName) throws IOException {\n        Path filePath = rootDirectory.resolve(fileName);\n        return new String(Files.readAllBytes(filePath));\n    }\n\n    public void deleteFile(String fileName) throws IOException {\n        Path filePath = rootDirectory.resolve(fileName);\n        Files.delete(filePath);\n    }\n\n    public List\u003CString> listFiles() throws IOException {\n        List\u003CString> fileList = new ArrayList\u003C>();\n        try (DirectoryStream\u003CPath> stream = Files.newDirectoryStream(rootDirectory)) {\n            for (Path file : stream) {\n                fileList.add(file.getFileName().toString());\n            }\n        }\n        return fileList;\n    }\n\n    public void copyFile(String sourceFileName, String destinationFileName) throws IOException {\n        Path sourcePath = rootDirectory.resolve(sourceFileName);\n        Path destinationPath = rootDirectory.resolve(destinationFileName);\n        Files.copy(sourcePath, destinationPath, StandardCopyOption.REPLACE_EXISTING);\n    }\n\n    public void moveFile(String sourceFileName, String destinationFileName) throws IOException {\n        Path sourcePath = rootDirectory.resolve(sourceFileName);\n        Path destinationPath = rootDirectory.resolve(destinationFileName);\n        Files.move(sourcePath, 
destinationPath, StandardCopyOption.REPLACE_EXISTING);\n    }\n\n    public long getFileSize(String fileName) throws IOException {\n        Path filePath = rootDirectory.resolve(fileName);\n        return Files.size(filePath);\n    }\n\n    public boolean fileExists(String fileName) {\n        Path filePath = rootDirectory.resolve(fileName);\n        return Files.exists(filePath);\n    }\n}\n```\n\nYou can continue with the following steps in your IDE:\n\n1. Learn and understand the generated code: Select the code in the editor (blue color in the screenshot), switch into GitLab Duo Chat and send the `/explain` slash command as prompt.\n\n![Validate the generated code with the `/explain` slash command in GitLab Duo Chat](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749675059/Blog/Content%20Images/intellij_java7_generate_refactor.png)\n\n2. Refactor the Java 7 code into Java 8: Select the code in the editor, switch into Chat, and send `/refactor using Java 8 features` as refined prompt.\n3. Practice an alternative to refactoring: Create a new file `java8.java` and generate Java 8 specific code using Code Suggestions with the prompt `// Generate a new class for a File Manager. Use Java 8 features only.`.\n4. Continue writing Java 8 code in the same context with AI-powered code completions.\n\nYou can watch all steps in this recording.\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/XKRv6uBkD2I\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\nThe source code is available in the [GitLab Duo Challenge - Generate and refactor Java 7 to 8 project](https://gitlab.com/gitlab-da/use-cases/ai/ai-workflows/gitlab-duo-challenges/code-challenges/challenge-generate-refactor-java-7-to-8).\n\n### Refactor across C++ standards\n\nThe C++ standard has a long history and can be considered stable and mature. 
Depending on the target architecture and platforms, the supported standard depends on the compiler on the host operating system.\n\nIn my career, I needed to support 25 different Linux and Windows distributions in open source projects, and have seen varying compiler support for newer language features and C++ standards. In 2015, we settled on C++11 features, and later never moved on to C++14 or C++17. \n\nRefactoring functions and algorithms to a new C++ standard would have required too much effort, and help with explaining the differences was needed. Back then, we researched through books, online search, Stack Overflow, and sometimes tool assistance for refactoring.\n\nThings have changed with AI-powered assistants and GitLab Duo: You can instruct Code Suggestions and Chat to provide source code in a specific C++ standard. Then, it is also possible to compare the different outputs. \n\nThe following example uses a prompt to specifically generate source code for the C++03 standard. It implements a simple config parser for the `.ini` configuration format.\n\n```c++\n/* Create a config parser which can parse an ini format\nUse C++03 standard only\n */\n\n#include \u003Ciostream>\n#include \u003Cfstream>\n#include \u003Cstring>\n#include \u003Cmap>\n#include \u003Csstream>\n\nclass ConfigParser {\nprivate:\n    std::map\u003Cstd::string, std::map\u003Cstd::string, std::string> > config;\n\npublic:\n    ConfigParser() {}\n\n    bool parse(const std::string& filename) {\n        std::ifstream file(filename.c_str());\n        if (!file.is_open()) {\n            return false;\n        }\n\n        std::string line;\n        std::string currentSection;\n\n        while (std::getline(file, line)) {\n            // Trim whitespace\n            size_t first = line.find_first_not_of(\" \\t\");\n            size_t last = line.find_last_not_of(\" \\t\");\n            if (first == std::string::npos) continue; // Skip empty lines\n            line = line.substr(first, (last - 
first + 1));\n\n            if (line[0] == '[' && line[line.length() - 1] == ']') {\n                // This is a section\n                currentSection = line.substr(1, line.length() - 2);\n            } else {\n                // This is a key-value pair\n                size_t delimiterPos = line.find('=');\n                if (delimiterPos != std::string::npos) {\n                    std::string key = line.substr(0, delimiterPos);\n                    std::string value = line.substr(delimiterPos + 1);\n                    config[currentSection][key] = value;\n                }\n            }\n        }\n\n        file.close();\n        return true;\n    }\n\n    std::string getValue(const std::string& section, const std::string& key) const {\n        std::map\u003Cstd::string, std::map\u003Cstd::string, std::string> >::const_iterator sectionIt = config.find(section);\n        if (sectionIt != config.end()) {\n            std::map\u003Cstd::string, std::string>::const_iterator keyIt = sectionIt->second.find(key);\n            if (keyIt != sectionIt->second.end()) {\n                return keyIt->second;\n            }\n        }\n        return \"\";\n    }\n\n    void printConfig() const {\n        std::map\u003Cstd::string, std::map\u003Cstd::string, std::string> >::const_iterator sectionIt;\n        for (sectionIt = config.begin(); sectionIt != config.end(); ++sectionIt) {\n            std::cout \u003C\u003C \"[\" \u003C\u003C sectionIt->first \u003C\u003C \"]\" \u003C\u003C std::endl;\n            std::map\u003Cstd::string, std::string>::const_iterator keyIt;\n            for (keyIt = sectionIt->second.begin(); keyIt != sectionIt->second.end(); ++keyIt) {\n                std::cout \u003C\u003C keyIt->first \u003C\u003C \" = \" \u003C\u003C keyIt->second \u003C\u003C std::endl;\n            }\n            std::cout \u003C\u003C std::endl;\n        }\n    }\n};\n\nint main() {\n    ConfigParser parser;\n    if (parser.parse(\"config.ini\")) {\n        
std::cout \u003C\u003C \"Config file parsed successfully.\" \u003C\u003C std::endl;\n        parser.printConfig();\n\n        std::cout \u003C\u003C \"Value of 'key1' in 'section1': \" \u003C\u003C parser.getValue(\"section1\", \"key1\") \u003C\u003C std::endl;\n    } else {\n        std::cout \u003C\u003C \"Failed to parse config file.\" \u003C\u003C std::endl;\n    }\n    return 0;\n}\n```\n\nThe same prompts can be applied to [explaining code](https://docs.gitlab.com/ee/user/gitlab_duo_chat/examples.html#explain-code-in-the-ide), [generating tests](https://docs.gitlab.com/ee/user/gitlab_duo_chat/examples.html#write-tests-in-the-ide), and even [refactoring code](https://docs.gitlab.com/ee/user/gitlab_duo_chat/examples.html#refactor-code-in-the-ide): `/explain focus on C++03 features`, `/tests for C++03 features`, `/refactor into C++03`. \n\nThe following C++ standards have been tested working with GitLab Duo Code Suggestions:\n\n- C++03\n- C++11\n- C++14\n- C++17\n- C++20\n- C++23\n\nYou can find the source code in the [Challenge - Refactor Cpp Standards Config Parser project](https://gitlab.com/gitlab-da/use-cases/ai/ai-workflows/gitlab-duo-challenges/code-challenges/challenge-refactor-cpp-standards-config-parser). \n\nThere are many different combinations to refactor source code between the C++ standards. The following examples reflect on modernizing the C++ code with version upgrades, but also test a downgrade where ambitious developers wanted to use C++20 but had to use C++11 to support enterprise customer environments.\n\n#### Migration: Refactor C++03 into C++14 \n\nThe goal is to modernize the code, and understand the changes between C++03 and C++14. 
Open the [03.cpp](https://gitlab.com/gitlab-da/use-cases/ai/ai-workflows/gitlab-duo-challenges/code-challenges/challenge-refactor-cpp-standards-config-parser/-/blob/main/03.cpp?ref_type=heads) source code in the IDE, select it, and use the following Chat prompt: \n\n```markdown\n/refactor into C++14 and explain the changes\n```\n\nPotential Chat response: \n\n```markdown\n…\n\nExplanations of the changes:\n\n1. Default constructor: Changed `ConfigParser() {}` to `ConfigParser() = default;` to use the default constructor.\n2. File opening: Removed `.c_str()` when opening the file, as C++11 and later support `std::string` directly.\n3. Lambda functions: Introduced a lambda function trim to handle string trimming, making the code more readable and reusable.\n4. Auto keyword: Used `auto` for type deduction in various places, improving code readability.\n5. Range-based for loops: In the `printConfig` method, replaced traditional for loops with range-based for loops, which are more concise and less error-prone.\n6. Structured bindings: Used structured bindings in the range-based for loops to directly access key-value pairs.\n7. If statements with initializers: In the `getValue` method, used if statements with initializers to combine the find operation and the check in a single line.\n8. String member functions: Used `front()` and `back()` instead of array-style access for the first and last characters of strings.\n9. Uniform initialization: Used uniform initialization `{}` instead of parentheses `()` for object creation.\n10. 
Removed `std::endl`: Replaced `std::endl` with `'\\n'` for better performance, as `std::endl` forces a flush of the output buffer.\n\nThese changes make the code more modern, concise, and potentially more efficient while maintaining the same functionality.\n\n```\n\n![Changes reflected in code](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749675059/Blog/Content%20Images/duo_refactor_cpp_clion_chat_cpp03_to_14_01.png)\n\n![Changes reflected in code 2](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749675060/Blog/Content%20Images/duo_refactor_cpp_clion_chat_cpp03_to_14_02.png)\n\n#### Downgrade: Refactor C++23 to C++11\n\nSometimes, developers started a new feature proof-of-concept in a modern C++ standard like C++20. The actual supported versions for enterprise customer environments require older compilers with C++11 support. Downgrading the versions come at a cost, and require evaluating which language features are still supported, and which other disadvantages could come across.\n\nOpen the [23.cpp](https://gitlab.com/gitlab-da/use-cases/ai/ai-workflows/gitlab-duo-challenges/code-challenges/challenge-refactor-cpp-standards-config-parser/-/blob/main/23.cpp?ref_type=heads) source code in the IDE, select it, and use the following Chat prompt: \n\n```markdown\n/refactor into C++11, explain the necessary changes and potential problems\n```\n\nPotential Duo Chat response: \n\n```markdown\nThe main changes required to make this code compatible with C++11 are:\n\n1. Remove the C++23 standard comment, as it's no longer applicable.\n2. Replace `std::string::find_first_not_of` and `std::string::find_last_not_of` with C++11 compatible alternatives in the trim function.\n3. Use `const auto&` instead of `auto` for loop variables to avoid unnecessary copying.\n\nPotential problems in the original code:\n\n1. The original code used C++23 features, which might not be widely supported yet.\n2. 
The `trim` function used `std::string::find_first_not_of` and `std::string::find_last_not_of`, which are not available in C++11.\n3. The use of `auto` instead of `const auto&` in some loops could lead to unnecessary copying of complex objects.\n\nThese issues have been addressed in the refactored version. The `trim` function now uses `std::find_if_not` with lambda functions to achieve the same result in a C++11 compatible way. The use of `const auto&` ensures that no unnecessary copying occurs when iterating over the config map.\n```\n\n![The `trim` function now uses `std::find_if_not` with lambda functions to achieve the same result in a C++11 compatible way](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749675059/Blog/Content%20Images/duo_refactor_cpp_clion_chat_cpp23_to_11_01.png)\n\n![The use of `const auto&` ensures that no unnecessary copying occurs when iterating over the config map](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749675059/Blog/Content%20Images/duo_refactor_cpp_clion_chat_cpp23_to_11_02.png)\n\n**Async practice**: Test more version refactoring scenarios.\n\n### Explain and refactor COBOL\n\nYou can use GitLab Duo to explain the source code, analyze, fix and refactor for COBOL programs. I have never written nor learned COBOL, and found this helpful [COBOL Programming Course](https://github.com/openmainframeproject/cobol-programming-course) with many examples.\n\nI then asked Chat how to get started with COBOL, create a COBOL program, and compile a COBOL program on macOS.\n\n```markdown\nPlease explain what COBOL is and its syntax\n\nPlease create a COBOL program that shows the first steps\n\nTell me more about the COBOL compiler. Which system do I need? 
Can I do it on my macOS?\n\n```\n\n![Asking GitLab Duo Chat to explain COBOL and its syntax](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749675059/Blog/Content%20Images/vscode_chat_cobol_generate_example.png)\n\nOpen a COBOL program, select the source code, switch to Duo Chat and send the `/explain` prompt to explain purpose and functionality.\n\nYou can also refine the prompts to get more high-level summaries, for example:\n\n```markdown \n/explain like I am five\n```\n\n> Tip: Programming languages share similar algorithms and functionality. For COBOL, Chat offered to explain it using Python, and, therefore, I adjusted future prompts to ask for an explanation in Python.\n\n```markdown\n/explain in a different programming language\n```\n\nYou can also use the `/refactor` slash command prompt in Chat to improve the code quality, fix potential problems, and try to refactor COBOL into Python.\n\n```markdown\n/refactor fix the environment error\n\n/refactor fix potential problems\n\n/refactor into Python\n```\n\nThe [GitLab Duo Coffee Chat - Challenge: Explain and Refactor COBOL programs](https://gitlab.com/gitlab-da/use-cases/ai/ai-workflows/gitlab-duo-challenges/code-challenges/challenge-explain-refactor-cobol-program) recording shows all discussed steps in a practical use case, including how to find a missing period: \n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/pwlDmLQMMPo\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\n## Refactor a language into another language\n\nModernization and code quality improvements sometimes require the change of a programming language. Similar refactor prompts with GitLab Duo can help speed up the migration process. 
The COBOL example with Python is just one of many requirements in enterprise environments -- let's dive into more use cases.\n\n### Refactor C to Rust \n\nIn early 2024, several programming languages, like C, have been called out for not being memory safe. The recommendations for future projects include [memory safe languages](https://about.gitlab.com/blog/memory-safe-vs-unsafe/) like Rust. But how do you start a migration, and what are the challenges?\n\nLet's try it with a simple example in C. The code was generated using Code Suggestions and should print the basic operating system information, like the name, version, and platform. The C code compiles cross-platform on Windows, Linux, and macOS.\n\n```c\n// Read OS files to identify the platform, name, versions\n// Print them on the terminal\n#include \u003Cstdio.h>\n#include \u003Cstdlib.h>\n#include \u003Cstring.h>\n\n#ifdef _WIN32\n    #include \u003Cwindows.h>\n#elif __APPLE__\n    #include \u003Csys/utsname.h>\n#else\n    #include \u003Csys/utsname.h>\n#endif\n\nvoid get_os_info() {\n    #ifdef _WIN32\n        OSVERSIONINFOEX info;\n        ZeroMemory(&info, sizeof(OSVERSIONINFOEX));\n        info.dwOSVersionInfoSize = sizeof(OSVERSIONINFOEX);\n        GetVersionEx((OSVERSIONINFO*)&info);\n\n        printf(\"Platform: Windows\\n\");\n        printf(\"Version: %d.%d\\n\", info.dwMajorVersion, info.dwMinorVersion);\n        printf(\"Build: %d\\n\", info.dwBuildNumber);\n    #elif __APPLE__\n        struct utsname sys_info;\n        uname(&sys_info);\n\n        printf(\"Platform: macOS\\n\");\n        printf(\"Name: %s\\n\", sys_info.sysname);\n        printf(\"Version: %s\\n\", sys_info.release);\n    #else\n        struct utsname sys_info;\n        uname(&sys_info);\n\n        printf(\"Platform: %s\\n\", sys_info.sysname);\n        printf(\"Name: %s\\n\", sys_info.nodename);\n        printf(\"Version: %s\\n\", sys_info.release);\n    #endif\n}\n\nint main() {\n    get_os_info();\n    return 0;\n}\n```\n\nOpen 
the source code in [`os.c`](https://gitlab.com/gitlab-da/use-cases/ai/ai-workflows/gitlab-duo-challenges/code-challenges/challenge-refactor-c-to-rust/-/blob/897bf57a14bb7be07d842e7f044f93a61456d611/c/os.c) in JetBrains CLion, for example. Select the source code and use the Chat prompt `/explain` to explain purpose and functionality. Next, use `/refactor` in the Chat prompt to refactor the C code, and then take it one step further: `/refactor into Rust`. \n\nInitialize a new Rust project (Tip: Ask Duo Chat), and copy the generated source code into the `src/main.rs` file. Run `cargo build` to compile the code. \n\n![Initialize a new Rust project, and copy the generated source code into the `src/main.rs` file. Run `cargo build` to compile the code.](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749675059/Blog/Content%20Images/jetbrains_clion_c_rust.png)\n\nIn the [GitLab Duo Coffee Chat: Challenge - Refactor C into Rust](https://gitlab.com/gitlab-da/use-cases/ai/ai-workflows/gitlab-duo-challenges/code-challenges/challenge-refactor-c-to-rust) recording, you can learn all steps, and additionally, you'll see a compilation error which gets fixed with the help of Chat and `/refactor` slash command. The session also shows how to improve the maintainability of the new Rust code by adding more error handling. \n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/nf8g2ucqvkI\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\n### Refactor Perl to Python \n\nThat one script that runs on production servers, does its job, the author left the company ten years ago, and nobody wants to touch it. The problem might also apply to multiple scripts, or even a whole application. 
A decision was made to migrate everything to modern Python 3, with the goal to modernize the code, and understand the changes between Perl and Python.\n\nA customer recently asked in a GitLab Duo workshop whether a direct migration is possible using GitLab Duo. Short answer: Yes, it is. Longer answer: You can use refined Chat prompts to refactor Perl code into Python, similar to other examples in this article.\n\nOpen the `script.pl` source code in IDE, select it, and open Chat.\n\n```perl\n#!/usr/bin/perl\nuse strict;\nuse warnings;\n\nopen my $md_fh, '\u003C', 'file.md' or die \"Could not open file.md: $!\";\n\nmy $l = 0;\nmy $e = 0;\nmy $h = 0;\n\nwhile (my $line = \u003C$md_fh>) {\n  $l++;\n  if ($line =~ /^\\s*$/) {\n    $e++;\n    next;\n  }\n  if ($line =~ /^#+\\s*(.+)/) {\n    print \"$1\\n\";\n    $h++; \n  }\n}\n\nprint \"\\nS:\\n\"; \nprint \"L: $l\\n\";\nprint \"E: $e\\n\"; \nprint \"H: $h\\n\";\n```\n\nYou can use the following prompts to:\n\n1. `/explain` its purpose, and `/refactor` to improve the code.\n2. `/refactor into Python` to get a working Python script.\n\n![Refactor into Python](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749675059/Blog/Content%20Images/pycharm_duo_refactor_perl_python.png)\n\n> Tip: You can refactor Perl code into more target languages. The [GitLab Duo Coffee Chat: Challenge - Refactor Perl to Python](https://gitlab.com/gitlab-da/use-cases/ai/ai-workflows/gitlab-duo-challenges/code-challenges/challenge-refactor-perl-python) recording shows PHP, Ruby, Rust, Go, Java, VB.NET, C#, and more.\n> \n> If you want to continue using Perl scripts, you can configure [Perl as additional language](https://docs.gitlab.com/ee/user/project/repository/code_suggestions/supported_extensions.html#add-support-for-more-languages) in Duo Code Suggestions. 
Chat already understands Perl and can help with questions and slash command prompts, as you can see in the following recording.\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/03HGhxXg9lw\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\n## More Refactoring Hands-on \n\n### Refactor JavaScript \n\nEddie Jaoude shows how to refactor JavaScript to improve code quality or add functionality in a practical example. \n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/mHn8KOzpPNY\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\n### Refactor Bash to ZSH or SH\n\nI have used Bash as a shell for 20 years and most recently switched to ZSH on macOS. This resulted in scripts not working, or unknown errors in my terminal. Another use case for refactoring is shell limitations – some operating systems or Linux/Unix distributions do not provide Bash, only SH, for example, Alpine.\n\n![Refactor shell scripts](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749675059/Blog/Content%20Images/intellj_refactor_shell_scripts.png)\n\nThe [GitLab Duo Coffee Chat: Challenge - Refactor Shell Scripts](https://gitlab.com/gitlab-da/use-cases/ai/ai-workflows/gitlab-duo-challenges/code-challenges/challenge-refactor-shell-scripts) shows an example with a C program that can tail syslog files, and a build script written in Bash. Throughout the challenge, Chat is queried with `/explain` and `/refactor` prompts to improve the code. It is also possible to refactor Bash into POSIX-compliant SH or ZSH. The session concludes with asking Chat to provide five different Shell script implementations, and explain the key summaries. 
\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/mssqYjlKGzU\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\n### More use cases and tutorials\n\n- [Documentation: GitLab Duo use cases](https://docs.gitlab.com/ee/user/gitlab_duo/use_cases.html)\n- [Tutorial: Top tips for efficient AI-powered code suggestions with GitLab Duo](https://about.gitlab.com/blog/top-tips-for-efficient-ai-powered-code-suggestions-with-gitlab-duo/)\n- [Tutorial: 10 best practices for using AI-powered GitLab Duo Chat](https://about.gitlab.com/blog/10-best-practices-for-using-ai-powered-gitlab-duo-chat/)\n\n## Key takeaways \n\n1. GitLab Duo provides efficient help with explaining and refactoring code. \n1. You can refactor code between language standards, and ask follow-up questions in Chat.\n1. Code Suggestions prompts can generate specific language standards, and code completion respects the current code context. \n1. Refactoring code into new programming languages helps with longer term migration and modernization plans.\n1. Code can be \"downgraded\" into older system's supported language standards.\n1. GitLab Duo can explain complex code and programming languages with different programming language examples.\n1. The update to Anthropic Claude 3.5 on GitLab.com has improved the quality and speed of Code Suggestions and Chat once again (self-managed upgrade to 17.3 recommended).\n1. 
There are no boundaries except your imagination, and production pain points.\n\nLearn more about efficient Code Suggestions and Chat workflows, and start your AI-powered code refactoring journey with GitLab Duo today!\n\n> [Start your 60-day free trial of GitLab Duo!](https://about.gitlab.com/solutions/gitlab-duo-pro/sales/?type=free-trial&toggle=gitlab-duo-pro_)\n",[9,724,703],{"slug":2656,"featured":6,"template":680},"refactor-code-into-modern-languages-with-ai-powered-gitlab-duo","content:en-us:blog:refactor-code-into-modern-languages-with-ai-powered-gitlab-duo.yml","Refactor Code Into Modern Languages With Ai Powered Gitlab Duo","en-us/blog/refactor-code-into-modern-languages-with-ai-powered-gitlab-duo.yml","en-us/blog/refactor-code-into-modern-languages-with-ai-powered-gitlab-duo",{"_path":2662,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":2663,"content":2668,"config":2674,"_id":2676,"_type":14,"title":2677,"_source":16,"_file":2678,"_stem":2679,"_extension":19},"/en-us/blog/refactoring-javascript-to-typescript-with-gitlab-duo-workflow",{"title":2664,"description":2665,"ogTitle":2664,"ogDescription":2665,"noIndex":6,"ogImage":1845,"ogUrl":2666,"ogSiteName":693,"ogType":694,"canonicalUrls":2666,"schema":2667},"Refactoring JavaScript to TypeScript with GitLab Duo Workflow","Learn how we used our autonomous AI agent, which sits in your development environment, to convert a real-world JavaScript application to TypeScript.","https://about.gitlab.com/blog/refactoring-javascript-to-typescript-with-gitlab-duo-workflow","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Refactoring JavaScript to TypeScript with GitLab Duo Workflow\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Frédéric Caplette\"}],\n        \"datePublished\": \"2025-05-22\",\n      
}",{"title":2664,"description":2665,"authors":2669,"heroImage":1845,"date":2671,"body":2672,"category":10,"tags":2673},[2670],"Frédéric Caplette","2025-05-22","TypeScript adoption continues to grow, with over 88% of developers reporting they either use or want to use it. Yet, migrating existing JavaScript codebases to TypeScript is often a time-consuming process. Enter [GitLab Duo Workflow](https://about.gitlab.com/blog/gitlab-duo-workflow-enterprise-visibility-and-control-for-agentic-ai/): secure, agentic AI that sits right inside your development environment, helping transform high-level tasks into executable workflows. In this article, you'll learn how we used Duo Workflow to update Duo Workflow, converting a real-world JavaScript application to TypeScript. We'll also review the technical process and broader implications for development workflows.\n\nThis video walks through visually what you'll read below:\n\n\u003Cdiv style=\"padding:56.25% 0 0 0;position:relative;\">\u003Ciframe src=\"https://player.vimeo.com/video/1085078036?badge=0&amp;autopause=0&amp;player_id=0&amp;app_id=58479\" frameborder=\"0\" allow=\"autoplay; fullscreen; picture-in-picture; clipboard-write; encrypted-media\" style=\"position:absolute;top:0;left:0;width:100%;height:100%;\" title=\"Refactor JavaScript to TypeScript with GitLab Duo Workflow\">\u003C/iframe>\u003C/div>\u003Cscript src=\"https://player.vimeo.com/api/player.js\">\u003C/script>\n\n## The challenge: Refactor JS to TS\n\nWe decided to migrate Duo Workflow client-related logic to TypeScript for better type safety and auto-complete. A JavaScript-to-TypeScript migration involves more than just changing file extensions. It requires:\n\n1. Analyzing existing code patterns to determine appropriate types\n2. Handling edge cases where type inference is ambiguous\n3. Ensuring consistency across the codebase\n4. Managing dependencies and third-party libraries\n5. 
Validating that runtime behavior remains unchanged\n\nDoing all of this work manually can be very time consuming and not much fun. Thankfully, managing your projects is easier and more efficient with Duo Workflow – even when the project is Duo Workflow.\n\n## How it works\n\nIf you've used AI coding assistants before, you're likely familiar with their chat-based interfaces (like [GitLab Duo Chat](https://about.gitlab.com/blog/gitlab-duo-chat-now-generally-available/)). These chats can provide code snippets, suggested directory structures, and more. GitLab Duo Workflow fundamentally differs from these in several key ways:\n\n1. It's contextually aware of your entire codebase, not just open files or highlighted text. You can also add context from relevant GitLab issues or merge requests.\n2. It develops and executes a multi-stage plan, allowing for more complete solutions.\n3. It keeps a human in the loop, ensuring you understand and approve of what is happening at each step of the way.\n4. It stages changes to your repository, giving you control over what to accept.\n\nDuo Workflow uses an agentic AI approach, powered by Claude 3.5 Sonnet, to understand code semantics while maintaining a deep integration with GitLab's DevOps platform.\n\nLet's walk through a real-world example of refactoring a JavaScript file using GitLab Duo Workflow. In our case, we started with the prompt:\n\n```unset\nI need to refactor a JavaScript file packages/webview_duo_workflow/src/app/stores/workflow.js to TypeScript. 
Please analyze the code, identify any issues, and convert it to TypeScript with appropriate type definitions.\n```\n\n### Analysis\n\nAfter prompting, Duo Workflow first analyzed the codebase structure:\n\n* Identified that `workflow.js` is a Pinia store implementation\n* Noted it uses imports from various modules including `constants.ts` (which was already in TypeScript)\n* Recognized GraphQL queries import exists but the file wasn't accessible\n\nBased on this analysis, Duo Workflow identified specific typing needs:\n\n**Type definitions needed for:**\n\n* Store State interface with precise types:\n  * `areWorkflowsLoading`: boolean\n  * `isLoadingWorkflow`: boolean\n  * `isLoadingRecentWorkflows`: boolean\n  * `recentWorkflows`: array\n  * `workflows`: array\n  * `workflowsPageInfo`: object\n  * `initialState`: object\n  * `activeWorkflow`: object with specific shape\n\n**Interface definitions required:**\n\n* Workflow interface with properties:\n  * `id`: string\n  * `goal`: string\n  * `status`: string\n  * `checkpoint`: object\n  * `type`: string\n\n**Parameter types needed for:**\n\n* GraphQL response types for workflows data\n* Event handler parameter types\n* Function parameter types for actions like `getWorkflowById`, `runWorkflow`, etc.\n\nDuo Workflow identified several complex patterns requiring special handling:\n\n* Uses optional chaining extensively\n* Has nested object destructuring\n* Contains GraphQL integration\n* Uses Pinia store patterns with state, getters, and actions\n* Implements an events mapping system\n\nThe system recognized external type dependencies:\n\n* Pinia store types\n* GraphQL query response types\n* Functions from an internal Bridge module were also properly recognized (for `sendRequest`, `sendGraphqlRequest`)\n\nThe final analysis concluded: \"The file will need to be renamed from .js to .ts and appropriate type definitions added while maintaining the existing functionality.\"\n\n### Planning\n\nBased on this thorough 
analysis, Duo Workflow created a structured plan consisting of specific tasks:\n\n1. **Create a new TypeScript file** `workflow.ts` in the same directory as the original JS file.\n\n2. **Create TypeScript interfaces** for the store state and workflow types at the top of the new file, including:\n\n   * WorkflowState interface for store state\n   * Workflow interface for workflow object structure\n   * WorkflowPageInfo interface for pagination info\n   * WorkflowCheckpoint interface for checkpoint data\n\n3. **Copy and add type annotations** to the store implementation from `workflow.js`, including:\n\n   * State return type\n   * Getter return types\n   * Action parameter types\n   * Action return types\n\n4. **Update all imports** in `workflow.ts` to:\n\n   * Add type imports from Pinia\n   * Update local imports to use .ts extensions where applicable\n   * Import any required type definitions\n\n### Execution\n\nAfter the plan is complete, we were prompted to  “Approve plan.” Before clicking approve, we reviewed each step and ensured we were comfortable with the plan. After approval, Duo Workflow showed its progress through each step with visual indicators and explanations of what API operations were supporting each task (like \"Supported by: create_file_with_contents\" or \"Supported by: edit\\_file\"). When the work was done, we reviewed the changes before committing.\n\n## What we learned\n\nThis JavaScript-to-TypeScript migration example showcases a significant evolution in AI-assisted development. What makes GitLab Duo Workflow particularly interesting is its approach to:\n\n### Task-oriented programming vs. suggestion-only assistance\n\nUnlike many AI assistants that simply offer code snippets or suggestions, Duo Workflow understands and executes complete tasks. 
The difference is significant — rather than saying \"here's some TypeScript code you might use,\" it says \"I'll convert this file for you, here's my plan, and here are the changes I'm making.\"\n\n### Contextual understanding of the entire codebase\n\nThe tool demonstrates awareness of project structure, related files (like constants.ts and GraphQL queries), and the relationships between components. This contextual understanding allows for more sophisticated conversions than localized transformations.\n\n### Step-by-step execution with visibility\n\nThe plan-based approach, with clear steps and progress indicators, provides transparency into what would otherwise be a black-box process. This allows developers to understand what the AI is doing and how it's approaching the problem.\n\n> GitLab Duo Workflow is currently available in private beta for GitLab Ultimate customers. [Sign up for the waitlist today!](https://about.gitlab.com/gitlab-duo/agent-platform/)\n\n## Learn more\n\n- [Agentic AI guides and resources](https://about.gitlab.com/blog/agentic-ai-guides-and-resources/)\n- [GitLab Duo Workflow](https://about.gitlab.com/blog/gitlab-duo-workflow-enterprise-visibility-and-control-for-agentic-ai/)\n- [What is agentic AI?](https://about.gitlab.com/topics/agentic-ai/)",[9,724,482,704],{"slug":2675,"featured":91,"template":680},"refactoring-javascript-to-typescript-with-gitlab-duo-workflow","content:en-us:blog:refactoring-javascript-to-typescript-with-gitlab-duo-workflow.yml","Refactoring Javascript To Typescript With Gitlab Duo 
Workflow","en-us/blog/refactoring-javascript-to-typescript-with-gitlab-duo-workflow.yml","en-us/blog/refactoring-javascript-to-typescript-with-gitlab-duo-workflow",{"_path":2681,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":2682,"content":2688,"config":2693,"_id":2695,"_type":14,"title":2696,"_source":16,"_file":2697,"_stem":2698,"_extension":19},"/en-us/blog/remediating-vulnerabilities-with-insights-and-ai",{"title":2683,"description":2684,"ogTitle":2683,"ogDescription":2684,"noIndex":6,"ogImage":2685,"ogUrl":2686,"ogSiteName":693,"ogType":694,"canonicalUrls":2686,"schema":2687},"Remediating vulnerabilities with GitLab's security insights and AI","Learn how to leverage vulnerability insights and the Explain this Vulnerability AI feature to not only resolve a vulnerability, but also understand it.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749662877/Blog/Hero%20Images/security-cover-new.png","https://about.gitlab.com/blog/remediating-vulnerabilities-with-insights-and-ai","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Remediating vulnerabilities with GitLab's security insights and AI\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Fernando Diaz\"}],\n        \"datePublished\": \"2023-08-31\",\n      }",{"title":2683,"description":2684,"authors":2689,"heroImage":2685,"date":2690,"body":2691,"category":10,"tags":2692},[1391],"2023-08-31","\nWe recently introduced [GitLab Duo](https://about.gitlab.com/gitlab-duo/), a complete suite of AI capabilities to power your DevSecOps workflows. GitLab Duo's AI features not only enable you to write secure code faster, but also enhance productivity by providing helpful explanations and insights into your code. For instance, you can harness the power of AI to prevent security breaches. 
In this tutorial, we will go over the Explain this Vulnerability AI feature, which is in beta, and how it can be used with vulnerability insights to remediate vulnerabilities.\n\nYou will learn the following:\n* How the Explain this Vulnerability AI feature works\n* Prerequisites for Explain this Vulnerability and other GitLab AI features\n* How GitLab Vulnerability Insights assists in remediation\n* How to remediate a SQL-injection vulnerability using GitLab's vulnerability insights and Explain this Vulnerability\n* Additional GitLab AI capabilities (GitLab Duo currently requires connectivity to access Google large language models (LLMs), however, there are plans to expand these features to limited-connectivity environments)\n\nSee the following video for a quick overview of Vulnerability Insights + AI \"Explain this Vulnerability\". \n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/1UagZx_CUks\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\nYou can also see a detailed walkthrough of [Leveraging GitLab Vulnerability Insights + AI to Remediate a SQL-Injection](https://youtu.be/EJXAIzXNAWQ?feature=shared) in the [Solving a SQL injection using vulnerability insights and AI](#solving-a-sql-injection-using-vulnerability-insights-and-ai) section below.\n\n## What is the Explain this Vulnerability AI feature?\nThe [Explain this Vulnerability](https://docs.gitlab.com/ee/user/ai_features.html#explain-this-vulnerability-in-the-web-ui) feature\nleverages an LLM powered by Google AI to assist you in securing your application by:\n* Summarizing detected vulnerabilities\n* Helping developers and security analysts understand the vulnerability and its implications\n* Showing how a vulnerability can be exploited with detailed example code\n* Providing in-depth solutions to the vulnerability\n* Providing suggested mitigation along with sample code tuned 
toward your project's programming language\n\nTo begin using Explain this Vulnerability, you must have the following prerequisites configured:\n\n* [GitLab Ultimate](https://about.gitlab.com/pricing/ultimate/) SaaS subscription\n* [Experiment features enabled](https://docs.gitlab.com/ee/user/group/manage.html#enable-experiment-features)\n* [Third-party AI features enabled](https://docs.gitlab.com/ee/user/group/manage.html#enable-third-party-ai-features)\n* Static application security testing ([SAST](https://docs.gitlab.com/ee/user/application_security/sast/)) vulnerability finding in the default branch of a project\n* [Maintainer](https://docs.gitlab.com/ee/user/permissions.html) or greater role in the vulnerable project \n* [SAST scanner](https://docs.gitlab.com/ee/user/application_security/sast/) enabled in the vulnerable project\n* An active internet connection\n\nOnce the prerequisites have been configured, to begin using Explain this Vulnerability, perform the following steps:  \n\n1) Navigate to the [Vulnerability Report](https://docs.gitlab.com/ee/user/application_security/vulnerability_report/).  \n2) Find a SAST vulnerability finding.  \n3) Scroll to the bottom of the [vulnerability page](https://docs.gitlab.com/ee/user/application_security/vulnerabilities/).  \n4) Press the **Try it out** button in \"Explain this Vulnerability and how to mitigate it with AI\" section.  
\n\n![View of the \"Try it out\" button at bottom of screen](https://about.gitlab.com/images/blogimages/2023-08-31-solving-vulnerabilities-with-insights-and-ai/ai_explain_this_vulnerability_try_it_out_dialog.png)\n\nOnce you click the button, GitLab will begin to generate the following:\n* **What is the vulnerability?**: Details on the vulnerability and how it may affect your application\n* **How can an attacker take advantage of the vulnerability?**: Commands that a malicious actor can use to exploit the vulnerability\n* **How can the vulnerability be fixed?**: Details on how the vulnerability can be remediated\n* **Example of vulnerable code**: The actual vulnerable code in the language of your application\n* **Example of fixed code**: Code showing a fix that should be applied to remediate the vulnerability in the language of your application\n* **References**: Links providing details relevant to the vulnerability\n* **User rating request**: Allows for user input, which is used to improve the model\n\n![AI response depicting the above list](https://about.gitlab.com/images/blogimages/2023-08-31-solving-vulnerabilities-with-insights-and-ai/ai_explain_this_vulnerability_results.png)\n\nThis information can be used together with vulnerability insights to resolve the vulnerability. Now let's discuss vulnerability insights.\n\n## Vulnerability insights\nVulnerability insights provide detailed information on a vulnerability and how to resolve it. 
This detailed information\nincludes:\n\n* **Description**: A detailed description of the vulnerability and its implications\n* **Severity**: The severity of the vulnerability based on the [CVSS rating](https://nvd.nist.gov/vuln-metrics/cvss)\n* **Project**: The project where the vulnerability was found\n* **Tool**: The type of scanner that found the vulnerability\n* **Scanner**: The specific name of the scanner that found the vulnerability\n* **Location**: The line of code where the vulnerability is present\n* **Identifiers**: Links that identify and provide additional information on the vulnerability such as the CVE/CWE page\n* **Training**: Security training available from our partners to educate developers on the vulnerability\n* **Solution**: Information on how to remediate the vulnerability\n* **Method**: The [REST API method](https://www.w3schools.in/restful-web-services/rest-methods) used to exploit the vulnerability (dynamic scanners only)\n* **URL**: The URL in which the vulnerability was detected (dynamic scanners only)\n* **Request/response**: The request sent and response received when exploiting the vulnerability (dynamic scanners only)\n\n**Note**: Results may vary depending on the scanner used.\n\nHaving all this information not only allows you to resolve a vulnerability with ease but also enhances your security\nknowledge. All these insights are provided as a single source of truth that both developer and security teams can view and\ntake action on asynchronously.\n\nDevelopers can leverage insights within a merge request (MR). The MR insights show the vulnerabilities in the diff\nbetween a feature branch and the branch you are merging into. 
This allows you to continuously iterate until you have resolved\na vulnerability and then alert security engineers when approval is required, giving developers the power to resolve\nvulnerabilities themselves.\n\n![MR insights sample](https://about.gitlab.com/images/blogimages/2023-08-31-solving-vulnerabilities-with-insights-and-ai/vulnerability_insights_mr_view.png)\n\nThe security team can leverage insights via the [vulnerability report](https://docs.gitlab.com/ee/user/application_security/vulnerability_report/). The vulnerability report shows vulnerabilities present in the `default` branch, which is typically linked to production. From here, the security team can collaborate on a resolution as well as triage and manage vulnerabilities.\n\n![Vulnerability report sample](https://about.gitlab.com/images/blogimages/2023-08-31-solving-vulnerabilities-with-insights-and-ai/vulnerability_insights_vulnerability_report.png)\n\n**Note**: Currently, the Explain this Vulnerability feature can only be seen in the Vulnerability Report view. It is currently\nbeing considered for the MR view, see [future iterations under consideration](https://gitlab.com/groups/gitlab-org/-/epics/10284#future-iterations-under-consideration) for more information.\n\n## Solving a SQL injection using vulnerability insights and AI\nBy leveraging both vulnerability insights and Explain this Vulnerability, we have all the resources necessary to\nnot only resolve a vulnerability but also understand it. Let's see how we can use these features to [solve a SQL injection](https://gitlab-de.gitlab.io/tutorials/security-and-governance/devsecops/simply-vulnerable-notes/documentation/anatomy_of_a_vulnerability/). \n\nNow let's go over the steps to remediate a SQL injection. 
You can follow along with the video:\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/EJXAIzXNAWQ\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\n**Privacy notice**: Explain this Vulnerability only uses `public repos` to train the LLM. Code in private repositories\nis not transferred to the LLM.\n\nI will be using the [Simple Notes project](https://gitlab.com/gitlab-de/tutorials/security-and-governance/devsecops/simply-vulnerable-notes) to showcase this. You can set up DevSecOps within GitLab yourself by going over the following [tutorial](https://gitlab-de.gitlab.io/tutorials/security-and-governance/devsecops/simply-vulnerable-notes/). After you have done so, you can run through the following:\n\n1) Navigate to **Secure > Vulnerability Report**.\n\n2) Sort by **SAST** under **Scanner**.\n\n3) Find and select a SQL injection vulnerability. A SQL injection will be titled something like\n`Improper Neutralization of Special Elements used in an SQL Command ('SQL Injection')`.\n\n4) Examine the vulnerability insights.\n\n* **Description**: Detected possible formatted SQL query.\n* **Location**: File: [notes/db.py:100](https://gitlab.com/gitlab-de/tutorials/security-and-governance/devsecops/simply-vulnerable-notes/-/blob/24ff1847aa70c4d51482fe28f019e3724b399aaf/notes/db.py#L100)\n* **Identifier**: [bandit.B608](https://semgrep.dev/r/gitlab.bandit.B608), [CWE-89](https://cwe.mitre.org/data/definitions/89.html)\n* **Solution**: Use parameterized queries instead.\n* **Training**: [Secure Code Warrior](https://portal.securecodewarrior.com/?utm_source=partner-integration:gitlab&partner_id=gitlab#/contextual-microlearning/web/injection/sql/python/vanilla), [SecureFlag](https://knowledge-base.secureflag.com/vulnerabilities/sql_injection/sql_injection_python.html), and 
[Kontra](https://application.security/gitlab/free-application-security-training/owasp-top-10-sql-injection)\n\n![SQL Injection Walkthrough - Insights](https://about.gitlab.com/images/blogimages/2023-08-31-solving-vulnerabilities-with-insights-and-ai/vulnerability_insights_vulnerability_report.png)\n\n5) Scroll down to the **Explain this vulnerability and how to mitigate it with AI** section and click the **Try it out** button.\n\n**Privacy notice**: If the **Send code to prompt** radio button is selected, response quality is improved. However, the actual code is\nused in a query to the LLM (even in private repositories).\n\n![SQL Injection Walkthrough - AI \"Try it out\" button](https://about.gitlab.com/images/blogimages/2023-08-31-solving-vulnerabilities-with-insights-and-ai/ai_explain_this_vulnerability_try_it_out_dialog.png)\n\n6) Examine the provided AI solutions.\n\n![SQL Injection Walkthrough - AI response](https://about.gitlab.com/images/blogimages/2023-08-31-solving-vulnerabilities-with-insights-and-ai/ai_explain_this_vulnerability_results.png)\n\n7) Exploit the vulnerability\nWe can use the information provided in the **AI response**, the samples in the **vulnerability insight CWE identifier**,\nand the application's [API guide](https://gitlab-de.gitlab.io/tutorials/security-and-governance/devsecops/simply-vulnerable-notes/documentation/api_guide/) to generate a malicious curl command as follows:\n\n```bash\n# A REGULAR API-CALL\n$ curl http://{LOAD_BALANCER_IP}/{APPLICATION_PATH}/api\n\n{\"Note\":\"[(1, 'cat'), (2, 'dog'), (3, 'frog'), (4, 'hog')]\"}\n\n# API CALL PASSING '1 or 1=1' AS SHOWN IN AI RESPONSE AND DETAILED IN IDENTIFIERS\n# NOTE: `1%20or%201%3D1` IS URL ENCODED '1 or 1=1'\n$ curl http://{LOAD_BALANCER_IP}/{APPLICATION_PATH}/api\\?id\\=1%20or%201%3D1\n\n{\"Note\":\"[(1, 'cat'), (2, 'dog'), (3, 'frog'), (4, 'hog'), (5, 'meow'), (6, 'bark'), (7, 'ribbit'), (8, 'grunt')]\"}\n```\n\nThis shows us that we can exploit the SQL injection since we 
exposed data we should not have access to.\nExploiting a vulnerability is not always as simple, so it is important to combine resources as noted above\nto figure out exploitability.\n\n8) Determine a fix.\n\nNow that we know this is a problem within our system, we can use the provided information to create a merge request (MR) to resolve\nand then test the MR in a non-production environment. Reviewing the vulnerability insights and AI response, we know we can solve this\nin a variety of ways. For example, we can:\n  \n* Use parameterized queries rather than directly calling the query\n* Sanitize the input before passing it to the `execute()` method\n\nTo enhance our knowledge, we should read [CWE-89](https://cwe.mitre.org/data/definitions/89.html) provided in the Identifiers.\n\n9) Open the [GitLab WebIDE](https://docs.gitlab.com/ee/user/project/web_ide/) or editor of your choice.\n\n10) Open the vulnerable file and scroll to the affected line of code. We found this using the information provided in the insights.\n\n11) Apply the suggested change by reviewing the vulnerability insights and AI response. 
I changed the following:\n\n```python\ntry:\n  query = \"SELECT id, data FROM notes WHERE (secret IS FALSE AND id = %s)\" % id\n  if admin:\n    query =\"SELECT id, data, secret FROM notes WHERE (id = %s)\" % id\n  # NOT USING A PARAMETERIZED QUERY - SQL INJECTION CAN BE PASSED IN (,id)\n  cur.execute(query)\nexcept Exception as e:\n  note.logger.error(\"Error: cannot select note by id - %s\" % e)\n```\n\nto \n\n```python\ntry:\n  query = \"SELECT id, data FROM notes WHERE (secret IS FALSE AND id = %s)\"\n  if admin:\n    query =\"SELECT id, data, secret FROM notes WHERE (id = %s)\"\n  # USING A PARAMETERIZED QUERY - SQL INJECTION CANNOT BE PASSED IN (,id)\n  cur.execute(query, (id,))\nexcept Exception as e:\n  note.logger.error(\"Error: cannot select note by id - %s\" % e)\n```\n\nWe know this is the solution because parameterized queries as explained do not allow actual SQL\ncommands to be run. Therefore, a SQL injection cannot be passed as the `id`. Adding a parameterized\nquery is easy since it is built into the Python db library we are using.\n\nThere may be multiple solutions to a vulnerability. It is up to the user to decide what is best\nfor their application and workflow. The AI response provides a typical solution, but more can be\nexamined and applied. For example, the AI response said we can add the following:\n\n```python\ncur.execute(query.replace(\"'\", \"''\"))\n```\n\nThis would escape the single quotes in the input, making it safe to pass to the `execute()` method.\nIt is a valid solution with less code required. However, I wanted to restructure my code, so I applied\nanother solution found in the vulnerability insights.\n\n12) Create an MR with the fix. 
In my environment, feature branches are automatically deployed\nto a new environment independent from production so we can test our features before merging them\nto production.\n\n13) Test the change in a non-production environment.\n\nOnce we push the MR, we can see if the vulnerability has been resolved and we can test in a non-production\nenvironment:\n\n```bash\n# A REGULAR API-CALL\n$ curl http://{LOAD_BALANCER_IP}/{NEW_BRANCH_FIXED_APPLICATION_PATH}/api\n\n{\"Note\":\"[(1, 'cat'), (2, 'dog'), (3, 'frog'), (4, 'hog')]\"}\n\n# API CALL PASSING '1 or 1=1' AS SHOWN IN AI RESPONSE AND DETAILED IN IDENTIFIERS\n# NOTE: `1%20or%201%3D1` IS URL ENCODED '1 or 1=1'\n$ curl http://{LOAD_BALANCER_IP}/{NEW_BRANCH_FIXED_APPLICATION_PATH}/api\\?id\\=1%20or%201%3D1\n\n{\"Note\":\"[(1, 'cat')]\"}\n```\n\nWe can see that now the additional query parameters `or 1=1` are ignored and only the first element\nis returned, meaning only the `1` was passed. We can further test if we can get item `5` which we should\nnot have access to:\n\n```bash\n# API CALL PASSING '5 or 1=1' AS SHOWN IN AI RESPONSE AND DETAILED IN IDENTIFIERS\n# NOTE: `5%20or%201%3D1` IS URL ENCODED '5 or 1=1'\n$ curl http://{LOAD_BALANCER_IP}/{NEW_BRANCH_FIXED_APPLICATION_PATH}/api\\?id\\=5%20or%201%3D1\n{\"Note\":\"[]\"}\n```\n\nSuccess, the SQL injection is no longer present!\n\n14) Merge into production.\n\nNow that we know the vulnerability has been resolved we can go ahead and merge our fix! This is how you can use vulnerability insights\nto help resolve your vulnerabilities. If you wish to test all this for yourself, check out the complete [GitLab DevSecOps tutorial](https://gitlab-de.gitlab.io/tutorials/security-and-governance/devsecops/simply-vulnerable-notes/).\n\n## Additional GitLab AI features\nAs we have seen above, Explain this Vulnerability assists you in remediating the vulnerabilities within your\ndefault branch, but that's not the only AI feature GitLab has available! 
Other AI features to enhance your productivity include:\n\n* [Code Suggestions](https://docs.gitlab.com/ee/user/project/repository/code_suggestions.html): Enables you to write code more efficiently by viewing code suggestions as you type\n* [Suggested Reviewers](https://docs.gitlab.com/ee/user/project/merge_requests/reviews/#suggested-reviewers): Helps you receive faster and higher-quality reviews by automatically finding the right people to review a merge request\n* [Value Stream Forecasting](https://docs.gitlab.com/ee/user/analytics/value_streams_dashboard.html): Predicts productivity metrics and identifies anomalies across your software development lifecycle\n* [Summarize Issue Comments](https://docs.gitlab.com/ee/user/ai_features.html#summarize-issue-discussions): Quickly gets everyone up to speed on lengthy conversations to ensure you are all on the same page\n* [Summarize Proposed Merge Request Changes](https://docs.gitlab.com/ee/user/ai_features.html#summarize-my-merge-request-review): Helps merge request authors drive alignment and action by efficiently communicating the impact of their changes\n* [Summarize Merge Request Review](https://docs.gitlab.com/ee/user/ai_features.html#summarize-merge-request-changes): Enables better handoffs between authors and reviewers and helps reviewers efficiently understand merge request suggestions\n* [Generate Tests in Merge Requests](https://docs.gitlab.com/ee/user/ai_features.html#generate-suggested-tests-in-merge-requests): Automates repetitive tasks and helps you catch bugs early\n* [GitLab Chat](https://docs.gitlab.com/ee/user/ai_features.html#gitlab-duo-chat): Helps you quickly identify useful information in large volumes of text, such as documentation\n* [Explain this Code](https://docs.gitlab.com/ee/user/ai_features.html#explain-selected-code-in-the-web-ui): Allows you to get up to speed quickly by explaining source code\n\nVisit our [GitLab Duo site](https://about.gitlab.com/gitlab-duo/) to learn more about these 
features, GitLab's mission around AI, and our partnership with Google.\n",[9,678,724],{"slug":2694,"featured":6,"template":680},"remediating-vulnerabilities-with-insights-and-ai","content:en-us:blog:remediating-vulnerabilities-with-insights-and-ai.yml","Remediating Vulnerabilities With Insights And Ai","en-us/blog/remediating-vulnerabilities-with-insights-and-ai.yml","en-us/blog/remediating-vulnerabilities-with-insights-and-ai",{"_path":2700,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":2701,"content":2706,"config":2711,"_id":2713,"_type":14,"title":2714,"_source":16,"_file":2715,"_stem":2716,"_extension":19},"/en-us/blog/self-managed-support-for-code-suggestions",{"title":2702,"description":2703,"ogTitle":2702,"ogDescription":2703,"noIndex":6,"ogImage":820,"ogUrl":2704,"ogSiteName":693,"ogType":694,"canonicalUrls":2704,"schema":2705},"Self-managed support for Code Suggestions (Beta)","Self-managed support for Code Suggestions (Beta) is coming in GitLab 16.1.","https://about.gitlab.com/blog/self-managed-support-for-code-suggestions","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Self-managed support for Code Suggestions (Beta)\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Roger Woo\"}],\n        \"datePublished\": \"2023-06-15\",\n      }",{"title":2702,"description":2703,"authors":2707,"heroImage":820,"date":2708,"body":2709,"category":10,"tags":2710},[2062],"2023-06-15","\n\n\u003Ci>This blog post is part of an ongoing series about GitLab's journey to [build and integrate AI/ML into our DevSecOps platform](/blog/ai-ml-in-devsecops-series/). The series starts here: [What the ML is up with DevSecOps and AI?](/blog/what-the-ml-ai/). 
Throughout the series, we'll feature blogs from our product, engineering, and UX teams to showcase how we're infusing AI/ML into GitLab.\u003C/i>\n\nGitLab [Code Suggestions (Beta)](https://docs.gitlab.com/ee/user/project/repository/code_suggestions.html) uses generative AI to suggest code while you’re developing. Since introduction in GitLab SaaS with 15.9, we've heard self-managed customers have wanted to use the feature. We're now bringing Code Suggestions to self-managed instances beginning with GitLab 16.1 (expected to be released June 22). Self-managed users working with VS Code or GitLab’s WebIDE can now receive code suggestions to help accelerate your development efforts while you type.\n\n## How does Code Suggestions work?\nA self-managed instance administrator must enable Code Suggestions on behalf of an organization's entire instance. Once enabled, users of that self-managed instance can authenticate their IDE to GitLab.com infrastructure using a secure handshake with the user's self-managed instance. As users type in their IDE, a context window containing relevant source code is securely transmitted to GitLab’s infrastructure, which will return an AI-generated code suggestion. GitLab does not have any visibility into a self-managed customer’s code other than what is sent to generate the Code Suggestion. GitLab does not persist any customer code sent in that context window nor train on customer data.\n\nIn this video, Senior Backend Engineer [Nikola Milojevic](https://gitlab.com/nmilojevic1) demonstrates how to set up and configure GitLab’s Code Suggestions for self-managed users on VS Code.\n\n\u003Ciframe width=\"560\" height=\"315\" src=\"https://www.youtube-nocookie.com/embed/-A4amG3E49Y\" frameborder=\"0\" allow=\"accelerometer; autoplay; clipboard-write; encrypted-media; gyroscope; picture-in-picture\" allowfullscreen>\u003C/iframe>\n\n## How Code Suggestions uses data\nGitLab is mindful of privacy when we design our AI-powered features. 
With Code Suggestions, we securely transmit the data needed to generate a code suggestion, and we process all personal data in accordance with our [privacy statement](https://about.gitlab.com/privacy/). Our VS Code Workflow extension will securely transmit a minimal amount of data required to generate a code suggestion. GitLab does not receive any information outside of an IDE’s context. Read more about [Code Suggestions data usage](https://docs.gitlab.com/ee/user/project/repository/code_suggestions.html#data-privacy).\n\n## Try Code Suggestions\nWe will be provisioning access on a customer-by-customer basis for this initial iteration of GitLab Code Suggestions (Beta) on self-managed instances. Please leave a note in [this issue](https://gitlab.com/gitlab-org/gitlab/-/issues/415393) tagging your Customer Success Manager for help with enabling Code Suggestions once your instance is ready.\n\nCode Suggestions for self-managed instances will require GitLab 16.1. Customers may try Code Suggestions either via GitLab’s WebIDE or VS Code’s GitLab Workflow Extension (version [3.67+](https://marketplace.visualstudio.com/items?itemName=GitLab.gitlab-workflow&ssr=false#version-history)).\n\n## Iterating on AI/ML features\nThis is just the start of the ways we're infusing GitLab with AI/ML capabilities to help GitLab users become more efficient and effective at their jobs. We are [looking across the software development lifecycle](/blog/what-the-ml-ai/) for painful and time-consuming tasks that are ideal for AI-assisted features. We'll continue to share these demos throughout this blog series.\n\nInterested in using these AI-generated features? [Join our waitlist](https://forms.gle/9eeUkPJauKsbLaoz5) and share your ideas.\n\nContinue reading our \"[AI/ML in DevSecOps](https://about.gitlab.com/blog/ai-ml-in-devsecops-series/)\" series.\n\n_Disclaimer: This blog contains information related to upcoming products, features, and functionality. 
It is important to note that the information in this blog post is for informational purposes only. Please do not rely on this information for purchasing or planning purposes. As with all projects, the items mentioned in this blog and linked pages are subject to change or delay. The development, release, and timing of any products, features, or functionality remain at the sole discretion of GitLab._\n",[703,725,704,9],{"slug":2712,"featured":6,"template":680},"self-managed-support-for-code-suggestions","content:en-us:blog:self-managed-support-for-code-suggestions.yml","Self Managed Support For Code Suggestions","en-us/blog/self-managed-support-for-code-suggestions.yml","en-us/blog/self-managed-support-for-code-suggestions",{"_path":2718,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":2719,"content":2725,"config":2732,"_id":2734,"_type":14,"title":2735,"_source":16,"_file":2736,"_stem":2737,"_extension":19},"/en-us/blog/six-reasons-cisco-learning-and-certifications-adopted-gitlab",{"title":2720,"description":2721,"ogTitle":2720,"ogDescription":2721,"noIndex":6,"ogImage":2722,"ogUrl":2723,"ogSiteName":693,"ogType":694,"canonicalUrls":2723,"schema":2724},"6 reasons Cisco Learning and Certifications adopted GitLab","Learn what Cisco Learning and Certifications's principal engineer and lead architect shared about the organization's use of GitLab at our DevSecOps World Tour in Chicago.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749670302/Blog/Hero%20Images/lightbulb-book.png","https://about.gitlab.com/blog/six-reasons-cisco-learning-and-certifications-adopted-gitlab","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"6 reasons Cisco Learning and Certifications adopted GitLab\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Sharon Gaudin\"}],\n        \"datePublished\": \"2023-09-05\",\n      
}",{"title":2720,"description":2721,"authors":2726,"heroImage":2722,"date":2727,"body":2728,"category":2729,"tags":2730},[1334],"2023-09-05","\nAfter adopting the GitLab DevSecOps Platform, the learning and certification arm of tech giant Cisco Systems, Inc. saw its toolchain shrink and was able to offer its entire software development team a way to improve collaboration and accelerate production.\n\nThat’s part of the story that Hank Preston, principal engineer and lead architect with [Cisco Learning and Certifications](https://www.cisco.com/c/en/us/training-events/training-certifications.html), shared in an on-stage interview at the Chicago stop of [GitLab’s DevSecOps World Tour](https://about.gitlab.com/events/devsecops-world-tour/).\n\nPreston also spent part of his conversation with Nico Ochoa, senior major account executive at GitLab, talking about the rapid rise in interest in AI and how that is likely to affect everything from how they create software to how newer developers learn to code.\n\nHere are some of the opportunities Cisco Learning and Certifications is gaining from using GitLab:  \n \n## 1. Getting a handle on version control\nPreston said one of the biggest challenges he was looking to overcome by adopting a DevSecOps platform was resolving and [preventing conflicts](/topics/version-control/how-implement-version-control/) they had keeping track of code modifications and version updates, which can be easy to lose sight of when handling them manually. For example, the team had been storing templates and configuration pieces in different places and ended up having an “extreme inability” to know what had been changed and where it went, he said. Using a platform gives the team a way to more easily and clearly manage source and version controls. “It gave us a way to actually keep everything in one place, and understand the trajectory and history of changes,” he said.\n\n## 2. 
Ensuring accountability\n“Gaining accountability was a big one for us,” Preston told the audience. “We needed to know when something changed, who changed it, and who approved that change.” In an efficient, high-performing DevSecOps team, this kind of transparent accountability is critical. Both the development and operations teams have an equal stake in DevSecOps success. The problem is that it can be difficult to do when team structures and responsibilities are outdated or haven’t been updated in documentation. Clearly distributing functions and noting who needs to do a task, as well as when and who accomplished a task, makes this work. “When I taught a bunch of network engineers how to do merge requests and branching, it became less of a challenge for us,” Preston said. \n\n## 3. Consolidating their toolchain\n“[Reducing our toolchain](/blog/too-many-toolchains-a-devops-platform-migration-is-the-answer/) … that was one of the reasons we selected GitLab. I didn't want to have to have a source control tool and a CI tool and an issues tool,” Preston said.\n\nCisco Learning and Certifications still uses Jira — a decision Preston said was not his own. “I'm looking forward to potentially getting rid of [Jira] in the future. But not having to manage a bunch of tools was a big value for us because then there are less things to integrate, less things to upgrade, and less things I have to train people on how to use,” he added.\n\n## 4. Boosting collaboration \nWhen different team members want to [collaborate](/blog/5-ways-collaboration-boosts-productivity-and-your-career/) on a project, sharing responsibilities and information, it’s critical for them to be able to all work in the same place. Preston explained that having a single, end-to-end platform that everyone works in means teammates don’t have to go looking for something across multiple tools. “Now everybody knows where to go to find the code and the templates they need. 
And the automation pieces are right there,” he added. “We don’t need to go looking for things or logging into, say, Mark's particular OneDrive folder to find a piece we need.” \n\n## 5. Having a single source of truth\n“I'm a big proponent of the concept of a single source of truth,” said Preston. “I tell people that the network shouldn't be the source of truth. That's the implementation of what our source of truth is.”\n\nCisco Learning and Certifications uses GitLab as the source of truth for a lot of its configuration templates, automation scripts and jobs, and infrastructure as code, according to Preston. “So whether it’s our data centers or sites, all the data should be aggregated and stored according to a single reference point. We have a plan and a way of doing things and everyone can see what that is,” he said.\n\n## 6. Speeding up production\nPreston also noted he’s using the platform to gain insight into and improve the time it takes to move a software project from ideation to development and into deployment. “One of the metrics I'm trying to use with our team now is how long it takes us to get something from an idea to working software,” he said. “If I’m trying to get an update into production or if I'm trying to fix something on the infrastructure stack, how long does it take us to get to where we feel comfortable pushing it into production? That's a metric we're watching. I don't want it to take 18 months to go from an idea to release. I needed that to be much faster.”\n\n## How AI could affect software creation, developer learning\nDuring his on-stage talk, Preston also turned his attention to artificial intelligence (AI), speaking about the growing call for the use of [AI in DevSecOps](/blog/ai-assisted-code-suggestions/), how it may be regulated, and how it could affect software developers.\n\n“AI in DevOps has changed dramatically just over the last three months or so,” he said. 
“The [AI transition](/blog/extending-code-suggestions/) has been rapid. I can't even pretend to guess what the next two years of it will look like, let alone the next five years. I think it’s an interesting challenge, and, well, to be honest, it makes me personally nervous.”\n\nHe added that he’s concerned about the issue of intellectual property and who owns the code that AI generates. Will the software built by AI be owned by the company the code was built for, or by the person or company that created the AI?\n\n“The intellectual property question is a big one. It’s something that has to be figured out,” he said.\n\nAnother thing that nags at Preston is how using AI in DevSecOps will affect the way people learn to develop code if it’s automatically being generated for them. Will people still become coding wizards if they don’t actually have to do it themselves?\n\n“I think about how this will affect the path of software developers and engineers who are coming onto the team,” he said. “How will they become our coding experts in the future? I love the code generation process but it also makes me worried. I wonder — and part of me thinks I'm just being the grouchy old man — how do we teach the new engineers how to make a connection to MySQL if the minute they type ‘M-Y-S-Q-L,’ the code comes up? That’s a scary spot. I know there's a lot of value to the grunt work being thinned out, but there’s value that comes with the doing and learning.”\n\nAnswering a question about whether Cisco allows developers to use AI-generated code, Preston replied, “Generally, no.” He said executives need to work through intellectual property questions first, but AI is a tool they plan to implement.\n\nPreston also was asked about maintaining standards when using AI to generate code. “We're looking for AI solutions we can leverage that will meet the coding requirements we have across the board,” he said. “But I think the question on standards is important. 
It's going to be the same way we handle it when people write code. Code will all have to be checked against set standards, whether it’s written by people or AI.”\n\n_Preston’s team within Cisco Learning and Certifications is responsible for creating and maintaining the software needed for the digital learning platform that offers instant user access to training information, classes, course materials, and exam preparation resources. Cisco employees use the platform to gain certifications, like CCNA, CCMP, and CCIE._\n\n_GitLab’s DevSecOps World Tour was designed to enable everyone — from technology champions to executives and software development team members — to gather and learn about the ideas and technologies driving the DevSecOps transformation. Learn more about our [DevSecOps World Tour](https://about.gitlab.com/events/devsecops-world-tour/)._\n","customer-stories",[703,2731,9],"customers",{"slug":2733,"featured":6,"template":680},"six-reasons-cisco-learning-and-certifications-adopted-gitlab","content:en-us:blog:six-reasons-cisco-learning-and-certifications-adopted-gitlab.yml","Six Reasons Cisco Learning And Certifications Adopted Gitlab","en-us/blog/six-reasons-cisco-learning-and-certifications-adopted-gitlab.yml","en-us/blog/six-reasons-cisco-learning-and-certifications-adopted-gitlab",{"_path":2739,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":2740,"content":2746,"config":2752,"_id":2754,"_type":14,"title":2755,"_source":16,"_file":2756,"_stem":2757,"_extension":19},"/en-us/blog/solving-complex-challenges-with-gitlab-duo-workflow",{"title":2741,"description":2742,"ogTitle":2741,"ogDescription":2742,"noIndex":6,"ogImage":2743,"ogUrl":2744,"ogSiteName":693,"ogType":694,"canonicalUrls":2744,"schema":2745},"Solving complex challenges with GitLab Duo Workflow","Learn how a member of the GitLab Customer Success Management team uses agentic AI for real-world problem-solving, including addressing Helm chart limits in the package 
registry.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097663/Blog/Hero%20Images/Blog/Hero%20Images/Workflow%201800x945_2gQoQIbY9NvjLFpXtsxtXy_1750097663612.png","https://about.gitlab.com/blog/solving-complex-challenges-with-gitlab-duo-workflow","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Solving complex challenges with GitLab Duo Workflow\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Johannes Bauer\"}],\n        \"datePublished\": \"2025-04-23\",\n      }",{"title":2741,"description":2742,"authors":2747,"heroImage":2743,"date":2749,"body":2750,"category":10,"tags":2751},[2748],"Johannes Bauer","2025-04-23","As a Customer Success Manager (CSM), I often face complex challenges that require quick, efficient, and innovative solutions. Recently, one of my enterprise customers encountered an issue with Helm chart limits in the GitLab package registry. At GitLab, we are committed to dogfooding our DevSecOps platform, so I turned to [GitLab Duo Workflow](https://about.gitlab.com/gitlab-duo/agent-platform/), our secure, agentic AI offering that is currently in private beta.\n\n[Agentic AI](https://about.gitlab.com/topics/agentic-ai/) represents the next evolution of generative AI, designed to go beyond single-task automation, such as code completion or test generation. While generative AI focuses on creating content based on specific prompts, agentic AI introduces a level of autonomy and context awareness, enabling it to perform complex, multi-step workflows. This advanced AI operates as a capable assistant that understands the broader goals of a task, plans intermediate steps, and executes them efficiently.\n\nAgentic AI, more specifically Duo Workflow, has transformed how I approach problem-solving as a CSM. 
Here's how I used Duo Workflow to resolve a real-world challenge and deliver value to my customer.\n\n## Addressing Helm chart limits in the GitLab package registry\n\nThe customer was dealing with a hardcoded limit for Helm charts in the GitLab package registry, which wasn’t flexible enough for their needs. They required a solution that allowed administrators to configure limits dynamically through the Admin UI. Addressing this issue was critical to maintaining their workflow efficiency and satisfaction.\n\n### How I leveraged GitLab Duo Workflow\n\nTo craft an effective solution, I leveraged Duo Workflow, a game-changer for brainstorming and strategizing with agentic AI.\n\nMy prompt was:\n\n> I am working on the following issue [insert issue number] and would like to resolve it. We have developed the following strategy:\n>\n> 1. Implement the feature as an application setting in the Admin UI, rather than using a configuration file approach.\n> 2. Utilize the existing package_registry jsonb column in the application settings table. Add a new key to this JSON structure to store the Helm chart limit.\n> 3. Create a new section in the Admin UI (/admin/application_settings/ci_cd#js-package-settings) for “specific limits for each format”. This will require some design work to determine how to best display the setting.\n> 4. Add a number field in this new section to allow administrators to set the Helm chart limit.\n> 5. Update the backend logic in the Helm package registry (app/finders/packages/helm/packages_finder.rb) to use this new application setting instead of the hardcoded limit.\n>\n>Please organize your updates into Frontend and Backend changes. Additionally, could you check if there are any specific requirements when adding a new column to the application settings table?\n\nThis prompt laid out a clear plan, enabling Duo Workflow to provide structured recommendations and actionable insights. The strategy developed was straightforward yet impactful. 
The changes were structured into frontend and backend tasks, ensuring clear separation and collaboration.\n\n![Result of GitLab Duo Workflow prompt](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097675/Blog/Content%20Images/Blog/Content%20Images/image3_aHR0cHM6_1750097674930.png)\n\n![Result of GitLab Duo Workflow prompt - 2](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097675/Blog/Content%20Images/Blog/Content%20Images/image2_aHR0cHM6_1750097674931.png)\n\n## Results\n\nAfter implementing the solution together with Duo Workflow, I submitted a merge request, which was successfully merged and became available in GitLab 17.10!\n\nThe result? A flexible, user-friendly way for administrators to configure Helm chart limits, significantly enhancing the customer’s experience and aligning with their operational goals.\n\n![GitLab package registry page](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097675/Blog/Content%20Images/Blog/Content%20Images/image1_aHR0cHM6_1750097674933.png)\n\nThe customer is thrilled to have this functionality available as it provides them with the flexibility and control they need to streamline their workflows. What’s even more exciting is that it only took me **hours** to complete the implementation, compared to the **days** I initially estimated, thanks to Duo Workflow. As someone without prior development experience in GitLab, this was a huge win!\n\nThis experience taught me the immense potential of AI tools like [GitLab Duo](https://about.gitlab.com/gitlab-duo/). By combining human expertise with AI-driven insights, we can address even the most complex challenges effectively. As a CSM, leveraging such tools streamlines problem-solving and strengthens trust and partnership with customers.\n\n## Delivering value\n\nGitLab Duo Workflow empowered me to tackle a complex technical issue head-on and find a solution that exceeded customer expectations. 
If you'd like to try Duo Workflow in your development environment, please [sign up for our private beta waitlist](https://about.gitlab.com/gitlab-duo/agent-platform/).\n\n## Learn more\n- [GitLab Duo Workflow: Enterprise visibility and control for agentic AI](https://about.gitlab.com/blog/gitlab-duo-workflow-enterprise-visibility-and-control-for-agentic-ai/)\n- [Use GitLab Duo Workflow to improve application quality assurance](https://about.gitlab.com/blog/use-gitlab-duo-workflow-to-improve-application-quality-assurance/)\n- [GitLab Duo Workflow documentation](https://docs.gitlab.com/user/duo_workflow/)\n- [What is agentic AI?](https://about.gitlab.com/topics/agentic-ai/)\n- [GitLab Duo](https://about.gitlab.com/gitlab-duo/)",[9,482,725],{"slug":2753,"featured":91,"template":680},"solving-complex-challenges-with-gitlab-duo-workflow","content:en-us:blog:solving-complex-challenges-with-gitlab-duo-workflow.yml","Solving Complex Challenges With Gitlab Duo Workflow","en-us/blog/solving-complex-challenges-with-gitlab-duo-workflow.yml","en-us/blog/solving-complex-challenges-with-gitlab-duo-workflow",{"_path":2759,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":2760,"content":2766,"config":2772,"_id":2774,"_type":14,"title":2775,"_source":16,"_file":2776,"_stem":2777,"_extension":19},"/en-us/blog/southwest-looking-to-help-developers-take-flight",{"title":2761,"description":2762,"ogTitle":2761,"ogDescription":2762,"noIndex":6,"ogImage":2763,"ogUrl":2764,"ogSiteName":693,"ogType":694,"canonicalUrls":2764,"schema":2765},"Southwest looking to help developers take flight","Learn how the airline's DevOps teams are dramatically increasing their ability to detect and resolve problems with GitLab.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749665272/Blog/Hero%20Images/AdobeStock_380312133.jpg","https://about.gitlab.com/blog/southwest-looking-to-help-developers-take-flight","\n                        {\n        \"@context\": \"https://schema.org\",\n        
\"@type\": \"Article\",\n        \"headline\": \"Southwest looking to help developers take flight\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Sharon Gaudin\"}],\n        \"datePublished\": \"2024-01-30\",\n      }",{"title":2761,"description":2762,"authors":2767,"heroImage":2763,"date":2768,"body":2769,"category":2729,"tags":2770},[1334],"2024-01-30","Southwest Airlines Co. is working to make developers’ jobs easier.\n\nIT leaders at the world's largest low-cost carrier are moving to eliminate time-consuming and repetitive tasks from developers’ workflows, freeing their time and increasing their ability to focus on bigger projects.\n\n“The way we do that is by getting things out of their way,” said Jim Dayton, vice president and CISO at Southwest Airlines. “I am a firm believer that people go into software development because they love the creativity of it. They love the ability to solve problems. What we have to do is get out of their way and get the things that are blocking them out of their way.”\n\nPart of how Dayton is making that happen is by using GitLab’s platform.\n\nDayton talked about Southwest’s efforts to take care of their developers, and promote the work they’re doing, during an on-stage interview at the Dallas stop of [GitLab’s DevSecOps World Tour](https://about.gitlab.com/events/devsecops-world-tour/). He also spent part of his conversation with Reshmi Krishna, director of Enterprise Solutions Architecture at GitLab, discussing what benefits he hopes artificial intelligence capabilities will be able to offer his teams.\n\nThe Southwest exec, who said they’re moving toward a DevOps approach to application development, added that they’re providing developers with more self-service capabilities and knowledge management processes. “We want developers to be able to quickly look up a problem, look up a solution, and reduce context switching,” he said. 
“We need to be able to look at what we are asking them to do and what's preventing them from being able to be productive.”\n\nDayton noted that Southwest, which established a relationship with GitLab in 2019, is focused on creating consistency for its software development processes. In part, that means moving code into a shared GitLab repository. By knowing where all of their code resides, teams will be able to more easily evaluate metrics, and begin to look at creating efficiencies by reusing code. \n\n“We’re also in the process of getting our enterprise pipelines finalized and we’re ready to start migrating teams onto them,” said Dayton. “We're collaborating heavily with a lot of different application development teams to understand what they need in the pipelines that we're building and we’re getting ready to start migrating teams onto them. I think we'll be getting pretty close by the end of the year.”\n\n### The promise of AI\n\nUsing artificial intelligence is one of the ways to enable developers to focus on bigger, more innovative tasks, Dayton explained.\n\nGenerative AI, whether in the form of vulnerability explainers, code suggestions, or code completion, has the ability to dramatically affect workflows across the entire software development lifecycle. Leveraging AI tools built into a platform can increase security and decrease time spent on code reviews and application development.\n\nDayton is looking forward to being able to use AI features to speed and ease development and deployment.\n\n“We want to get the mundane and the bureaucratic out of their way as much as possible,” Dayton said, adding that while there’s a lot of hype around AI, there’s also a lot of promise. “Using AI could do that. I think a great example will be when it can provide a solution to a vulnerability that was just identified or when it can tell us what a piece of code is doing. What is it integrating with? What data is it accessing and why? 
Tell me in plain English, for example, that this particular set of coding has been responsible for 20% of the incidents in this application over the past year. That’s where I think AI can help.”\n\nDayton noted that he doesn’t believe AI will replace developers. Instead, it should make their jobs easier. Another way AI can help is by connecting developers in a time when many are working remotely post-COVID.\n\n“One of the cool things that's in [GitLab’s] roadmap is Suggested Reviewers,” he said. “Getting help with code reviews used to involve yelling across the room or over a cube wall, ‘Hey, can someone look at my code?’ That’s not so easy now. AI can suggest someone who's actually worked in that code before or who has resolved incidents in that code and does that sort of thing. How much value is that going to add to the review process? I think the more automation we can put in, the less manual steps or wait states there will be.”\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/UnUfp7pKnEQ?si=qcX2Qm3zpgQOV4xy\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\n*Southwest Airlines is a nearly $24 billion company based in Dallas, Texas. With 72,000 employees, it flies to 120 destinations, making 4,000 flights per day.  
Southwest flies more domestic passengers than any other airline.\nRead more GitLab customer stories on our [customers page](https://about.gitlab.com/customers/).*\n",[849,2771,9,2731],"DevOps platform",{"slug":2773,"featured":6,"template":680},"southwest-looking-to-help-developers-take-flight","content:en-us:blog:southwest-looking-to-help-developers-take-flight.yml","Southwest Looking To Help Developers Take Flight","en-us/blog/southwest-looking-to-help-developers-take-flight.yml","en-us/blog/southwest-looking-to-help-developers-take-flight",{"_path":2779,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":2780,"content":2783,"config":2788,"_id":2790,"_type":14,"title":2791,"_source":16,"_file":2792,"_stem":2793,"_extension":19},"/en-us/blog/speed-meets-governance-model-selection-comes-to-gitlab-duo",{"noIndex":6,"title":2781,"description":2782},"Speed meets governance: Model Selection comes to GitLab Duo","Duo Model Selection provides LLM control for each GitLab Duo feature, enabling organizations to adopt AI while meeting strict governance and compliance standards.",{"title":2781,"description":2782,"authors":2784,"heroImage":861,"date":2785,"body":2786,"category":10,"tags":2787},[1255],"2025-06-25","New AI models are released almost daily, each with unique capabilities, performance characteristics, and compliance implications. At GitLab, we're committed to delivering cutting-edge AI capabilities by [continuously integrating the latest and highest-performing models as they become available](https://about.gitlab.com/blog/developing-gitlab-duo-how-we-validate-and-test-ai-models-at-scale/). 
However, we know this fast pace can create complex challenges for enterprise organizations whose model usage is subject to strict governance, compliance, and security standards.\n\nMeet [GitLab Duo Model Selection](https://docs.gitlab.com/user/gitlab_duo/model_selection/), a powerful new capability that gives teams control over the large language models ([LLMs](https://about.gitlab.com/blog/what-is-a-large-language-model-llm/)) used in your organization. Available in private beta in the newly released [GitLab 18.1](https://about.gitlab.com/releases/2025/06/19/gitlab-18-1-released/) to all GitLab.com customers using Duo Enterprise, Duo Model Selection makes it easier to maintain governance, compliance, and security standards while helping accelerate innovation with agentic and generative AI. With Duo Model Selection, organizations can adopt GitLab Duo faster by selecting models from their pre-approved vendor list, versus the GitLab default model.\n\n## The benefits of GitLab Duo Model Selection\n\nDuo Model Selection gives GitLab.com namespace owners control over which AI models teams can use across different GitLab Duo features, though those without specialized requirements are recommended to use the GitLab default model. With Duo Model Selection, you can:\n\n* **Configure models at the organization level:** Set AI model preferences that apply across your organization’s entire namespace, ensuring consistent governance and compliance standards. 
Namespace owners can select models approved by their organization from GitLab's validated model catalog.\n\n* **Control models per GitLab Duo feature:** Different GitLab Duo features can use different models based on your specific needs.\n\nWatch Duo Model Selection in action:\n\n\u003Cdiv style=\"padding:62.21% 0 0 0;position:relative;\">\u003Ciframe src=\"https://player.vimeo.com/video/1094452473?autoplay=1\"badge=0&amp;autopause=0&amp;player_id=0&amp;app_id=58479\" frameborder=\"0\" allow=\"autoplay; fullscreen; picture-in-picture; clipboard-write; encrypted-media; web-share\" style=\"position:absolute;top:0;left:0;width:100%;height:100%;\" title=\"Duo Model Selection Demo\">\u003C/iframe>\u003C/div>\u003Cscript src=\"https://player.vimeo.com/api/player.js\">\u003C/script>\n\n## Join the Duo Model Selection private beta\n\nReady to take control of your AI governance? Duo Model Selection is currently in private beta for all GitLab.com customers using Duo Enterprise. To join the private beta, reach out to your GitLab account team. 
If you don’t have Duo, [sign up for a GitLab Duo trial](https://about.gitlab.com/gitlab-duo/#free-trial) today!\n> Find out everything that's new and exciting, including agentic AI capabilities, in GitLab 18 with our [on-demand launch event](https://about.gitlab.com/eighteen/).",[9,725],{"featured":91,"template":680,"slug":2789},"speed-meets-governance-model-selection-comes-to-gitlab-duo","content:en-us:blog:speed-meets-governance-model-selection-comes-to-gitlab-duo.yml","Speed Meets Governance Model Selection Comes To Gitlab Duo","en-us/blog/speed-meets-governance-model-selection-comes-to-gitlab-duo.yml","en-us/blog/speed-meets-governance-model-selection-comes-to-gitlab-duo",{"_path":2795,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":2796,"content":2801,"config":2805,"_id":2807,"_type":14,"title":2808,"_source":16,"_file":2809,"_stem":2810,"_extension":19},"/en-us/blog/streamline-devsecops-engineering-workflows-with-gitlab-duo",{"title":2797,"description":2798,"ogTitle":2797,"ogDescription":2798,"noIndex":6,"ogImage":799,"ogUrl":2799,"ogSiteName":693,"ogType":694,"canonicalUrls":2799,"schema":2800},"Streamline DevSecOps engineering workflows with GitLab Duo","Learn all the ways GitLab Duo's AI capabilities can improve the efficiency of development workflows. Includes in-depth tutorials and demos.","https://about.gitlab.com/blog/streamline-devsecops-engineering-workflows-with-gitlab-duo","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Streamline DevSecOps engineering workflows with GitLab Duo\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Abubakar Siddiq Ango\"}],\n        \"datePublished\": \"2024-12-05\",\n      }",{"title":2797,"description":2798,"authors":2802,"heroImage":799,"date":2163,"body":2803,"category":10,"tags":2804},[1622],"It's 9 a.m. somewhere, and a DevOps engineer is starting their day. 
They check their [GitLab todo list](https://docs.gitlab.com/ee/user/todos.html) to see any mentions or tasks assigned to them, collaborating with other stakeholders in their organization. These tasks can include:\n\n- managing infrastructure\n- maintaining the configuration of resources\n- maintaining CI/CD pipelines\n- automating processes for efficiency\n- maintaining monitoring and alerting systems\n- ensuring applications are securely built and deployed\n- modernizing applications with containerization\n\nTo carry out these tasks, DevOps engineers spend a lot of time reading documentation, writing configuration files, and searching for help in forums, issues boards, and blogs. Time is spent studying and understanding concepts, and how tools and technologies work. When they don't work as expected, a lot more time is spent investigating why. New tools are released regularly to solve niche or existing problems differently, which introduces more things to learn and maintain context for.\n\n[GitLab Duo](https://about.gitlab.com/gitlab-duo/), our AI-powered suite of capabilities, fits into the workflow of DevSecOps engineers, enabling them to reduce time spent solving problems while increasing their efficiency.\n\nLet's explore how GitLab Duo helps streamline workflows.\n\n## Collaboration and communication\n\nDiscussions or requests for code reviews require spending time reading comments from everyone and carefully reviewing the work shared. GitLab Duo capabilities like Discussion Summary, Code Review Summary, and Merge Request Summary increase the effectiveness of collaboration by reducing the time required to get caught up on activities and comments, with more time spent getting the actual work done.\n\n### Merge Request Summary  \n\nWriting a detailed and clear summary of the change a merge request introduces is crucial for every stakeholder to understand what, why, and how a change was made. 
title=\"GitLab Duo Discussion Summary\"
they get a concise summary automatically drafted, leading to efficiency.
Other technologies can be supported with [additional language support configuration](https://docs.gitlab.com/ee/user/project/repository/code_suggestions/supported_extensions.html#add-support-for-more-languages) for the [GitLab Workflow extension](https://docs.gitlab.com/ee/editor_extensions/visual_studio_code/index.html).\n\nWhere a technology is not officially supported, [GitLab Duo Chat](https://docs.gitlab.com/ee/user/gitlab_duo_chat/examples.html) is the powerful AI assistant that can help generate, explain, clarify, and troubleshoot your configuration, while maintaining context from selected text or opened files. Here are two demos where GitLab Duo helped create IaC with Terraform and AWS CloudFormation.\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/saa2JJ57UaQ?si=Bu9jyQWwuSUcw8vr\" title=\"Manage your Infrastructure with Terraform and AI using GitLab Duo\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\n\u003Cbr>\u003C/br>\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/KSLk2twXqiI?si=QDdERjbM0f7X2p23\" title=\"Deploying AWS Lambda function using AWS Cloudformation with help from GitLab Duo\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\n### Configuration management\n\nOnce your infrastructure is up, GitLab Duo Chat can also help create configuration files and refactor existing ones. These can be Ansible configurations for infrastructure or cloud-native configurations using Docker, Kubernetes, or Helm resource files. 
In the videos below, I demonstrate how GitLab Duo helps with Ansible, containerization, and application deployment to Kubernetes.\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/t6ZCq_jkBwY?si=awCUdu1wCgOO21XR\" title=\"Configuring your Infrastructure with Ansible & GitLab Duo\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\n\u003Cbr>\u003C/br>\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/KSLk2twXqiI?si=QDdERjbM0f7X2p23\" title=\"Containerizing your application with GitLab Duo\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\n\u003Cbr>\u003C/br>\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/uroSxvMFqPU?si=GMNC7f2b7i_cjn6F\" title=\"Deploying your application to Kubernetes with Help from GitLab Duo\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\n\u003Cbr>\u003C/br>\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/9yGDM00RlUA?si=kE5JZD_OEFcxeR7E\" title=\"Deploying to Kubernetes using Helm with help from GitLab Duo\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\n### Test, test, test\n\nWriting tests is an important part of building secure software, but it can be a chore and often becomes an afterthought. 
[generate tests for your code](https://docs.gitlab.com/ee/user/gitlab_duo_chat/examples.html#write-tests-in-the-ide) by highlighting your code and typing `/tests` in the Chat panel
GitLab Duo [Vulnerability Explanation](https://docs.gitlab.com/ee/user/application_security/vulnerabilities/index.html#explaining-a-vulnerability) and [Vulnerability Resolution](https://docs.gitlab.com/ee/user/application_security/vulnerabilities/index.html#vulnerability-resolution) reduce the time spent researching and fixing vulnerabilities. Vulnerability Explanation explains why a vulnerability is happening, its impact, and how to fix it, helping the DevOps engineer to upskill. Vulnerability Resolution takes it further – instead of just suggesting a fix, it creates a merge request with a fix for the vulnerability for you to review. \n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/MMVFvGrmMzw?si=Fxc4SeOkCBKwUk_k\" title=\"GitLab Duo Vulnerability Explanation\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\n\u003Cbr>\u003C/br>\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/VJmsw_C125E?si=XT3Qz5SsX-ISfCyq\" title=\"GitLab Duo Vulnerability resolution\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\n## More work done with less stress\n\nWith GitLab Duo, DevOps engineers can do more work deploying and maintaining secure applications, while acquiring more skills with the detailed responses from GitLab Duo Chat.\n\n> [Sign up for a free 60-day trial of GitLab Duo](https://about.gitlab.com/solutions/gitlab-duo-pro/sales/) to get started today!",[9,109,703,482,724,725],{"slug":2806,"featured":91,"template":680},"streamline-devsecops-engineering-workflows-with-gitlab-duo","content:en-us:blog:streamline-devsecops-engineering-workflows-with-gitlab-duo.yml","Streamline Devsecops Engineering Workflows With Gitlab 
Duo","en-us/blog/streamline-devsecops-engineering-workflows-with-gitlab-duo.yml","en-us/blog/streamline-devsecops-engineering-workflows-with-gitlab-duo",{"_path":2812,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":2813,"content":2818,"config":2824,"_id":2826,"_type":14,"title":2827,"_source":16,"_file":2828,"_stem":2829,"_extension":19},"/en-us/blog/summarize-issues",{"title":2814,"description":2815,"ogTitle":2814,"ogDescription":2815,"noIndex":6,"ogImage":820,"ogUrl":2816,"ogSiteName":693,"ogType":694,"canonicalUrls":2816,"schema":2817},"ML experiment: Summarizing issue comments","Learn how GitLab is experimenting with ML-powered issue comment summarization in this fifth installment of our ongoing AI/ML in DevSecOps series.","https://about.gitlab.com/blog/summarize-issues","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"ML experiment: Summarizing issue comments\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Melissa Ushakov\"},{\"@type\":\"Person\",\"name\":\"Taylor McCaslin\"}],\n        \"datePublished\": \"2023-04-13\",\n      }",{"title":2814,"description":2815,"authors":2819,"heroImage":820,"date":2821,"body":2822,"category":10,"tags":2823},[2820,1410],"Melissa Ushakov","2023-04-13","\n\n\u003Ci>This blog post is part of an ongoing series about GitLab's journey to [build and integrate AI/ML into our DevSecOps platform](/blog/ai-ml-in-devsecops-series/). The series starts here: [What the ML is up with DevSecOps and AI?](/blog/what-the-ml-ai/). Throughout the series, we'll feature blogs from our product, engineering, and UX teams to showcase how we're infusing AI/ML into GitLab.\u003C/i>\n\n[GitLab issues](https://docs.gitlab.com/ee/user/project/issues/) are essential for team collaboration and serve as the source of truth for teams to align on the problem definition and scope of work for ongoing efforts. 
[Nicolas Dular](https://gitlab.com/nicolasdular)
takes an individual's natural language comments, runs inference on them against a generative AI LLM,
As with all projects, the items mentioned in this blog and linked pages are subject to change or delay. The development, release, and timing of any products, features, or functionality remain at the sole discretion of GitLab._\n",[849,725,704,9],{"slug":2825,"featured":6,"template":680},"summarize-issues","content:en-us:blog:summarize-issues.yml","Summarize Issues","en-us/blog/summarize-issues.yml","en-us/blog/summarize-issues",{"_path":2831,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":2832,"content":2837,"config":2842,"_id":2844,"_type":14,"title":2845,"_source":16,"_file":2846,"_stem":2847,"_extension":19},"/en-us/blog/summarize-my-merge-request-review",{"title":2833,"description":2834,"ogTitle":2833,"ogDescription":2834,"noIndex":6,"ogImage":820,"ogUrl":2835,"ogSiteName":693,"ogType":694,"canonicalUrls":2835,"schema":2836},"ML experiment: Summarize my merge request review","Learn how GitLab is experimenting with ML-powered merge request review summaries in this latest installment of our ongoing 'AI/ML in DevSecOps' series.","https://about.gitlab.com/blog/summarize-my-merge-request-review","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"ML experiment: Summarize my merge request review\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Kai Armstrong\"}],\n        \"datePublished\": \"2023-05-18\",\n      }",{"title":2833,"description":2834,"authors":2838,"heroImage":820,"date":2839,"body":2840,"category":10,"tags":2841},[1448],"2023-05-18","\n\n\u003Ci>This blog is the latest post in an ongoing series about GitLab’s journey to \u003Ca href=\"/blog/ai-ml-in-devsecops-series/\">build and integrate AI/ML into our DevSecOps platform\u003C/a>. The first blog post can be found \u003Ca href=\"/blog/what-the-ml-ai/\">here\u003C/a>. 
Senior Frontend Engineer for our
You can see some of our [design efforts](https://gitlab.com/gitlab-org/gitlab/-/issues/408307) we've been exploring with the [summarizing merge request changes](/blog/merge-request-changes-summary-ai/) feature to get an idea of our possible direction.\n\nThis experiment is just the start of the ways we're infusing GitLab with AI/ML capabilities to help GitLab users become more efficient and effective at their jobs. We are [looking across the software development lifecycle](/blog/what-the-ml-ai/) for painful and time-consuming tasks that are ideal for AI-assisted features. We'll continue to share these demos throughout this blog series.\n\nInterested in using these AI-generated features? [Join our waitlist](https://forms.gle/9eeUkPJauKsbLaoz5) and share your ideas.\n\nContinue reading our \"[AI/ML in DevSecOps](/blog/ai-ml-in-devsecops-series/)\" series.\n\n_Disclaimer: This blog contains information related to upcoming products, features, and functionality. It is important to note that the information in this blog post is for informational purposes only. Please do not rely on this information for purchasing or planning purposes. As with all projects, the items mentioned in this blog and linked pages are subject to change or delay. 
The development, release, and timing of any products, features, or functionality remain at the sole discretion of GitLab._\n",[703,725,704,9],{"slug":2843,"featured":6,"template":680},"summarize-my-merge-request-review","content:en-us:blog:summarize-my-merge-request-review.yml","Summarize My Merge Request Review","en-us/blog/summarize-my-merge-request-review.yml","en-us/blog/summarize-my-merge-request-review",{"_path":2849,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":2850,"content":2856,"config":2861,"_id":2863,"_type":14,"title":2864,"_source":16,"_file":2865,"_stem":2866,"_extension":19},"/en-us/blog/supercharge-productivity-with-gitlab-duo",{"title":2851,"description":2852,"ogTitle":2851,"ogDescription":2852,"noIndex":6,"ogImage":2853,"ogUrl":2854,"ogSiteName":693,"ogType":694,"canonicalUrls":2854,"schema":2855},"Supercharge productivity with generative AI and GitLab Duo","Learn how the GitLab DevSecOps platform leverages generative AI and large language models to enable organizations to boost productivity and efficiency.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749674838/Blog/Hero%20Images/duo-blog-post.png","https://about.gitlab.com/blog/supercharge-productivity-with-gitlab-duo","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Supercharge productivity with generative AI and GitLab Duo\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Abubakar Siddiq Ango\"}],\n        \"datePublished\": \"2023-07-20\",\n      }",{"title":2851,"description":2852,"authors":2857,"heroImage":2853,"date":2858,"body":2859,"category":10,"tags":2860},[1622],"2023-07-20","\nIn GitLab's [2023 Global DevSecOps Report](https://about.gitlab.com/developer-survey/previous/2023/), developer productivity and operational efficiency were top of mind for most respondents and ranked as the two top benefits organizations strive for when adopting DevSecOps methodologies. 
At the same time, artificial intelligence (AI) and machine learning (ML) have become highly relevant to research and development teams in recent years and have gained increased importance in the software development process of many companies. Across the software development lifecycle, generative AI is enabling faster development cycles, less time spent on code reviews, and more secure software development practices.\n\nAt GitLab, we’re using generative AI and ML to deliver productivity and efficiency gains to DevSecOps teams throughout the software development lifecycle. In this blog post, we will look at how the suite of [GitLab Duo](https://about.gitlab.com/gitlab-duo/) AI capabilities boost productivity. First, let's understand the underlying technology: generative AI.\n\n## What is generative AI?\nGenerative AI is a branch of AI capable of generating new content. The generated content is created by using patterns learned from examples or input data during the training process. One of the most popular architectures used to learn from data is a [transformer model](https://blogs.nvidia.com/blog/what-is-a-transformer-model/). Given the success of transformer models in natural language processing (NLP) and their flexibility in being adapted or fine-tuned to different and more specific tasks, they have been called \"foundation models,\" a term coined by Stanford researchers in an [August 2021 research paper](https://arxiv.org/pdf/2108.07258.pdf). \n\nThese models, when trained on text data, are capable of learning context and meaning by tracking relationships in the input data. A successful application of transformer models is [large language models (LLMs)](https://www.nvidia.com/en-us/glossary/data-science/large-language-models/): algorithms trained with petabyte-scale, text-based data sets to recognize, predict, and generate various forms of content. 
import dependencies in your favorite programming language
\n\n### Code explanations\nDevelopers spend a ton of time on search engines trying to figure out what a block of code does or why it behaves the way it does. The [\"explain this code\"](https://docs.gitlab.com/ee/user/ai_features.html#explain-selected-code-in-the-web-ui) feature is currently an [experiment](https://docs.gitlab.com/ee/policy/experiment-beta-support.html#experiment) on GitLab.com and uses LLMs to explain code in natural language, including context based on the code selected for it to explain.\n\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/xzsFfFqvlnU\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\n### AI-generated tests for code changes\nEnsuring code changes are thoroughly tested is hard work and can be time-consuming. In your merge requests, with generative AI, you can generate test files for your code to provide coverage for the change introduced, reducing the time spent writing tests.\n\n![Merge request test coverage with AI](https://about.gitlab.com/images/blogimages/merge-request-generate-tests-ai.gif)\n\n### Vulnerability explanations\nA critical part of preventing the escalation of vulnerabilities is understanding why vulnerabilities were discovered and how to fix them. This takes much effort to research before making an informed decision on the next action step. [GitLab Duo vulnerability recommendations](https://about.gitlab.com/blog/explain-this-vulnerability/) provide detailed information on identified vulnerabilities, including context from your code, how they can be exploited, and example fixes, thus allowing quick remediation of vulnerabilities.\n\n![Explain this vulnerability](https://about.gitlab.com/images/blogimages/2023-04-27-explain-this-vulnerability.png)\n\n### Value stream forecasting\n\nThe efficiency of the software development lifecycle is critical to a team’s productivity and quality of value delivery. 
identify trends from events occurring in the lifecycle
All you need to do is use the `/summarize_diff` [quick action](https://docs.gitlab.com/ee/user/project/quick_actions.html) to add a summary of changes in a comment.\n\n![Merge request changes summary with AI](https://about.gitlab.com/images/blogimages/merge-request-changes-summary-ai.gif) \n\n### Issue comment summary\n\nWhen planning work, discussion is an essential element of collaboration. Discussions can become lengthy and catching up on all the comments can be a challenge, especially in an organization with an asynchronous culture. Similar to summarizing merge request changes, GitLab can [summarize issue comments](https://about.gitlab.com/blog/summarize-issues/), which is valuable in efficiently understanding the status of work from the issue discussions and the next steps to take. Here is a video of how it works:\n\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/GMr3eHwbYAI\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\n### Suggested reviewers\nIdentifying reviewers can be time-consuming, especially with cross-functional collaborations in a large team. When enabled by project maintainers or owners, GitLab [suggests reviewers](https://docs.gitlab.com/ee/user/project/merge_requests/reviews/index.html#suggested-reviewers) for a merge request based on the code changes made and the project’s contribution graph. This leads to higher-quality feedback from team members with domain knowledge and increases the speed of reviews.\n\n### Merge request review summary\nCode reviewers often leave feedback across several parts of the changes they’ve reviewed, then proceed to write a separate comment to give an overview of all the feedback given. The reviewer spends valuable time ensuring the merge request author has the context to understand the review. 
With GitLab Duo, the reviewer can simply use the [\"summarize my code review\"](/blog/summarize-my-merge-request-review/) capability to provide authors with context around the feedback, without the need to process the entire review. \n\n![Summarize code review](https://about.gitlab.com/images/blogimages/summarize-my-merge-request-review-ai.gif)\n\n## What's coming next\nGitLab is continuously iterating on our [AI-assisted capabilities](https://about.gitlab.com/blog/ai-ml-in-devsecops-series/) to find innovative ways to enable you to efficiently build more secure software faster, while putting privacy first in a single application that gives every stakeholder visibility. This includes AI [experiments](https://docs.gitlab.com/ee/policy/experiment-beta-support.html#experiment) such as [GitLab Duo Chat](https://docs.gitlab.com/ee/user/ai_features.html#gitlab-duo-chat), which uses generative AI to answer product-specific questions about GitLab, reducing time spent reviewing the GitLab documentation. Another experiment is the \"fill in merge request description\" capability, which uses AI and content from the proposed changes to fill in a merge request description template.\n\n![Summarize MR Descriptions](https://about.gitlab.com/images/blogimages/summarize-mr-description.gif)\n\nGitLab Duo’s AI-assisted workflows enable teams in every phase of the software development lifecycle to deliver secure software faster with increased efficiency and reduced cycle times. 
Learn more about [GitLab Duo here](https://about.gitlab.com/gitlab-duo/).\n",[703,482,9],{"slug":2862,"featured":6,"template":680},"supercharge-productivity-with-gitlab-duo","content:en-us:blog:supercharge-productivity-with-gitlab-duo.yml","Supercharge Productivity With Gitlab Duo","en-us/blog/supercharge-productivity-with-gitlab-duo.yml","en-us/blog/supercharge-productivity-with-gitlab-duo",{"_path":2868,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":2869,"content":2875,"config":2881,"_id":2883,"_type":14,"title":2884,"_source":16,"_file":2885,"_stem":2886,"_extension":19},"/en-us/blog/the-gitlab-ai-security-framework-for-security-leaders",{"title":2870,"description":2871,"ogTitle":2870,"ogDescription":2871,"noIndex":6,"ogImage":2872,"ogUrl":2873,"ogSiteName":693,"ogType":694,"canonicalUrls":2873,"schema":2874},"The GitLab AI Security Framework for security leaders","Discover how GitLab Duo's security controls, third-party integrations, and retention policies help teams safely implement AI into their development workflow.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749664299/Blog/Hero%20Images/AdobeStock_887599633.jpg","https://about.gitlab.com/blog/the-gitlab-ai-security-framework-for-security-leaders","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"The GitLab AI Security Framework for security leaders\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Kyle Smith\"},{\"@type\":\"Person\",\"name\":\"Ayoub Fandi\"}],\n        \"datePublished\": \"2025-03-04\",\n      }",{"title":2870,"description":2871,"authors":2876,"heroImage":2872,"date":2878,"body":2879,"category":10,"tags":2880},[2877,763],"Kyle Smith","2025-03-04","As companies rapidly adopt AI technologies, CISOs face a new frontier of security challenges. 
Many security leaders find themselves grappling with unfamiliar questions: How do we evaluate AI vendors differently from traditional software vendors? What security controls matter most? Where does vendor responsibility end and customer responsibility begin? How do we evaluate AI security risks within the context of the service provided? To help answer these questions, we’ve created the [GitLab AI Security Framework](https://trust.gitlab.com/?itemUid=ad3d92c1-889e-49fc-b19c-2434f70071ee&source=click) to show security leaders how GitLab and customers enable secure AI-powered development using GitLab Duo.\n\n## The genesis of AI security challenges\n\nFrom conversations with security leaders across industries a pattern has emerged: Organizations are rapidly embracing AI technologies to improve delivery while their security teams struggle to establish appropriate security controls. \n\nThis disconnect isn't just a matter of resources or expertise – it represents a fundamental shift in how organizations need to approach security in the AI era. Security leaders are witnessing quick and unprecedented adoption of AI across their organizations, from development teams using coding assistants to marketing departments leveraging generative AI. \n\nWhile organizations are integrating AI within their own software, many of their current vendor-provided SaaS applications have added AI capabilities as well. Although this adoption drives innovation and efficiency, it also creates a complex set of security considerations that traditional frameworks weren't designed to address. Below are some of the specific challenges we’ve identified.\n\n## Security challenges in the AI era\n\n**1. Responsibility and control uncertainty**\n\nThe rapid pace of AI adoption has left many organizations without a coherent security governance strategy. Security teams find themselves trying to retrofit existing security frameworks to address AI-specific concerns. 
Security leaders face challenges in understanding where their responsibilities begin and end when it comes to AI security. The traditional vendor-customer relationship becomes more complex with AI systems, as data flows, model training, and inference processes create new types of interactions and dependencies. \n\n**2. Risk assessment evolution**\n\nTraditional security risk models struggle to capture the unique characteristics of AI systems. Security leaders are finding that standard risk assessment frameworks don't adequately address AI-specific risks. AI security risks will differ based on AI implementation and the context in which it’s used. The challenge is compounded by the need to evaluate AI vendors without necessarily having deep technical AI expertise on the security team.\n\n**3. Data protection complexities**  \nAI systems present unique challenges for data protection. The way these systems process, learn from, and generate data creates new privacy and security considerations that organizations should carefully evaluate. CISOs must ensure their data governance frameworks evolve to address how AI systems use and protect sensitive information. AI implementations with inadequate safeguards might inadvertently reveal protected information via AI generated outputs.\n\n**4. Compliance and standards navigation**  \nThe regulatory landscape for AI security is rapidly evolving, with new standards like ISO 42001 and others emerging alongside existing frameworks. Security leaders must navigate this complex environment while ensuring their AI implementations remain compliant with both current and anticipated regulations. 
This requires a delicate balance between enabling AI adoption and maintaining robust security controls that satisfy regulatory requirements.\n\n## Addressing these challenges  \nWith the release of [GitLab Duo](https://about.gitlab.com/gitlab-duo/), we recognized these executive-level concerns and developed a comprehensive framework to help organizations navigate AI security in the context of our AI-powered DevSecOps platform. Our AI Security Framework provides details on our privacy-first implementation of AI to enable GitLab Duo, and how we validate the security of our AI vendors. A responsibility matrix is included to help security leaders manage their AI security responsibilities while enabling their organizations to innovate safely. We also compiled a selection of AI-specific security risks to keep in mind and highlighted how GitLab capabilities like [prompt guardrails](https://about.gitlab.com/blog/how-gitlab-uses-prompt-guardrails-to-help-protect-customers/) can help in mitigating them. \n\n> Want a deeper look at our security controls? 
Check out our [AI Security Framework](https://trust.gitlab.com/?itemUid=ad3d92c1-889e-49fc-b19c-2434f70071ee&source=click).\n\n## Learn more\n- [GitLab AI Transparency Center](https://about.gitlab.com/ai-transparency-center/)\n- [How GitLab uses prompt guardrails to help protect customers](https://about.gitlab.com/blog/how-gitlab-uses-prompt-guardrails-to-help-protect-customers/)\n- [Improve AI security in GitLab with composite identities](https://about.gitlab.com/blog/improve-ai-security-in-gitlab-with-composite-identities/)\n- [Secure, compliant, and AI-powered: Get to know 3 new GitLab features](https://about.gitlab.com/blog/secure-compliant-and-ai-powered-get-to-know-3-new-gitlab-features/)\n- [ICYMI: Key AI and security insights from our developer community](https://about.gitlab.com/blog/icymi-key-ai-and-security-insights-from-our-developer-community/)",[9,704,482,678],{"slug":2882,"featured":91,"template":680},"the-gitlab-ai-security-framework-for-security-leaders","content:en-us:blog:the-gitlab-ai-security-framework-for-security-leaders.yml","The Gitlab Ai Security Framework For Security Leaders","en-us/blog/the-gitlab-ai-security-framework-for-security-leaders.yml","en-us/blog/the-gitlab-ai-security-framework-for-security-leaders",{"_path":2888,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":2889,"content":2895,"config":2900,"_id":2902,"_type":14,"title":2903,"_source":16,"_file":2904,"_stem":2905,"_extension":19},"/en-us/blog/the-road-to-smarter-code-reviewer-recommendations",{"title":2890,"description":2891,"ogTitle":2890,"ogDescription":2891,"noIndex":6,"ogImage":2892,"ogUrl":2893,"ogSiteName":693,"ogType":694,"canonicalUrls":2893,"schema":2894},"The road to smarter code reviewer recommendations","Machine learning is coming to GitLab's code review process. 
Here's what you need to know, and how you can help!","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749668426/Blog/Hero%20Images/retrospectivesgitlabpost.jpg","https://about.gitlab.com/blog/the-road-to-smarter-code-reviewer-recommendations","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"The road to smarter code reviewer recommendations\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Taylor McCaslin\"}],\n        \"datePublished\": \"2022-01-04\",\n      }",{"title":2890,"description":2891,"authors":2896,"heroImage":2892,"date":2897,"body":2898,"category":1156,"tags":2899},[1410],"2022-01-04","\nYou may recall back in June 2021, we [announced the acquisition of UnReview](/press/releases/2021-06-02-gitlab-acquires-unreview-machine-learning-capabilities/), a machine learning (ML) based solution for automatically identifying appropriate expert [code reviewers](/stages-devops-lifecycle/create/) and controlling review workloads and distribution of knowledge.\n\nAt the start of the new year we wanted to provide an update on our integration progress and our wider vision of leveraging machine learning to make GitLab's [DevOps Platform](/solutions/devops-platform/) smarter. You see, the acquisition of UnReview also was the initial staffing of [our new ModelOps stage](/direction/modelops/).\n\n### Our Newest DevOps Stage\n\nThis new stage, which we’ve named ModelOps, is focused on enabling and empowering data science workloads on GitLab. GitLab ModelOps aims to bring data science into GitLab both within existing features to make them smarter and more intelligent, but also empowering GitLab customers to build and integrate data science workloads within GitLab.\n\nSo what is ModelOps you may wonder? We view ModelOps as an all encompassing term to cover the entire end to end lifecycle of artificial intelligence models. 
We wanted to set our vision wide to fully cover everything needed to power data science workloads. DataOps is the processing of data workloads (think traditional ELT: extract, load, transform) and MLOps is the building, training, and deployment of machine learning models. If you’re confused don’t worry, it’s a lot to wrap your head around.\n\n![a look at the stages of MLOps](https://about.gitlab.com/images/blogimages/MLops.png){: .shadow.small.center}\n\nToday our DevOps Platform helps plan, build, test, secure, deploy, and monitor traditional software. Now we want to extend our DevOps Platform to include AI and ML workloads. If this is interesting to you, be sure to check out our recent Contribute talk where we dive deeper into plans for our ModelOps stage.\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/C08QVI99JLo\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\n### UnReview as our first feature\n\nSo what does this have to do with UnReview? Our acquisition of UnReview is going to be our first [AI Assisted](/direction/ai-powered/) group’s feature: suggested reviewers within [GitLab’s existing reviewers experience](/blog/merge-request-reviewers/). Today, a developer in a merge request has to manually choose a reviewer to look at their code. With UnReview we can leverage the contribution history for a project and recommend someone well-suited for code review of your specific changes.\n\nHere’s an early mockup (and it may differ from our final UI) of how we’re thinking about this integration:\n\n![an early mockup of our UI](https://about.gitlab.com/images/blogimages/codereviewmockup.png){: .shadow.small.left}\n\nThe UnReview algorithm looks at a variety of data points from your project’s contribution history to suggest an appropriate reviewer. 
We’re still in the early days of this integration but our initial internal testing shows great suggestions.\n\n### Customer beta coming soon!\n\nThis leads me to a final question, might you want to be one of our first customers to try this new code review experience? In early 2022, we’ll begin a private customer beta of this new functionality. If interested, [fill out this form to express interest](https://docs.google.com/forms/d/e/1FAIpQLScpmCwpwyBr0GrXxBQ6vE02eokclFAs9lFk_g5dcyuGaHqFuQ/viewform). Do note that we can’t accept everyone and we’ll focus initially on customer profiles that are well suited for the initial version of the suggestion algorithm. Our only ask is we’d like to find customers with active projects that have a healthy number of contributors. The model currently works best on larger repositories with lots of contributors where it may not immediately be clear who is an ideal code reviewer.\n\nWe can’t wait for customers to begin using this new reviewer suggestion experience and will be providing more updates in early 2022.\n",[849,231,746,9],{"slug":2901,"featured":6,"template":680},"the-road-to-smarter-code-reviewer-recommendations","content:en-us:blog:the-road-to-smarter-code-reviewer-recommendations.yml","The Road To Smarter Code Reviewer Recommendations","en-us/blog/the-road-to-smarter-code-reviewer-recommendations.yml","en-us/blog/the-road-to-smarter-code-reviewer-recommendations",{"_path":2907,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":2908,"content":2914,"config":2919,"_id":2921,"_type":14,"title":2922,"_source":16,"_file":2923,"_stem":2924,"_extension":19},"/en-us/blog/there-is-no-mlops-without-devsecops",{"title":2909,"description":2910,"ogTitle":2909,"ogDescription":2910,"noIndex":6,"ogImage":2911,"ogUrl":2912,"ogSiteName":693,"ogType":694,"canonicalUrls":2912,"schema":2913},"Building GitLab with GitLab: Why there is no MLOps without DevSecOps","Follow along as data scientists adopt DevSecOps practices and enjoy the benefits of 
automation, repeatable workflows, standardization, and automatic provisioning of infrastructure.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749659740/Blog/Hero%20Images/building-gitlab-with-gitlab-no-type.png","https://about.gitlab.com/blog/there-is-no-mlops-without-devsecops","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Building GitLab with GitLab: Why there is no MLOps without DevSecOps\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"William Arias\"}],\n        \"datePublished\": \"2023-10-05\",\n      }",{"title":2909,"description":2910,"authors":2915,"heroImage":2911,"date":2916,"body":2917,"category":10,"tags":2918},[2354],"2023-10-05","\nBuilding predictive models requires a good amount of experimentation and iterations. Data scientists building those models usually implement workflows involving several steps such as data loading, processing, training, testing, and deployment. 
Such workflows or data science pipelines come with a set of challenges on their own; some of these common challenges are:\n- prone to error due to manual steps\n- experimentation results that are hard to replicate\n- long training time of machine learning (ML) models \n\nWhen there is a challenge, there is also an opportunity; in this case, those challenges represent an opportunity for data scientists to adopt DevSecOps practices and enjoy the benefits of automation, repeatable workflows, standardization, and automatic provisioning of infrastructure needed for data-driven applications at scale.\n\nThe [Data Science team at GitLab](https://about.gitlab.com/handbook/business-technology/data-team/organization/data-science/) is now utilizing the GitLab DevSecOps Platform in their workflows, specifically to:\n- enhance experiment reproducibility by ensuring code and data execute in a standardized container image\n- automate training and re-training of ML models with GPU-enabled CI/CD\n- leverage ML experiment tracking, storing the most relevant metadata and artifacts produced by data science pipelines automated with CI\n\nAt GitLab, we are proponents of \"dogfooding\" our platform and sharing how we use GitLab to build GitLab. What follows is a detailed look at the Data Science team's experience.\n\n### Enhancing experiment reproducibility \nA baseline step to enhance reproducibility is having a common and standard experiment environment for all data scientists to run experiments in their Jupyter Notebooks. A standard data science environment ensures that all team members use the same software dependencies. A way to achieve this is by building a container image with all the respective dependencies under version control and re-pulling it every time a new version of the code is run. 
This process is illustrated in the figure below:\n\n![build](https://about.gitlab.com/images/blogimages/2023-10-04-there-is-no-mlops-without-devsecops/build-2.png)\nData science image of automatic build using GitLab CI \n{: .note.text-center}\n\nYou might wonder if the image gets built every time there is a new commit. The answer is \"no\" since that would result in longer execution times, and the image dependencies versions don’t change frequently, rendering it unnecessary to build it every time there is a new commit. Therefore, once the standard image is automatically built by the pipeline, it is pushed to the GitLab Container Registry, where it is stored and ready to be pulled every time changes to the model code are introduced, and re-training is necessary.\n\n![registry](https://about.gitlab.com/images/blogimages/2023-10-04-there-is-no-mlops-without-devsecops/registry.png)\nGitLab Container Registry with image automatically built and pushed by a CI pipeline\n{: .note.text-center}\n\nChanges to the image dependencies or Dockerfile require a [merge request](https://docs.gitlab.com/ee/user/project/merge_requests/) and an approval process.\n\n### How to build the data science image using GitLab CI/CD\nConsider this project structure:\n\n```\nnotebooks/\n.gitlab-ci.yml\nDockerfile\nconfig.yml\nrequirements.txt\n```\nGitLab's Data Science team already had a pre-configured JupyterLab image with packages such as [gitlabds](https://pypi.org/project/gitlabds/1.0.0/) for common data preparation tasks and modules to enable Snowflake connectivity for loading raw data. All these dependencies are reflected in the Dockerfile at the root of the project, plus all the steps necessary to build the image: \n\n```\nFROM nvcr.io/nvidia/cuda:12.1.1-base-ubuntu22.04\nCOPY .    
/app/\nWORKDIR /app\nRUN apt-get update\nRUN apt-get install -y python3.9\nRUN apt-get install -y python3-pip\nRUN pip install -r requirements.txt\n```\n\nThe instructions to build the data science image start with using Ubuntu with CUDA drivers as a base image. We are using this baseline image because, moving forward, we will use GPU hardware to train models. The rest of the steps include installing Python 3.9 and the dependencies listed in `requirements.txt` with their respective versions. \n\nAutomatically building the data science image using [GitLab CI/CD](https://about.gitlab.com/topics/ci-cd/) requires us to create the `.gitlab-ci.yml` at the root of the project and use it to describe the jobs we want to automate. For the time being, let’s focus only on the `build-ds-image` job:\n\n```\nvariables:\n  DOCKER_HOST: tcp://docker:2375\n  MOUNT_POINT: \"/builds/$CI_PROJECT_PATH/mnt\"\n  CONTAINER_IMAGE: \"$CI_REGISTRY_IMAGE/main-image:latest\"\n\nstages:\n    - build\n    - train\n    - notify\ninclude:\n  - template: 'Workflows/MergeRequest-Pipelines.gitlab-ci.yml'\nworkflow:\n  rules:\n    - if: $CI_PIPELINE_SOURCE == \"merge_request_event\"\n    - if: $CI_COMMIT_BRANCH && $CI_OPEN_MERGE_REQUESTS\n      when: never\n\nbuild-ds-image:\n  tags: [ saas-linux-large-amd64 ]\n  stage: build\n  services:\n    - docker:20.10.16-dind\n  image:\n    name: docker:20.10.16\n  script:\n    - docker login -u $CI_REGISTRY_USER -p $CI_REGISTRY_PASSWORD $CI_REGISTRY\n    - docker build -t $CONTAINER_IMAGE .\n    - docker push $CONTAINER_IMAGE\n  rules:\n    - if: '$CI_PIPELINE_SOURCE == \"merge_request_event\" && $CI_MERGE_REQUEST_TARGET_BRANCH_NAME == $CI_DEFAULT_BRANCH'\n      changes:\n       - Dockerfile\n       - requirements.txt\n\n  allow_failure: true\n```\n\nAt a high level, the job `build-ds-image`:\n- uses a docker-in-docker service (dind) necessary to create docker images in GitLab CI/CD.\n- uses [predefined variables](https://docs.gitlab.com/ee/ci/variables/predefined_variables.html) to log into the GitLab Container 
Registry, build the image, tag it using $CONTAINER_IMAGE variable, and push it to the registry. These steps are declared in the script section lines.\n- leverages a `rules` section to evaluate conditions to determine if the job should be created. In this case, this job runs only if there are changes to the Dockerfile and requirements.txt file and if those changes are created using a merge request.\n\nThe conditions declared in `rules` help us optimize the pipeline running time since the image gets rebuilt only when necessary.\n\nA complete pipeline can be found in this example project, along with instructions to trigger the automatic creation of the data science image: [Data Science CI pipeline](https://gitlab.com/gitlab-data/data-science-ci-example/-/blob/main/.gitlab-ci.yml?ref_type=heads).\n\n### Automate training and re-training of ML models with GPU-enabled CI/CD\nGitLab offers the ability to leverage GPU hardware and, even better, to get this hardware automatically provisioned to run jobs declared in the .gitlab-ci.yml file. We took advantage of this capability to train our ML models faster without spending time setting up or configuring graphics card drivers. 
Using GPU hardware ([GitLab Runners](https://docs.gitlab.com/ee/ci/runners/saas/gpu_saas_runner.html)) requires us to add this line to the training job: \n\n```\ntags:\n        - saas-linux-medium-amd64-gpu-standard\n```\n\nThe tag above will ensure that a GPU GitLab Runner automatically picks up every training job.\nLet’s take a look at the entire training job in the .gitlab-ci.yml file and break down what it does:\n\n```\ntrain-commit-activated:\n    stage: train\n    image: $CONTAINER_IMAGE\n    tags:\n        - saas-linux-medium-amd64-gpu-standard\n    script:\n        - echo \"GPU training activated by commit message\"\n        - echo \"message passed is $CI_COMMIT_MESSAGE\"\n        - notebookName=$(echo ${CI_COMMIT_MESSAGE/train})\n        - echo \"Notebook name $notebookName\"\n        - papermill -p is_local_development False -p tree_method 'gpu_hist' $notebookName -\n    rules:\n        - if: '$CI_COMMIT_BRANCH == \"staging\"'\n          when: never\n        - if: $CI_COMMIT_MESSAGE =~ /\\w+\\.ipynb/\n          when: always\n          allow_failure: true\n    artifacts:\n      paths:\n        - ./model_metrics.md\n```\n\nLet’s start with this block:\n\n```\ntrain-commit-activated:\n    stage: train\n    image: $CONTAINER_IMAGE\n    tags:\n        - saas-linux-medium-amd64-gpu-standard\n```\n\n- **train-commit-activated** This is the name of the job. Since the model training gets activated given a specific pattern in the commit message, we use a descriptive name to easily identify it in the larger pipeline.\n- **stage: train** This specifies the pipeline stage where this job belongs. In the first part of the CI/CD configuration, we defined three stages for this pipeline: `build`, `train`, and `notify`. This job comes after building the data science container image. 
The order is essential since we first need the image built to run our training code in it.\n- **image: $CONTAINER_IMAGE** Here, we specify the Docker image built in the first job that contains the CUDA drivers and necessary Python dependencies to run this job. $CONTAINER_IMAGE is a user-defined variable specified in the variables section of the .gitlab-ci.yml file. \n- **tags: saas-linux-medium-amd64-gpu-standard** As mentioned earlier, using this line, we ask GitLab to automatically provision a GPU-enabled Runner to execute this job.\n\nThe second block of the job:\n\n```\nscript:\n        - echo \"GPU training activated by commit message\"\n        - echo \"message passed is $CI_COMMIT_MESSAGE\"\n        - notebookName=$(echo ${CI_COMMIT_MESSAGE/train})\n        - echo \"Notebook name $notebookName\"\n        - papermill -p is_local_development False -p tree_method 'gpu_hist' $notebookName -\n```\n\n- **script** This section contains the commands in charge of running the model training. The execution of this job is conditioned to the contents of the  commit message. The commit message must have the name of the Jupyter Notebook that contains the actual model training code.\n\nThe rationale behind this approach is that we wanted to keep the data scientist workflow as simple as possible. The team had already adopted the [modeling templates](https://gitlab.com/gitlab-data/data-science/-/tree/main/templates) to start building predictive models quickly. Plugging the CI pipeline into their modeling workflow was a priority to ensure productivity would remain intact. 
With these steps:\n\n```\nnotebookName=$(echo ${CI_COMMIT_MESSAGE/train})\n        - echo \"Notebook name $notebookName\"\n        - papermill -p is_local_development False -p tree_method 'gpu_hist' $notebookName -\n```\n\nThe CI pipeline captures the name of the Jupyter Notebook with the training modeling template and passes parameters to ensure [XGBoost](https://xgboost.readthedocs.io/en/stable/) uses the provisioned GPU. You can find an example of the Jupyter modeling template that is executed in this job [here](https://gitlab.com/gitlab-data/data-science-ci-example/-/blob/main/notebooks/training_example.ipynb?ref_type=heads).\n\nOnce the data science image is built, it can be reutilized in further model training jobs. The `train-commit-activated` job pulls the image from the GitLab Container Registry and utilizes it to run the ML pipeline defined in the training notebook. This is illustrated in the `CI Job - Train model` in the figure below:\n\n![training](https://about.gitlab.com/images/blogimages/2023-10-04-there-is-no-mlops-without-devsecops/training_job.png)\nTraining job executes ML pipeline defined in the modeling notebook\n{: .note.text-center}\n\nSince our image contains CUDA drivers and GitLab automatically provisions GPU-enabled hardware, the training job runs significantly faster with respect to standard hardware.\n\n### Using GitLab ML experiment tracker\nEach model training execution triggered using GitLab CI is an experiment that needs tracking. 
Using Experiment tracking in GitLab helps us to record metadata that comes in handy to compare model performance and collaborate with other data scientists by making result experiments available for everyone and providing a detailed history of the model development.\n\n![experiments](https://about.gitlab.com/images/blogimages/2023-10-04-there-is-no-mlops-without-devsecops/experiments.png)\nExperiments automatically logged on every CI pipeline GPU training run \n{: .note.text-center}\n\nEach model artifact created can be traced back to the pipeline that generated it, along with its dependencies:\n\n![traceability](https://about.gitlab.com/images/blogimages/2023-10-04-there-is-no-mlops-without-devsecops/traceability_small.png)\nModel traceability from pipeline run to candidate details\n{: .note.text-center}\n\n### Putting it all together\nWhat is machine learning without data to learn from? We also leveraged the [Snowflake](https://www.snowflake.com/en/) connector in the model training notebook and automated the data extraction whenever the respective commit triggers a training job. 
Here is an architecture of the current solution with all the parts described in this blog post:\n\n![process](https://about.gitlab.com/images/blogimages/2023-10-04-there-is-no-mlops-without-devsecops/training_fixed.png)\nData Science pipelines automated using GitLab DevSecOps Platform\n{: .note.text-center}\n\n| Challenge | Solution |\n| ------ | ------ | \n|Prone to error due to manual steps | Automate steps with [GitLab CI/CD](https://docs.gitlab.com/ee/ci/) |\n|Experimentation results that are hard to replicate    |  Record metadata and model artifacts with [GitLab Experiment Tracker](https://docs.gitlab.com/ee/user/project/ml/experiment_tracking/)    |\n|The long training time of machine learning models     |  Train models with [GitLab SaaS GPU Runners](https://docs.gitlab.com/ee/ci/runners/saas/gpu_saas_runner.html)  |\n\nIterating on these challenges is a first step towards MLOps, and we are at the tip of the iceberg; in coming iterations, we will adopt security features to ensure model provenance (software bill of materials) and code quality, and to monitor our ML workflow development with value stream dashboards. 
But so far, one thing is sure: **There is no MLOps without DevSecOps**.\n\nGet started automating your data science pipelines, follow this [tutorial](https://about.gitlab.com/handbook/business-technology/data-team/platform/ci-for-ds-pipelines/) and clone this [data-science-project](https://gitlab.com/gitlab-data/data-science-ci-example) to follow along and watch this demo of using GPU Runners to train [XGBoost](https://xgboost.readthedocs.io/en/stable/) model.\n\nSee how data scientists can train ML models with GitLab GPU-enabled Runners (XGBoost 5-minute demo):\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/tElegG4NCZ0?si=L1IZfx_UGv6u81Gk\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\n## More \"Building GitLab with GitLab\" blogs\nRead more of our \"Building GitLab with GitLab\" series:\n- [How we use Web API fuzz testing](https://about.gitlab.com/blog/building-gitlab-with-gitlab-api-fuzzing-workflow/)\n- [How GitLab.com inspired GitLab Dedicated](https://about.gitlab.com/blog/building-gitlab-with-gitlabcom-how-gitlab-inspired-dedicated/)\n",[724,703,482,9],{"slug":2920,"featured":6,"template":680},"there-is-no-mlops-without-devsecops","content:en-us:blog:there-is-no-mlops-without-devsecops.yml","There Is No Mlops Without Devsecops","en-us/blog/there-is-no-mlops-without-devsecops.yml","en-us/blog/there-is-no-mlops-without-devsecops",{"_path":2926,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":2927,"content":2932,"config":2938,"_id":2940,"_type":14,"title":2941,"_source":16,"_file":2942,"_stem":2943,"_extension":19},"/en-us/blog/top-10-gitlab-technical-blogs-of-2023",{"title":2928,"description":2929,"ogTitle":2928,"ogDescription":2929,"noIndex":6,"ogImage":2256,"ogUrl":2930,"ogSiteName":693,"ogType":694,"canonicalUrls":2930,"schema":2931},"Top 10 GitLab technical blogs of 2023","2023 was a big year! 
Catch up on expert insights into DevSecOps, AI, CI/CD, and more.","https://about.gitlab.com/blog/top-10-gitlab-technical-blogs-of-2023","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Top 10 GitLab technical blogs of 2023\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Sandra Gittlen\"}],\n        \"datePublished\": \"2024-01-09\",\n      }",{"title":2928,"description":2929,"authors":2933,"heroImage":2256,"date":2935,"body":2936,"category":1156,"tags":2937},[2934],"Sandra Gittlen","2024-01-09","2023 brought fresh insights from experts across GitLab and beyond —  all of them focused on the challenges and opportunities facing DevSecOps teams. From Lockheed Martin to CARFAX, organizations are trying to understand and unlock the power of technologies such as artificial intelligence (AI), CI/CD, security automation, and more. Our experts provided tips, best practices, and tutorials to use throughout the software development lifecycle.\n\nHere are the top 10 technical blogs from what was an incredible year in DevSecOps innovation.\n\n**1. [Jenkins to GitLab: The ultimate guide to modernizing your CI/CD environment](https://about.gitlab.com/blog/jenkins-gitlab-ultimate-guide-to-modernizing-cicd-environment/)**\nLooking for a smooth transition from Jenkins to GitLab? Follow this step-by-step tutorial to learn how GitLab's integrated CI/CD capabilities help deliver high-quality software faster.\n\n**2. [U.S. Navy Black Pearl: Lessons in championing DevSecOps](https://about.gitlab.com/blog/u-s-navy-black-pearl-lessons-in-championing-devsecops/)**\nSigma Defense's director of engineering details what it's like to manage the U.S. Navy's Black Pearl, which uses GitLab as its DevSecOps platform. The DevSecOps champion relays his experience implementing DevSecOps and the benefits of that decision.\n\n**3. 
[Quickstart guide for GitLab Remote Development workspaces](https://about.gitlab.com/blog/quick-start-guide-for-gitlab-workspaces/)**\nEnabling developers to work in their preferred environments empowers DevSecOps teams to build and deliver software more efficiently. With these quickstart instructions, developers can create a workspace, use the Web IDE Terminal to install dependencies or start their server, and view their running application.\n\n**4. [Introducing the GitLab CI/CD Catalog Beta](https://about.gitlab.com/blog/introducing-the-gitlab-ci-cd-catalog-beta/)**\nCI/CD catalogs are a game-changer, allowing developers to discover, integrate, and share pre-existing CI/CD components with ease. This tutorial shows how to get the most from this new DevSecOps platform feature.\n\n**5. [Combine GitLab Flow and GitLab Duo for a workflow powerhouse](https://about.gitlab.com/blog/gitlab-flow-duo/)**\nGitLab Flow and GitLab Duo can help organizations achieve significant improvements in end-to-end workflow efficiency that can lead to higher levels of productivity, deployment frequency, code quality and overall security, and production resiliency and availability. Find out how with this step-by-step guide.\n\n**6. [Efficient DevSecOps workflows: Hands-on python-gitlab API automation](https://about.gitlab.com/blog/efficient-devsecops-workflows-hands-on-python-gitlab-api-automation/)**\nThe python-gitlab library is a useful abstraction layer for the GitLab API. Dive into hands-on examples and best practices in this tutorial.\n\n**7. [Building GitLab with GitLab: Why there is no MLOps without DevSecOps](https://about.gitlab.com/blog/there-is-no-mlops-without-devsecops/)**\nAt GitLab, we believe in the power of MLOps, especially when combined with DevSecOps. So follow along as our data scientists adopt DevSecOps practices and enjoy the benefits of automation, repeatable workflows, standardization, and automatic provisioning of infrastructure.\n\n**8. 
[Explore the Dragon Realm: Build a C++ adventure game with a little help from AI](https://about.gitlab.com/blog/building-a-text-adventure-using-cplusplus-and-code-suggestions/)**\nReaders are invited to create a mystical world while learning how to integrate AI into their coding environment. This tutorial demonstrates how to use GitLab Duo Code Suggestions to create a text-based adventure game, including magical locations to visit and items to procure, using C++. \n\n**9. [How GitLab's Red Team automates C2 testing](https://about.gitlab.com/blog/how-gitlabs-red-team-automates-c2-testing/)**\nThe GitLab Red Team conducts security exercises that simulate real-world threats. They apply professional development practices to using the same open source C2 tools as threat actors. In this tutorial, the GitLab Red Team shares how they implement continuous testing for the Mythic framework, their design philosophy, and a public project that can be forked for use by other Red Teams.\n\n**10. [Building GitLab with GitLab: How GitLab.com inspired Dedicated](https://about.gitlab.com/blog/building-gitlab-with-gitlabcom-how-gitlab-inspired-dedicated/)**\nThe design of GitLab Dedicated, our single-tenancy SaaS version of the DevSecOps platform, came from the lessons learned while building GitLab.com. 
In this peek behind the curtains, learn the considerations that sparked different decisions regarding automation, databases, monitoring, availability, and more – and what the outcome was.\n\nSign up for the GitLab newsletter using the form to the right to receive the latest blogs right in your inbox.\n",[9,109,482,703,678,724],{"slug":2939,"featured":91,"template":680},"top-10-gitlab-technical-blogs-of-2023","content:en-us:blog:top-10-gitlab-technical-blogs-of-2023.yml","Top 10 Gitlab Technical Blogs Of 2023","en-us/blog/top-10-gitlab-technical-blogs-of-2023.yml","en-us/blog/top-10-gitlab-technical-blogs-of-2023",{"_path":2945,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":2946,"content":2951,"config":2956,"_id":2958,"_type":14,"title":2959,"_source":16,"_file":2960,"_stem":2961,"_extension":19},"/en-us/blog/top-10-ways-machine-learning-may-help-devops",{"title":2947,"description":2948,"ogTitle":2947,"ogDescription":2948,"noIndex":6,"ogImage":2892,"ogUrl":2949,"ogSiteName":693,"ogType":694,"canonicalUrls":2949,"schema":2950},"Top 10 ways machine learning may help DevOps","Is machine learning part of your DevOps plan? Here are some ways ML could fit right in.","https://about.gitlab.com/blog/top-10-ways-machine-learning-may-help-devops","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Top 10 ways machine learning may help DevOps\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"GitLab\"}],\n        \"datePublished\": \"2022-02-14\",\n      }",{"title":2947,"description":2948,"authors":2952,"heroImage":2892,"date":2953,"body":2954,"category":2085,"tags":2955},[804],"2022-02-14","_This post is meant as a general introduction to DevOps and machine learning, but does not represent GitLab’s roadmap with ModelOps. 
Read more about [our ModelOps plans](/blog/introducing-modelops-to-solve-data-science-challenges/)._\n\nLike a superhero’s cape, machine learning can enhance the innate powers of your DevOps program. \n\nYes, it’s early days, and no, machine learning can’t do everything you may want it to – yet. But if you [start using ML tools now](/topics/devops/the-role-of-ai-in-devops/), you’ll be poised to make it a full-fledged participant in your DevOps team as the technology continues to mature. Here are some things ML can help with today.\n\n1. **Make sense of your test data.** Whether it’s regression, unit, functional, or user acceptance testing, ML can help sort through the data generated from those tests, find patterns, figure out the coding problems that caused any bugs, and alert the troops. \n\n2. **Manage your help-desk alerts.** You can teach ML about the factors that make up different types of alerts and automatically route alerts to the best-qualified (mostly human) problem-solver, be it the service desk or a networking guru. Some ML systems can also fix problems without human intervention, based on rules you create.\n\n3. **Put the security into “DevSecOps.”** ML algorithms can, in real time, look through the massive amount of information generated from your security software and network logs and determine if there’s a breach long before a human could. The ML software compares the usual network-traffic baseline to what it’s seeing currently and detects when there’s an attack, or it can tell you if the amount of code in an app or system has suddenly grown to double its size when it shouldn’t have. ML can also triage the problems it finds, as well as take actions to correct security issues based on your guidelines. Further, ML tools can also help ensure your governance rules are followed and create a detailed audit trail.\n\n4. 
**Gather user requirements.** Natural language processing has come a long way, and can collect, validate, and track documents to streamline the process of figuring out what users are asking for. The technology can also help detect incomplete requirements or wonky timelines and can translate user wants and needs into highly technical project requirements. This makes the entire project-management process more efficient.\n\n5. **Help with pesky dev details.** No, not to replace developers, of course – at least not yet. But ML can learn from past apps you’ve created to recommend security guardrails and how to make software scale and perform better, among other things. Developers definitely see this trend coming, and in [GitLab’s 2021 Global DevSecOps Survey](/developer-survey/), around a third said that an understanding of AI or ML is the most important skill for their future careers. ML-powered code completion tools are already on the market, which provide suggestions for app developers.\n\n6. **Automate testing and create test data.** ML can automatically create the tests you need for QA and the test cases they’re based on, generate and manage test data, and automate code reviews. Natural language processing can help you review test cases and eliminate duplicates, as well as identify gaps in test coverage. Teams will continue to use machine learning models to [make test automation smarter](https://www.forrester.com/blogs/predictions-2021-software-developers-face-mounting-pressure/) , Forrester Research predicts.\n\n7. **Reduce complexity and allow better communication throughout the software chain.** ML can smooth out the rough edges among teams responsible for different parts of the process, and act as an Esperanto of sorts to allow people to speak to each other using the same language. No more, “It worked on my machine.” \n\n8. 
**Save time on manual provisioning.** Sure the cloud makes this easier, but ML can provision what it thinks you’ll need before you actually need it. \n\n9. **Improve software and product quality.** ML can help find issues like resource leaks, wasted CPU cycles, and other problems, so you can optimize your code before it hits production. At Facebook, [a bug detection tool](https://www2.deloitte.com/us/en/insights/focus/signals-for-strategists/ai-assisted-software-development.html/#:~:text=AI%20is%20helping%20to%20make%20better%20software%20Professionals%20are%20using,in%20design,%20development,%20and%20deployment&text=Artificial%20intelligence%20isn't%20writing,develop%20and%20test%20custom%20software.) predicts defects and suggests remedies that prove correct 80% of the time, Deloitte reports. And the IEEE ran a study from Google X about an ML method that [predicts failures of individual components](https://ieeexplore.ieee.org/document/7448033) that was “far more accurate than the traditional MTBF approach.” \n\n10. **Integrate your workflows and allow continuous improvement.** Some DevOps teams are using ML to analyze all development, operational, and test tools to find any gaps, as well as where pieces of the pipeline need to be better integrated and where APIs are still needed. ML algorithms can help teams figure out why some projects go very well, and others don’t. You can use ML to monitor your monitors and make sure they’re fully operational. Further, ML continues to learn from its training models – both the ones you provide and those it learns on its own as it goes – to continue to help you provide better products and services over time. And when you get down to it, isn’t that the whole point of technology?\n\n_Our [2022 Global DevSecOps Survey](/developer-survey/) is out now! 
Learn the latest in DevOps insights from over 5,000 DevOps professionals._",[849,786,1894,9],{"slug":2957,"featured":6,"template":680},"top-10-ways-machine-learning-may-help-devops","content:en-us:blog:top-10-ways-machine-learning-may-help-devops.yml","Top 10 Ways Machine Learning May Help Devops","en-us/blog/top-10-ways-machine-learning-may-help-devops.yml","en-us/blog/top-10-ways-machine-learning-may-help-devops",{"_path":2963,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":2964,"content":2969,"config":2974,"_id":2976,"_type":14,"title":2977,"_source":16,"_file":2978,"_stem":2979,"_extension":19},"/en-us/blog/top-tips-for-efficient-ai-powered-code-suggestions-with-gitlab-duo",{"title":2965,"description":2966,"ogTitle":2965,"ogDescription":2966,"noIndex":6,"ogImage":861,"ogUrl":2967,"ogSiteName":693,"ogType":694,"canonicalUrls":2967,"schema":2968},"Top tips for efficient AI-powered Code Suggestions with GitLab Duo","Explore best practices  for using Code Suggestions and how to combine it with our other AI features to greatly improve the developer experience (includes real-world exercises).","https://about.gitlab.com/blog/top-tips-for-efficient-ai-powered-code-suggestions-with-gitlab-duo","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Top tips for efficient AI-powered Code Suggestions with GitLab Duo\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Michael Friedrich\"}],\n        \"datePublished\": \"2024-06-11\",\n      }",{"title":2965,"description":2966,"authors":2970,"heroImage":861,"date":2971,"body":2972,"category":10,"tags":2973},[1217],"2024-06-11","[GitLab Duo](https://about.gitlab.com/gitlab-duo/), our suite of AI-powered features, provides a unique opportunity to make your DevSecOps workflows more efficient. To make the most of GitLab Duo requires hands-on practice and learning in public together. 
This tutorial centers on GitLab Duo Code Suggestions and provides tips and tricks, learned best practices, and some hidden gems (including how to pair Code Suggestions with our other AI features for even more efficiency). You'll also discover how AI greatly improves the developer experience.\n\nThe best practices, tips, and examples in this article have been created from scratch and are included in the [GitLab Duo documentation](https://docs.gitlab.com/ee/user/gitlab_duo/index.html) and [GitLab Duo prompts project](https://gitlab.com/gitlab-da/use-cases/ai/ai-workflows/gitlab-duo-prompts), maintained by the GitLab Developer Relations team. Bookmark this page, and navigate into the respective chapters at your convenience.\n\nWhat you'll learn:\n\n1. [Why use GitLab Duo Code Suggestions?](#why-use-gitlab-duo-code-suggestions%3F)\n1. [Start simple, refine prompts](#start-simple-refine-prompts)\n1. [Practice, practice, practice](#practice-practice%2C-practice)\n    - [Fix missing dependencies](#fix-missing-dependencies)\n    - [Boilerplate code: Optimized logging](#boilerplate-code-optimized-logging)\n    - [Utility helper functions, well tested](#utility-helper-functions-well-tested)\n    - [Generate regular expressions](#generate-regular-expressions)\n1. [Re-trigger Code Suggestions](#re-trigger-code-suggestions)\n    - [Common keyboard combinations to re-trigger Code Suggestions](#common-keyboard-combinations-to-re-trigger-code-suggestions)\n    - [Stuck in the middle of suggestions](#stuck-in-the-middle-of-suggestions)\n    - [Code Suggestions stopped](#code-suggestions-stopped)\n1. 
[Code Suggestions vs, code generation](#code-suggestions-vs.-code-generation)\n    - [Start with a comment on top for code generation](#start-with-a-comment-on-top-for-code-generation)\n    - [Intent detection for code suggestions and generation](#intent-detection-for-code-suggestions-and-generation)\n    - [Tell a story for efficient code generation](#tell-a-story-for-efficient-code-generation)\n    - [Generate regular expressions](#generate-regular-expressions)\n    - [Iterate faster with code generation](#iterate-faster-with-code-generation)\n    - [Practical code generation: Cloud-native observability](#practical-code-generation-cloud-native-observability)\n1. [Take advantage of all GitLab Duo features](#take-advantage-of-all-gitlab-duo-features)\n    - [Combine Chat with Code Suggestions](#combine-chat-with-code-suggestions)\n    - [Use Chat to generate build configuration](#use-chat-to-generate-build-configuration)\n    - [Use Chat to explain potential vulnerabilities](#use-chat-to-explain-potential-vulnerabilities)\n    - [Combine vulnerability resolution with Code Suggestions](#combine-vulnerability-resolution-with-code-suggestions)\n1. [More tips](#more-tips)\n    - [Verify code quality and security](#verify-code-quality-and-security)\n    - [Learn as a team, and understand AI's impact](#learn-as-a-team-and-understand-ai-impact)    \n    - [Development is a marathon, not a sprint](#development-is-a-marathon-not-a-sprint)\n    - [Contribute using GitLab Duo](#contribute-using-gitlab-duo)\n1. [Share your feedback](#share-your-feedback)\n\n## Why use GitLab Duo Code Suggestions?\n\nConsider these two scenarios:\n\n1. As a senior developer, you have the confidence in your ability with various programming languages to write new source code, review existing code, design resilient architectures, and implement new projects. However, getting familiar with the latest programming language features requires time, research, and a change of habits. 
So how can you quickly learn about new language feature additions that could make your code even more robust or use resources more sustainably?\n\n    - As a personal example, I learned the C++03 standard, later C++11 and never really touched base on C++14/17/20/23 standards. Additionally, new languages such as [Rust](https://about.gitlab.com/blog/learning-rust-with-a-little-help-from-ai-code-suggestions-getting-started/) came around and offered better developer experiences. What now?\n\n2. As a new engineer, it can be challenging to navigate new projects, get familiar with a new programming language, understand specific algorithms, and find the documentation for structures, interfaces, and other technical components. New engineers are also learning under pressure, which often leads to errors and roadblocks along the way. There is no time for digging into best practices.\n\n    - I, myself, never really learned frontend engineering, just some self-taught HTML, CSS, and JavaScript. Adapting into frontend frameworks such as VueJS after a decade feels overwhelming, and I have little time to learn.\n\nThese scenarios show how hard it can be to keep up with the latest programming languages, best practices, and other key information. [GitLab Duo Code Suggestions](https://about.gitlab.com/solutions/code-suggestions/), which predictively completes code blocks, defines function logic, generates tests, and proposes common code like regex patterns – all in your coding environment. Code Suggestions provides the AI assistance necessary to learn what you need to know while staying in your development flow.\n\n> Live demo! Discover the future of AI-driven software development with our GitLab 17 virtual launch event. [Register today](https://about.gitlab.com/seventeen/)!\n\n## Start simple, refine prompts\n\nMy own GitLab Duo adoption journey started with single-line code comments, leading to not-so-great results at first. 
\n\n```\n# Generate a webserver\n\n// Create a database backend\n\n/* Use multi-threaded data access here */\n```\n\nAfter experimenting with different contexts, and writing styles, I found that code generation out of refined comments worked better. \n\n```\n# Generate a webserver, using the Flask framework. Implement the / URL endpoint with example output.\n\n// Create a database backend. Abstract data handlers and SQL queries into function calls.\n\n/* Use multi-threaded data access here. Create a shared locked resource, and focus on supporting Linux pthreads. */\n```\n\nCode comments alone won't do the trick, though. Let's explore more best practices.\n\n## Practice, practice, practice \n\nFind use cases and challenges for your daily workflows, and exclusively use GitLab Duo. It can be tempting to open browser search tabs, but you can also solve the challenge in your IDE by using GitLab Duo. Here are some examples:\n\n1. Fix missing dependencies (which always cause build/execution failures).\n1. If you're missing logging context, let Code Suggestions auto-complete started function calls, including `print` statements.\n1. Generate common methods and attributes for object-oriented design patterns (e.g. getter/setter methods, `toString()` and object comparison operators, object inheritance, etc.).\n1. Identify the function that generates random crashes. Use Code Suggestions to implement a new function with a different algorithm.\n1. If you encounter application cannot be compiled or executed, cryptic error, ask GitLab Duo Chat about it.\n1. Learn about existing (legacy) code, and strategies to document and refactor code into modern libraries. Start a v2 of an application with a new framework or programming language, helping solve technical debt.\n1. Prevent operations and security issues in Git history by detecting them before they occur (e.g. 
performance, crashes, security vulnerabilities).\n\nThink of the most boring - or most hated - coding task, and add it to the list above. My least favorite tasks are attribute getter/setter methods in C++ classes (as can be seen in the video below), immediately followed by regular expressions for email address format.\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/Z9EJh0J9358?si=QGvQ6mXxPPz4WpM0\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\nIt can also help to use Code Suggestions in different programming languages, for example focusing on backend and frontend languages. If you are experienced in many languages, take a look into languages that you have not used in a while, or look into learning a new programming language such as [Python](https://about.gitlab.com/blog/learning-python-with-a-little-help-from-ai-code-suggestions/) or [Rust](https://about.gitlab.com/blog/learning-rust-with-a-little-help-from-ai-code-suggestions-getting-started/). \n\nWhen you adopt Code Suggestions into a fast auto-completion workflow, it can happen without any interruption. The suggested code is greyed out and optional, depending on the user interface – for example, VS Code. This means that it will not distract you from continuing to write source code. Try using Code Suggestions on your own by familiarizing yourself with how suggestions are shown, how you can fully or partially accept them, and soon they will become optional help to write better code. \n\n![Image with code suggestions greyed out](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749677063/Blog/Content%20Images/duo_code_suggestions_java_springboot_class_methods_tostring.png)\n\n### Fix missing dependencies\n\nAfter building or running source code, missing dependency errors might be logged and prevent further execution and testing. 
The following example in Go shows an error from `go build`, where the source code did not import any dependencies yet. A manual approach can be collecting all listed dependencies, running a unique sort on them, and adding them into the source code file, as shown below.\n\n![Go build failed - missing dependencies](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749677063/Blog/Content%20Images/duo_code_suggestions_go_build_failed_missing_deps.png)\n\nBut what if GitLab Duo knows about the file context and missing dependencies already? Navigate into the top section and add a comment, saying `// add missing imports` and wait for Code Suggestions.\n\n![GitLab Duo Code Suggestions - go build failed missing dependencies suggested fix](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749677063/Blog/Content%20Images/duo_code_suggestions_go_build_failed_missing_deps_suggested_fix.png)\n\nRunning `go build` again results in success, and the source code can be tested and run.\n\n![Go build failed - missing dependencies fixed](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749677063/Blog/Content%20Images/duo_code_suggestions_go_build_failed_missing_deps_fixed.png)\n\n### Boilerplate code: Optimized logging\n\n*Q: Logging – and more observability data with metrics and traces – can be hard and tedious to implement. What is the most efficient way to implement them that does not impact the application performance or cause bugs?*\n\n*A: Use Code Suggestions to generate logging function calls, and refactor the code into robust observability instrumentation library abstractions. 
This method can help to prepare the code for later integration with [OpenTelemetry](https://docs.gitlab.com/ee/development/stage_group_observability/gitlab_instrumentation_for_opentelemetry.html), for example.*\n\nExample for a logging class in Ruby:\n\n```ruby\n# Create Logging utility class\n# Define default log level as attribute\n# Add method for logging, inputs: level, app, message\n# Print the data with formatted date and time in syslog format\n\n# Potential AI-generated code below\nclass Logging\n  attr_accessor :log_level\n\n  def log(level, app, message)\n    time = Time.now.strftime(\"%b %d %H:%M:%S\")\n    puts \"#{time} #{app}[#{Process.pid}]: #{level} - #{message}\"\n  end\nend\n\n# Instantiate class and test the log method\n\n# Potential AI-generated code below\nlogger = Logging.new\nlogger.log_level = :debug\n\nlogger.log(:info, 'MyApp', 'Application started - info')\nlogger.log(:debug, 'MyApp', 'Application started - debug')\nlogger.log(:error, 'MyApp', 'Application started - error')\nlogger.log(:fatal, 'MyApp', 'Application started - fatal')\nlogger.log(:warn, 'MyApp', 'Application started - warn')\n```\n\n### Utility helper functions, well tested\n\n*Q: The programming language does not provide basic functions in the standard library. I'm tempted to open my browser to add string manipulation and regular expression parser functions.*\n\n*A: Create a new file called `utility.{language-suffix}` and add a code comment on top. 
Instruct Code Suggestions to generate a string manipulation function ().*\n\nExample for a string manipulation method in Python:\n\n```python\n# Create a function to search for a string in a string, return the index\n# Optionally remove search string, and return modified string\n# Test the function on the string \"Cool programming languages: C++, Rust, Python\" - Remove the string \"C++\"\n\n# Potential AI-generated code below\ndef search_and_remove(text, search):\n    index = text.find(search)\n    if index != -1:\n        return index, text[:index] + text[index+len(search):]\n    else:\n        return None, text\n\ntext = \"Cool programming languages: C++, Rust, Python\"\nindex, new_text = search_and_remove(text, \"C++\")\nprint(index, new_text)\n```\n\n**Async exercise:** Ask GitLab Duo Chat how to add tests in Python, select the source code, and use the `/tests` slash command. \n\nA similar example can be implemented in Go, creating utility functions for unique integer values in an array, or the sum of all even values. Start with defining the project structure through `go mod init gitlab.com/group/project` and create the `array.go` file. 
Define the `package` and start with the first code comment to generate the functions.\n\n```go\npackage array \n\n// Create a function that returns unique values from an integer array\n\n// Possible AI-generated code\nfunc unique(ints []int) []int {\n    occurred := map[int]bool{}\n    result := []int{}\n    for _, i := range ints {\n        if !occurred[i] {\n            occurred[i] = true\n            result = append(result, i)\n        }\n    }\n    return result\n}\n\n// Create a function that returns the sum of all even numbers in an integer array\n\n// Possible AI-generated code\nfunc sumEvens(ints []int) int {\n    var sum int\n    for _, i := range ints {\n        if i%2 == 0 {\n            sum += i\n        }\n    }\n    return sum\n}\n```\n\n**Async exercise**: Create more utility helper functions in dedicated libraries, and use Chat to select and generate `/tests`. For the Go example, you can inspect potential solutions in the `go/utility/array_test.go` file in the [GitLab Duo Prompts project](https://gitlab.com/gitlab-da/use-cases/ai/ai-workflows/gitlab-duo-prompts). Build and test the code using `go build && go test`.\n\n### Generate regular expressions\n\nDevelopers' favorite one liners, never touched again. `git blame` knows very well but might not be able to provide enough context. GitLab Duo can help with regular expressions creation, explanation, and refactoring, in the following example:\n\n*Q: My regular expressions for parsing IPv6 and IPv4 addresses do not work. What's the best approach to solve this?*\n\n*A: Use Code Suggestions comments to generate examples using these regex types. Combine the questions with Chat, and ask for more examples in different languages. You can also select the existing source, and use a refined prompt with `/refactor using regular expressions` in the Chat prompt.*\n\n**Async exercise**: Choose your favorite language, create a function stub that checks IPv6 and IPv4 address strings for their valid format. 
Trigger Code Suggestions to generate a parsing regular expression code for you. Optionally, ask Chat how to refine and refactor the regex for greater performance.\n\nI chose TypeScript, a language on my personal learning list for 2024: `// Generate a TypeScript function which parses IPv6 and IPv4 address formats. Use regular expressions`.\n\n![Code Suggestions - typescript utility parse ip address regex](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749677063/Blog/Content%20Images/duo_code_suggestions_typescript_utility_parse_ip_address_regex.png)\n\n![Code Suggestions typescript - utility parse ip address regex tests](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749677063/Blog/Content%20Images/duo_code_suggestions_typescript_utility_parse_ip_address_regex_tests.png)\n\n## Re-trigger Code Suggestions\n\nYou can trigger Code Suggestions by pressing the `enter` or `space` key, depending on the context. In VS Code and the GitLab Web IDE, the GitLab Duo icon will appear in the same line, and at the bottom of the window.\n\nIf you accepted a suggestion, but actually want to try a different suggestion path, select the code, delete the line(s) and start over.\n\n> **Tip:** Different keystrokes and strategies for Code Suggestions are recorded in this video:\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/ORpRqp-A9hQ?si=CmA7PBJ9ckWsvjO3\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\n### Common keyboard combinations to re-trigger Code Suggestions\n\nEspecially in the early adoption phase of Code Suggestions, you'll need to practice to get the best results from comments, existing code style, etc., put into context.\n\nA common keystroke pattern for triggering suggestions can be\n\n1. Press `Enter` and wait for the suggestion.\n1. Press `Space` followed by `Backspace` to immediately delete the whitespace again, or\n1. 
Press `Enter` to re-trigger the suggestion. `Backspace` to delete any leftover new lines.\n\nWhen a suggestion makes sense, or you want to see how far you can get:\n\n1. Continue pressing `Tab` to accept the suggestion.\n1. Add a space or press `Enter` to open a new scope for triggering a new suggestion.\n1. Continue accepting suggestions with `Tab`. \n\nNote that generative AI sometimes ends up in a loop of suggesting similar code paths over and over again. You can trigger this behavior by inserting test data into an array, using strings and numbers in a sorted order or by generating different API endpoints, as it tries to guess which other endpoints could be helpful. When this happens, break the acceptance flow, and continue writing code as normal.\n\n### Stuck in the middle of suggestions\n\nSometimes, the code suggestions may stop in the middle of a variable, function, etc. definition. If you are unsure about the syntax, or want to restart the code suggestions:\n\n1. Delete the last character(s) or the entire line, using `Backspace`.\n1. Alternatively, use `shift cursor left` (select characters) or `cmd shift cursor left` (select entire line), followed by `Backspace`.\n1. Move the cursor into the line above, and press `Enter` to force a Code Suggestions trigger again.\n\n### Code Suggestions stopped\n\nWhen Code Suggestions stops, there can be multiple reasons:\n\n1. The current file scope ends – for example, a `main()` function has been generated and closed.\n1. There could be connection problems to the GitLab instance (self-managed) or GitLab.com (SaaS, [Dedicated](https://about.gitlab.com/dedicated/)). Follow the [troubleshooting documentation](https://docs.gitlab.com/ee/user/project/repository/code_suggestions/troubleshooting.html).\n\n## Code suggestions vs. code generation\n\nCode suggestions \"come as you go\" while writing code, and help with completing line(s). 
Code generation on the other hand requires more context to create entire code blocks, consisting of functions, algorithms, classes, etc. \n\nThe following sections discuss both methods, and how to get started with a practical example. \n\n### Code suggestions flow with comments\n\nUse your natural programming flow, and stop to adapt to adding code comments when helpful for context and better suggestions. You can accept code suggestions using the `Tab` key, or selectively accept words using the `cmd cursor right` keyboard shortcut.\n\nThe following new challenge implements a simple Linux statistics tool in C, mimicking the functionality of `iostat`, `vmstat` and `du` CLI commands on Linux. Sometimes, these low-level metrics come in handy for presenting application metrics, or otherwise help with debugging when requesting support data from customers.\n\nCreate a new application directory called `linux_stat` and `main.c` file, and let Code Suggestions complete everything else. First, start with a simple code comment block:\n\n```c\n// Create a Linux statistic tool\n// Collect IO, performance, disk usage, TCP latency\n// Print summaries\n```\n\nNext, press `Enter` to trigger the creation of the `main()` method. If not automatically created, force it through a new code comment.\n\n```c\n// Create main function to collect data\n```\n\nIn my attempts, generative AI did not immediately implement all functionality. 
Instead, it added a `TODO` comment into the following code block.\n\n```c\nint main() {\n    struct stat statbuf;\n\n    if (stat(\"/etc\", &statbuf) != 0) {\n        perror(\"stat\");\n        return 1;\n    }\n\n    printf(\"IO Block Size: %ld\\n\", statbuf.st_blksize);\n\n    // TODO: add CPU usage, disk usage, network latency measurements\n\n    return 0;\n}\n```\n\nMove the cursor after the `TODO` comment, and continue with a new comment to collect CPU usage:\n\n```c\n    // Collect CPU usage\n\n    // Example for AI-generated code below\n    double cpu_usage;\n\n    FILE* stat_file = fopen(\"/proc/stat\", \"r\");\n    if (stat_file == NULL) {\n        perror(\"fopen\");\n        return 1;\n    }\n\n    // Parse /proc/stat to get CPU usage\n    fscanf(stat_file, \"cpu %lf\", &cpu_usage);\n\n    printf(\"CPU Usage: %.2f%%\\n\", cpu_usage * 100);\n\n    fclose(stat_file);\n```\n\nThe generated code might not be 100% accurate, since measuring CPU usage on Linux requires taking samples over a defined time range. Thus, the caller needs to calculate the real CPU utilization from all values in `/proc/stat`, including [`system, nice, user, idle, iowait`, etc.](https://www.kernel.org/doc/Documentation/filesystems/proc.txt).\n\n**Async challenge:** Optimize the collection process with average calculation.\n\nRepeat the steps for disk usage and network latency. The code comment is written by you, the C source code is generated with GitLab Duo.\n\n```c\n    // Collect disk usage\n\n    // Collect network latency\n\n    // Example for AI-generated code below\n    int sockfd = socket(AF_INET, SOCK_STREAM, 0);\n\n    struct sockaddr_in servaddr;\n    servaddr.sin_family = AF_INET;\n    servaddr.sin_port = htons(80);\n    inet_pton(AF_INET, \"8.8.8.8\", &servaddr.sin_addr);\n```\n\nThe generated code may use public IP addresses to test network latency. Change the IPv4 address to GitLab.com's current address. 
\n\n```c\n    inet_pton(AF_INET, \"172.65.251.78\", &servaddr.sin_addr);\n```\n\nThe generated source code added more header dependencies. Navigate into the top section of the file, and add a comment to include all required headers:\n\n```c\n// Import required headers\n```\n\nBuild and run the application on Linux, example on Ubuntu 22 LTS:\n\n```shell\n# Install compiler\napt install gcc\n\n# Build\ngcc main.c -o linux_stat\n\n# Build: If the math.h header is included, linking against the library is needed, too\ngcc linux_stat.c -o linux_stat -lm\n\n# Run the tool\n./linux_stat\n```\n\n**Bonus:** Change the code to use IPv6, and check the GitLab.com IPv6 address again (`dig gitlab.com AAAA +short`).\n\n```c\n    // Collect network latency\n\n    // Use IPv6 \n\n    // Example for AI-generated code below\n    struct sockaddr_in6 servaddr;\n    servaddr.sin6_family = AF_INET6;\n    servaddr.sin6_port = htons(80);\n    //inet_pton(AF_INET6, \"2001:db8::1\", &servaddr.sin6_addr);\n    inet_pton(AF_INET6, \"2606:4700:90:0:f22e:fbec:5bed:a9b9\", &servaddr.sin6_addr);\n\n    int sockfd = socket(AF_INET6, SOCK_STREAM, 0);\n```\n\n![C Linux stat tests](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749677063/Blog/Content%20Images/duo_code_suggestions_c_linux_stat_tests.png)\n\nThe full working source code is available in the [GitLab Duo Prompts project](https://gitlab.com/gitlab-da/use-cases/ai/ai-workflows/gitlab-duo-prompts) in the directory for C code.\n\n**Async exercise:** Refactor the C code into Rust, using only GitLab Duo. Start by selecting the source code, and use the Duo Chat prompt `/refactor into Rust`. \n\n> **Tip:** Thoughtful code comments make the source code more readable, too. 
This helps new team members with onboarding, site reliability engineers with debugging production incidents, and open source contributors with [merging their first MRs](https://handbook.gitlab.com/handbook/marketing/developer-relations/contributor-success/community-contributors-workflows/#first-time-contributors).\n\n### Start with a comment on top for code generation\n\nSource code can be organized in multiple files. Whether you start with a new application architecture, or refactor existing source code, you can take advantage of code generation with GitLab Duo.\n\nStart with a comment block on top, and make it a step-by-step description. You can also break longer comments into multiple lines, revisiting the examples in this article. This pattern also helps to think about the requirements, and can help refining the prompts. \n\n```diff\n# Generate a webserver, using the Flask framework. \n# Implement the / URL endpoint with example output.\n+# Add an endpoint for Prometheus metrics\n\n// Create a database backend. \n// Abstract data handlers and SQL queries into function calls.\n+// Use PostgreSQL as default backend, and SQLite for developers as fallback.\n\n/* \nUse multi-threaded data access here.\nCreate a shared locked resource, and focus on supporting Linux pthreads. 
\n+Abstract the thread creation/wait procedures into object-oriented classes and methods.\n*/\n```\n\nMore code generation prompts for [supported programming languages](https://docs.gitlab.com/ee/user/project/repository/code_suggestions/supported_extensions.html) are available in the [GitLab Duo Use Cases documentation](https://docs.gitlab.com/ee/user/gitlab_duo/use_cases.html#code-generation-prompts).\n\n### Intent detection for code suggestions and generation\n\nCode Suggestions, depending on the GitLab Language Server in your IDE, will parse and detect the intent and offer code completion suggestions in the same line or code generation.\n\nThe technology in the background uses TreeSitter to parse the code into an [AST](https://en.wikipedia.org/wiki/Abstract_syntax_tree), and determine whether the scope is inside a code comment block (generation), or inside the source code (completion). This detection needs to be executed fast on the client IDE, and proves to be a great use case for [WebAssembly](https://webassembly.org/). You can learn more in [this epic](https://gitlab.com/groups/gitlab-org/-/epics/11568), and the following video, which provides a look into the GitLab Language Server powering Code Suggestions:\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/VQlWz6GZhrs\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\n### Tell a story for efficient code generation\n\nCode generation is art. Tell the story, and AI-powered GitLab Duo can assist you. \n\nThe following example aims to implement an in-memory key-value store in Go, similar to Redis. 
Start with a description comment, and trigger Code Suggestions by continuing with a new line and pressing `Enter`.\n\n```golang\n// Create an in-memory key value store, similar to Redis \n// Provide methods to\n// set/unset keys\n// update values\n// list/print with filters\n```\n\nWe can be more specific – which methods are required for data manipulation? Instruct Code Suggestions to generate methods for setting keys, updating values, and listing all contained data.\n\n```golang\n// Create an in-memory key value store, similar to Redis \n// Provide methods to\n// set/unset keys\n// update values\n// list/print with filters\n```\n\nAccept all suggestions using the `Tab` key. As a next step, instruct Code Suggestions to create a `main` function with test code.\n\n```golang\n// Create a main function and show how the code works\n```\n\nIf the test data is not enough, refine the generated code with a focus on extreme test cases.\n\n> **Tip:** You can use the same method for refined [Chat prompts and test generation](https://docs.gitlab.com/ee/user/gitlab_duo_chat/examples.html#write-tests-in-the-ide), `/tests focus on extreme test cases`.\n\n```golang\n// Add more random test data, focus on extreme test cases\n```\n\n![Code Suggestions - go kv more test data](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749677063/Blog/Content%20Images/duo_code_suggestions_go_kv_more_test_data.png)\n\nThe full example, including fixed dependencies, is located in the [gitlab-duo-prompts project](https://gitlab.com/gitlab-da/use-cases/ai/ai-workflows/gitlab-duo-prompts) in the `code-suggestions/go/key-value-store` directory. Update the `main.go` file, and build and run the code using the following command: \n\n```shell\ngo build\n./key-value-store\n```\n\nThe first iteration was to create a standalone binary and test different implementation strategies for key-value stores. 
Commit the working code and continue with your GitLab Duo adoption journey in the next step.\n\n> **Tip:** New projects can benefit from Code Generation, and require practice and more advanced techniques to use code comments for prompt engineering. This method can also make experienced development workflows more efficient. Proof of concepts, new library introductions, or otherwise fresh iterations might not always be possible in the existing project and framework. Experienced developers seek to create temporary projects, and isolate or scope down the functionality. For example, introducing a database backend layer, and benchmarking it for production performance. Or, a library causing security vulnerabilities or license incompatibilities should be replaced with a different library, or embedded code functionality.\n\n### Iterate faster with code generation\n\nExperienced developers will say, \"There must be a key-value library in Go, let us not reinvent the wheel.\" Fortunately, Go is a mature language with a rich ecosystem, and awesome-go collection projects, for example [avelino/awesome-go](https://github.com/avelino/awesome-go), provide plenty of example libraries. 
Note: This possibility might not be the case for other programming languages, and requires a case-by-case review.\n\nWe can also ask GitLab Duo Chat first, `Which Go libraries can I use for key-value storage?`:\n\n![Chat - ask golang libraries kv](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749677063/Blog/Content%20Images/duo_chat_ask_golang_libs_kv.png)\n\nAnd then refine the Code Suggestions prompt to specifically use the suggested libraries, for example, BoltDB.\n\n```diff\n// Create an in-memory key value store, similar to Redis \n// Provide methods to\n// set/unset keys\n// update values\n// list/print with filters\n+// Use BoltDB as external library\n```\n\nRepeat the pattern from above: Generate the source code functions, then ask GitLab Duo to create a main function with test data, and build the code. The main difference is external libraries, which need to be pulled with the `go get` command first. \n\n```shell\ngo get\ngo build\n```\n\nIf the source code build fails with missing dependencies such as `fmt`, practice using GitLab Duo again: Move the cursor into the `import` statement, and wait for the suggestion to add the missing dependencies. Alternatively, add a comment saying `Import all libraries`.\n\n![Code Suggestions - go kv external lib boltdb fix dependencies](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749677063/Blog/Content%20Images/duo_code_suggestions_go_kv_external_lib_boltdb_fix_deps.png)\n\nYou can also add more test data again, and verify how the functions behave: `// Add more random test data, focus on extreme test cases`. 
In the following example, an empty key causes the program to panic.\n\n![Code Suggestions - Go kv external lib boltdb test extreme cases panic](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749677063/Blog/Content%20Images/duo_code_suggestions_go_kv_external_lib_boltdb_test_extreme_cases_panic.png)\n\nThis example is a great preparation for test cases later on.\n\n### Practical code generation: Cloud-native observability\n\nThink of a client application in Go, which lists the current state of containers, pods, and services in a Kubernetes cluster, similar to the `kubectl get pods` command line. The Kubernetes project provides [Go libraries](https://pkg.go.dev/k8s.io/client-go/kubernetes) to programmatically interact with the Kubernetes APIs, interfaces, and object structures.\n\nOpen your IDE, and create a new Go project.\n\n> **Tip:** You can ask Chat how to do it - `How to start a Go project? Please show CLI command examples`. \n\nStart with a single comment on top of the `main.go` file, and describe the application purpose: Observability in Kubernetes.\n\n```golang\n// Create a client for Kubernetes observability\n```\n\nThink about the main requirements: Get access to Kubernetes, create context, namespace, and inspect the state. 
Additionally, instruct Code Suggestions to import packages and create a main package in the `main.go` file.\n\nFirst iteration:\n\n```golang\n// Create a client for Kubernetes observability\n// Inspect container, pod, service status and print an overview\n```\n\nThis might do unexpected things with hardcoding the access credentials, missing contexts, failing builds.\n\nSecond iteration:\n\n```golang\n// Create a client for Kubernetes observability\n// Create a function that\n// Read the kubernetes configuration file from the KUBECONFIG env var\n// Inspect container, pod, service status and print an overview\n```\n\nThis might not know about Kubernetes contexts and namespaces, thus leading to build errors or unexpected results.\n\nThird iteration:\n\n```golang\n// Create a client for Kubernetes observability\n// Create a function that\n// Read the kubernetes configuration file from the KUBECONFIG env var\n// Create kubernetes context, namespace default\n// Inspect container, pod, service status and print an overview\n```\n\nThis example hardcodes the Kubernetes context and default namespace to generate an initial foundation. Later iterations can read the namespace value from a command line parameter, or configuration file.\n\nThe final example can look like the following. 
In addition to the application functionality, it also instructs Code Suggestions to import all dependencies, and create a `main` package in `main.go`.\n\n```golang\n// Create a client for Kubernetes observability\n// Create a function that\n// Read the kubernetes configuration file from the KUBECONFIG env var\n// Create kubernetes context, namespace default\n// Inspect container, pod, service status and print an overview\n// Import necessary packages\n// Create main package\n```\n\n\u003Cdetails> Solution \n\u003Csummary>\n\n```golang\npackage main\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"os\"\n\n\t\"k8s.io/client-go/kubernetes\"\n\t\"k8s.io/client-go/tools/clientcmd\"\n\tmetav1 \"k8s.io/apimachinery/pkg/apis/meta/v1\"\n)\n\nfunc main() {\n\tkubeconfig := os.Getenv(\"KUBECONFIG\")\n\tconfig, err := clientcmd.BuildConfigFromFlags(\"\", kubeconfig)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\tclientset, err := kubernetes.NewForConfig(config)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\tpods, err := clientset.CoreV1().Pods(\"\").List(context.TODO(), metav1.ListOptions{})\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tfmt.Printf(\"There are %d pods in the cluster\\n\", len(pods.Items))\n\n\t// Additional code to inspect services, containers, etc\n}\n```\n\n\u003C/summary>\n\u003C/details>\n\nExample output:\n\n![duo code suggestions - go k8s o11y output](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749677063/Blog/Content%20Images/duo_code_suggestions_go_k8s_o11y_output.png)\n\n**Async exercise:** Complete the project with code for inspecting services, containers, etc., and export the findings to [OpenTelemetry](https://opentelemetry.io/).\n\n> **Tip:** Practice with the [GitLab Duo use cases: Code generation prompts](https://docs.gitlab.com/ee/user/gitlab_duo/use_cases.html#code-generation-prompts) in the documentation, and/or send merge requests with your working prompts.\n\nWhile recording a short video to highlight how code generation is working, another 
more refined source code was generated. You can inspect the differences in [this commit](https://gitlab.com/gitlab-da/use-cases/ai/ai-workflows/gitlab-duo-prompts/-/commit/a1a46de9789d4791f04b4df9f1a35d05b8e67568), and benefit from both solutions.\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/ORpRqp-A9hQ\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\n## Take advantage of all GitLab Duo features \n\n### Combine Chat with Code Suggestions\n\nIn combination with [GitLab Duo Chat](https://docs.gitlab.com/ee/user/gitlab_duo_chat/index.html), Code Suggestions becomes even more powerful. The following workflow illustrates the intersection of AI efficiency:\n\nWrite and generate new code using Code Suggestions. The source code will be verified through CI/CD automation, code quality tests, and security scanning. But what about the developer's knowledge?\n\n1. In your IDE, select the generated code portions and use the [`/explain` slash command](https://docs.gitlab.com/ee/user/gitlab_duo_chat/examples.html#explain-code-in-the-ide) in the Chat prompt. You can even refine the prompt to `/explain with focus on algorithms`, or otherwise helpful scopes such as potential security or performance problems, etc.\n\n    - Continue writing and maintaining source code, but at some point code quality decreases and refactoring gets challenging. Ask GitLab Duo Chat for help.\n\n2. In your IDE, select the source code, and use the [`/refactor` slash command](https://docs.gitlab.com/ee/user/gitlab_duo_chat/examples.html#refactor-code-in-the-ide) in the Chat prompt. You can refine the prompt to focus on specific design patterns (functions, object-oriented classes, etc.), `/refactor into testable functions` for example._\n\n    - After ensuring more readable code, tests need to be written. What are potential extreme cases, or random data examples for unit tests? 
Research and implementation in various frameworks can take time.\n\n3. In your IDE, select the source code, and use the [`/tests` slash command](https://docs.gitlab.com/ee/user/gitlab_duo_chat/examples.html#write-tests-in-the-ide) in the Chat prompt. You can also refine the prompt to focus on specific test frameworks, scenarios, input methods, etc. \n\n    - Code quality and test coverage reports are green again. Focus on efficient DevSecOps workflows with Code Suggestions again. \n\nMore scenarios are described in the [GitLab Duo use cases documentation](https://docs.gitlab.com/ee/user/gitlab_duo/use_cases.html).\n\n### Use Chat to generate build configuration\n\nThe time-intensive research on getting started with a new project can be exhausting. Especially with different paths to do it right, or alternative frameworks, this can lead to more work than anticipated. Newer programming languages like Rust propose one way (Cargo), while Java, C++, etc. offer multiple ways and additional configuration languages on top (Kotlin DSL, CMake DSL, etc.).\n\nTake advantage of asking GitLab Duo how to start a project, generate specific configuration examples for build tools (e.g. `Please show a gradle.build example for Spring Boot`), and reduce the time to start developing, building, and testing source code.\n\n1. Java, Gradle, Spring Boot: `Please show a gradle.build example for Spring Boot`\n1. C++, CMake, clang: `Please show a basic CMake configuration file for C++17, using clang as compiler.`\n1. Python: `Please show how to initialize and configure a Python project on the CLI`\n1. Rust: `Please show how to initialize and configure a Rust project.`, followed by a refinement question: `Explain the structure of Cargo.toml`.\n1. Go: `Please show how to initialize and configure a Go project`. \n\n### Use Chat to explain potential vulnerabilities\n\nLet us assume that some PHP code was generated to create a web form. 
The code might be vulnerable to security issues.\n\n```php\n\u003C?php \n// Create a feedback form for user name, email, and comments\n// Render a HTML form\n\n$name = $_POST['name'];\n$email = $_POST['email'];\n$comments = $_POST['comments'];\n\necho '\u003Cform method=\"post\">';\necho '\u003Clabel for=\"name\">Name:\u003C/label>';\necho '\u003Cinput type=\"text\" id=\"name\" name=\"name\">';\n\necho '\u003Clabel for=\"email\">Email:\u003C/label>';\necho '\u003Cinput type=\"email\" id=\"email\" name=\"email\">';\n\necho '\u003Clabel for=\"comments\">Comments:\u003C/label>';\necho '\u003Ctextarea id=\"comments\" name=\"comments\">\u003C/textarea>';\n\necho '\u003Cinput type=\"submit\" value=\"Submit\">'; \necho '\u003C/form>';\n\n?>\n```\n\nSelect the source code, and [ask Chat to explain](https://docs.gitlab.com/ee/user/gitlab_duo_chat/examples.html#explain-code-in-the-ide), using a refined prompt with `/explain why this code is vulnerable to bad security actors`. \n\n![Code Suggestions - Chat explains potential vulnerability](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749677063/Blog/Content%20Images/duo_code_suggestions_chat_explain_potential_vulnerability.png)\n\n> **Tip**: We are investigating and learning in the local developer environment. The vulnerable source code can be fixed before it reaches a Git push and merge request that trigger security scanning, which will unveil and track the problems, too. Learning about security vulnerabilities helps improve the developer experience.\n\n### Combine vulnerability resolution with Code Suggestions\n\nLets look into another example with an intentional [vulnerability resolution challenge](https://gitlab.com/gitlab-da/use-cases/ai/ai-workflows/gitlab-duo-challenges/vulnerability-resolution/challenge-resolve-vulnerabilities), and see if we can use Code Suggestions in combination with vulnerability resolution. 
The linked project has been preconfigured with static application security testing (SAST) scanning. You can follow these steps to configure GitLab SAST by using the [SAST CI/CD component](https://gitlab.com/explore/catalog/components/sast) in the `.gitlab-ci.yml` CI/CD configuration file.\n\n```yaml\ninclude:\n  # Security: SAST (for vulnerability resolution)\n  - component: gitlab.com/components/sast/sast@1.1.0\n```\n\nAfter inspecting the vulnerability dashboard and details, you can use [vulnerability explanation](https://docs.gitlab.com/ee/user/application_security/vulnerabilities/#vulnerability-explanation) to better understand the context and potential problems. [Vulnerability resolution](https://docs.gitlab.com/ee/user/application_security/vulnerabilities/index.html#vulnerability-resolution) creates a merge request with a proposed source code fix for a detected security vulnerability. \n\nSometimes, it can be necessary to refine the suggested code. Navigate into the [created MR](https://gitlab.com/gitlab-da/use-cases/ai/ai-workflows/gitlab-duo-challenges/vulnerability-resolution/challenge-resolve-vulnerabilities/-/merge_requests/1), and either copy the Git branch path for local Git fetch, or open the Web IDE from the `Edit` button to continue in the browser. 
Navigate into the source code sections with the fixed code portions, and modify the code with a comment:\n\n```\n// refactor using safe buffers, null byte termination\n```\n\n![duo code suggestions - with vulnerability resolution proposal](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749677063/Blog/Content%20Images/duo_code_suggestions_with_vulnerability_resolution_proposal.png)\n\nAlternatively, you can also open Chat, select the source code and use the `/refactor` slash command.\n\n![duo code suggestions - with vulnerability resolution add duo chat refactor](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749677063/Blog/Content%20Images/duo_code_suggestions_with_vulnerability_resolution_add_duo_chat_refactor.png)\n\nA full example is available in the [GitLab Duo use cases documentation](https://docs.gitlab.com/ee/user/gitlab_duo/use_cases.html#explain-and-resolve-vulnerabilities). \n\nHere is a recording of that example:\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/Ypwx4lFnHP0\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\n## More tips \n\n### Verify code quality and security\n\nMore generated code requires quality assurance, testing, and security measures. Benefit from all features on a DevSecOps platform:\n\n1. [CI/CD components](https://docs.gitlab.com/ee/ci/components/) and [pipeline efficiency](https://docs.gitlab.com/ee/ci/pipelines/pipeline_efficiency.html)\n1. [Code quality](https://docs.gitlab.com/ee/ci/testing/code_quality.html)\n1. [Code test coverage](https://docs.gitlab.com/ee/ci/testing/code_coverage.html)\n1. [Application security](https://docs.gitlab.com/ee/user/application_security/)\n1. 
[Observability](https://docs.gitlab.com/ee/operations/tracing.html)\n\n### Learn as a team, and understand AI impact\n\nAdapt and explore with dedicated team collaboration sessions, and record them for other teams to benefit from later. You can also follow the [GitLab Duo Coffee Chat playlist on YouTube](https://www.youtube.com/playlist?list=PL05JrBw4t0Kp5uj_JgQiSvHw1jQu0mSVZ).\n\nRead about AI impact metrics, including [How to put generative AI to work in your DevSecOps environment](https://about.gitlab.com/the-source/ai/how-to-put-generative-ai-to-work-in-your-devsecops-environment/) and the [Developing GitLab Duo: AI Impact analytics dashboard measures the ROI of AI](https://about.gitlab.com/blog/developing-gitlab-duo-ai-impact-analytics-dashboard-measures-the-roi-of-ai/). Visit the [AI Transparency Center](https://about.gitlab.com/ai-transparency-center/) to learn more about data usage, transparency, and AI ethics at GitLab.\n\n### Development is a marathon, not a sprint\n\nSometimes, code suggestions might take longer to load, compared to local auto-completion features. Take this time as an advantage, and think about the current algorithm or problem you are trying to solve. Often, a secondary thought can lead to more refined ideas. Or you can take a short break to take a sip from your preferred drink, and continue refreshed when the suggestions arrive.\n\nSome algorithms are super complex, or require code dependencies which cannot be resolved through auto-completion help. Proprietary and confidential code may provide less context to the large language models, and, therefore, require more context in the comments for Code Suggestions. Follow your own pace and strategy, and leverage Code Suggestions in situations where they help with boilerplate code, or helper functions. 
\n\n> **Tip:** Explore [Repository X-Ray](https://docs.gitlab.com/ee/user/project/repository/code_suggestions/repository_xray.html) for more Code Suggestions context, and test experimental features, for example, [support for more languages in VS Code](https://docs.gitlab.com/ee/user/project/repository/code_suggestions/supported_extensions.html#add-support-for-more-languages-for-code-suggestions-in-vs-code). More insights can be found in the epic to [improve acceptance rate for Code Suggestions](https://gitlab.com/groups/gitlab-org/-/epics/13085).\n\n### Contribute using GitLab Duo\n\nYou can use GitLab Duo to contribute to open source projects, using Code Suggestions, code refactoring, documentation through explanations, or test generation.\n\nGitLab customers can [co-create GitLab using GitLab Duo](https://docs.gitlab.com/ee/user/gitlab_duo/use_cases.html#use-gitlab-duo-to-contribute-to-gitlab), too. Follow the updated guidelines for [AI-generated contributions](https://about.gitlab.com/community/contribute/dco-cla/#ai-generated-contributions), and watch an example recording from the GitLab Duo Coffee Chat: Contribute to GitLab using Code Suggestions and Chat:\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/TauP7soXj-E\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\n## Share your feedback\n\nGitLab Duo Code Suggestions enables more efficient development workflows. It requires hands-on practice and exercise through tutorials, team workshops, and guided training. Automated workflows with code quality, security scanning, and observability help tackle challenges with newly introduced source code at a much higher frequency. 
Taking advantage of all GitLab Duo features, including Chat, greatly improves the developer experience on the most comprehensive AI-powered DevSecOps platform.\n\nUse the best practices in this tutorial to kickstart your journey, follow the [GitLab Duo documentation](https://docs.gitlab.com/ee/user/gitlab_duo/index.html), and [ask our teams for GitLab Duo AI workshops](https://about.gitlab.com/solutions/gitlab-duo-pro/sales/) (I have already shadowed customer workshops, they are great!). Please share your Code Suggestions feedback in [this issue](https://gitlab.com/gitlab-org/gitlab/-/issues/435783), including screenshots and videos (when possible).\n\n> [Try GitLab Duo for free today!](https://about.gitlab.com/gitlab-duo/#free-trial)",[9,704,724],{"slug":2975,"featured":91,"template":680},"top-tips-for-efficient-ai-powered-code-suggestions-with-gitlab-duo","content:en-us:blog:top-tips-for-efficient-ai-powered-code-suggestions-with-gitlab-duo.yml","Top Tips For Efficient Ai Powered Code Suggestions With Gitlab Duo","en-us/blog/top-tips-for-efficient-ai-powered-code-suggestions-with-gitlab-duo.yml","en-us/blog/top-tips-for-efficient-ai-powered-code-suggestions-with-gitlab-duo",{"_path":2981,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":2982,"content":2987,"config":2993,"_id":2995,"_type":14,"title":2996,"_source":16,"_file":2997,"_stem":2998,"_extension":19},"/en-us/blog/track-machine-learning-model-experiments",{"title":2983,"description":2984,"ogTitle":2983,"ogDescription":2984,"noIndex":6,"ogImage":820,"ogUrl":2985,"ogSiteName":693,"ogType":694,"canonicalUrls":2985,"schema":2986},"Track ML model experiments with new GitLab MLFlow integration","Track the many versions of your machine learning models on GitLab using the MLFlow client.","https://about.gitlab.com/blog/track-machine-learning-model-experiments","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Track ML model 
experiments with new GitLab MLFlow integration\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Eduardo Bonet\"}],\n        \"datePublished\": \"2023-05-11\"\n      }",{"title":2983,"description":2984,"authors":2988,"heroImage":820,"date":2990,"body":2991,"category":10,"tags":2992},[2989],"Eduardo Bonet","2023-05-11","\n\n\u003Ci>This blog is the latest post in an ongoing series about GitLab’s journey to \u003Ca href=\"/blog/ai-ml-in-devsecops-series/\">build and integrate AI/ML into our DevSecOps platform\u003C/a>. The first blog post can be found \u003Ca href=\"/blog/what-the-ml-ai/\">here\u003C/a>. Throughout the series, we’ll feature blogs from our product, engineering, and UX teams to showcase how we’re infusing AI/ML into GitLab.\u003C/i>\n\nThe GitLab DevSecOps platform now features [Machine Learning Model Experiments](https://docs.gitlab.com/ee/user/project/ml/experiment_tracking/), which is available to all GitLab users, making GitLab a powerful tool for creating ML models. Organizations can now track the many versions of their ML models within the GitLab user interface, using the open source [MLFlow](https://github.com/mlflow/mlflow).\n\n\u003Cimg src=\"/images/blogimages/2023-05-11-gitlab-model-experiments/experiment.png\" alt=\"Model experiment\" style=\"border: 1px solid gray;\">\n\n## What is an ML model?\n\nAn ML model is the result of three components: code to extract the patterns from the data, the data where the \npatterns are extracted from, and the configuration used for both, often called \"hyperparameters\". Any change to any of these components can \nlead to changes in the model performance, and keeping track of all of these parts and the results can be challenging. 
\nExperiment tracking aims to make sense of this confusion by keeping a record of all of the variations created, \nalong with the artifacts and results of each trial.\n\n[MLFlow](https://github.com/mlflow/mlflow) is a popular open source solution for ML experiment tracking, \nproviding a client to log different model versions and their metadata. However, it puts the cost of deployment and managing \nits server onto the users.\n\nGitLab makes the tracking process easier not by deploying a managed MLFlow backend, but by \u003Ci>being an MLFlow backend itself\u003C/i>. This marries the best of both worlds: Data scientists don't need to learn yet another client as their code requires minimal to no changes, while GitLab provides everything else. There is no need to manage a server or to implement user management, so there is no need to configure your artifact storage –  this is all provided by the GitLab DevSecOps platform.\n\n## ML model experiment features in GitLab 16.0\n\nWatch this overview of the available features in 16.0:\n\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/uxweU4zT40c\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\n- **Create experiments and candidates using the MLFlow client**: Simply point the MLFlow client to your GitLab project and experiments and runs will be recorded on GitLab, with no additional setup necessary and no need to create a server. Note that MLFlow runs are called \"candidates\" in GitLab, as each of them is a candidate to become a version of a model.\n\n- **User access management**: Experiments are tied to a GitLab project, making it easy to control which users have access to which models. 
\n\n- **Manage candidates directly on the GitLab UI**: Search and explore your logged experiments on GitLab, using the UI you already know.\n\n- **Download candidate data as a CSV**: Data scientists that want to explore or create reports on an experiment can download the necessary data as a CSV file.\n\nTo get started, refer to the [documentation](https://docs.gitlab.com/ee/user/project/ml/experiment_tracking/#machine-learning-model-experiments).\n\n### More to come\n\nGitLab wants to help you manage the entire lifecycle of your machine learning model from creation to packaging, deployment, and monitoring. \nFor more information on what we are working on, keep an eye on the MLOps Incubation Engineering [handbook page](/handbook/engineering/incubation/mlops/) and on our [YouTube playlist](https://www.youtube.com/playlist?list=PL05JrBw4t0KpC6-JQy8lY4tNAZKXBaM_-).\n\nMachine Learning Model Experiments is an experimental feature available to all GitLab tiers, and we are looking for feedback so please [comment in this issue](https://gitlab.com/gitlab-org/gitlab/-/issues/381660).\n\nContinue reading our \"[AI/ML in DevSecOps](/blog/ai-ml-in-devsecops-series/)\" series.\n\n_Disclaimer: This blog contains information related to upcoming products, features, and functionality. It is important to note that the information in this blog post is for informational purposes only. Please do not rely on this information for purchasing or planning purposes. As with all projects, the items mentioned in this blog and linked pages are subject to change or delay. 
The development, release, and timing of any products, features, or functionality remain at the sole discretion of GitLab._\n",[9,482,231,704],{"slug":2994,"featured":6,"template":680},"track-machine-learning-model-experiments","content:en-us:blog:track-machine-learning-model-experiments.yml","Track Machine Learning Model Experiments","en-us/blog/track-machine-learning-model-experiments.yml","en-us/blog/track-machine-learning-model-experiments",{"_path":3000,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":3001,"content":3006,"config":3011,"_id":3013,"_type":14,"title":3014,"_source":16,"_file":3015,"_stem":3016,"_extension":19},"/en-us/blog/training-and-deploying-ai-models-with-gitlab-and-vertex-ai",{"title":3002,"description":3003,"ogTitle":3002,"ogDescription":3003,"noIndex":6,"ogImage":820,"ogUrl":3004,"ogSiteName":693,"ogType":694,"canonicalUrls":3004,"schema":3005},"Train and deploy AI models with GitLab and Google Cloud's Vertex AI","Demo of GitLab's DevSecOps capabilities combined with Vertex AI's scalable ML platform, designed with the aim of rapid and secure AI deployments.","https://about.gitlab.com/blog/training-and-deploying-ai-models-with-gitlab-and-vertex-ai","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Train and deploy AI models with GitLab and Google Cloud's Vertex AI\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Regnard Raquedan\"}],\n        \"datePublished\": \"2023-06-08\"\n      }",{"title":3002,"description":3003,"authors":3007,"heroImage":820,"date":3008,"body":3009,"category":10,"tags":3010},[1465],"2023-06-08","\n\u003Ci>This blog is the latest post in an ongoing series about GitLab's journey to \u003Ca href=\"/blog/ai-ml-in-devsecops-series/\">build and integrate AI/ML into our DevSecOps platform\u003C/a>. The first blog post can be found \u003Ca href=\"/blog/what-the-ml-ai/\">here\u003C/a>. 
Throughout the series, we'll feature blogs from our product, engineering, and UX teams to showcase how we're infusing AI/ML into GitLab.\u003C/i>\n\nMost development and engineering teams are now tasked with maintaining and deploying AI/ML-related code. In this context, the focus on security and efficiency becomes even more crucial. Companies are keen to capitalize on the benefits of AI swiftly while striving to decrease potential risks. GitLab can be used to orchestrate any AI/ML workloads, enabling teams to rapidly develop new generative AI capabilities. [GitLab recently announced a partnership with Google](https://about.gitlab.com/press/releases/2023-05-02-gitLab-and-google-cloud-partner-to-expand-ai-assisted-capabilities.html) to bring [generative AI on Google Cloud](https://cloud.google.com/ai/generative-ai) to our mutual customers.\n\nThis is a tutorial of how to use these tools to deploy an AI model with [Google Cloud's Vertex AI](https://cloud.google.com/vertex-ai) using GitLab to orchestrate the [ModelOps workload](https://about.gitlab.com/direction/modelops/). Our custom model training use case is simple introductory credit card fraud detection, a pertinent issue in the financial industry.\n\n## The solution\nOur solution is a Python-based credit card transaction fraud detection app. Once deployed, applications can use an API endpoint to make predictions on whether a submitted transaction is fraudulent or not.\n\n[Vertex AI](https://cloud.google.com/vertex-ai/docs) is Google Cloud's flagship AI/ML platform that lets users train and deploy machine learning models and AI applications. This is the platform where the API endpoint and model are hosted. 
While Vertex AI can handle training using prebuilt functions, this demo uses a custom training script written in Python.\n\nFor the demo’s purposes, GitLab hosts the [application source code](https://gitlab.com/gitlab-com/alliances/google/sandbox-projects/demos/vertex-ai) and helps to ensure quality and security by running the tests and scans automatically. We also use GitLab's CI/CD to execute the Python code, programmatically upload the resulting artifacts to Google Cloud Storage, and create the endpoint in Vertex AI.\n\nLet's take a high-level look at how the solution is designed and what it does:\n* Data preprocessing: To effectively detect fraudulent transactions, we address the initial imbalance in the raw transaction data. By employing the [Synthetic Minority Over-sampling Technique (SMOTE)](https://imbalanced-learn.org/stable/references/generated/imblearn.over_sampling.SMOTE.html), we duplicate instances of the minority class, enhancing the model's ability to identify patterns.\n* Model training: Using the balanced dataset, we train a [RandomForestClassifier](https://scikit-learn.org/stable/modules/generated/sklearn.ensemble.RandomForestClassifier.html) model. This creates a 'forest' of decision trees, each based on a random subset of the training data. It outputs the mode of the classes determined by the individual trees, making it well-suited for binary outcomes, such as identifying fraudulent transactions. The trained model is stored in a Google Cloud Storage bucket for later use.\n* Model deployment: Leveraging GitLab's DevSecOps platform, model deployment becomes straightforward. By committing the code to GitLab, the trained model is automatically deployed to Vertex AI. Subsequently, the API endpoint is also established.\n\n## Prerequisites\nBefore we dive into the details, let's make sure you have everything you need to get started with deploying your AI model using GitLab and Vertex AI.\n\nHere are the requirements:\n1. Google Cloud project\n1. 
Google Cloud service account with these permissions:\n   1. AI Platform Admin\n   1. Service Account User\n   1. Storage Admin\n   1. Storage Object Admin\n   1. Vertex AI Administrator\n1. GitLab project\n\n\u003Ciframe width=\"560\" height=\"315\" src=\"https://www.youtube-nocookie.com/embed/p7GTsbSQWF4\" frameborder=\"0\" allow=\"accelerometer; autoplay; clipboard-write; encrypted-media; gyroscope; picture-in-picture\" allowfullscreen>\u003C/iframe>\n\n## Demo walkthrough\nFollowing the [video walkthrough](https://youtu.be/p7GTsbSQWF4), here's a guide for setting up an AI pipeline with GitLab and Google Cloud's Vertex AI.\n\n### Step 1: GitLab and Google Cloud connection\nFirstly, we need GitLab to interact with Google Cloud. Insert your Google Cloud Service Account credentials into your GitLab project's environment variables (make sure they're Base64 encoded for security).\n\n### Step 2: Uploading data to Vertex AI\nMove to the Vertex AI section in Google Cloud. Here, [create and upload your dataset](https://cloud.google.com/vertex-ai/docs/tabular-data/forecasting/create-dataset). In our demo, we use a 'Tabular' dataset for 'Classification' as we're predicting credit card fraud.\n\n### Step 3: Creating the CI/CD pipeline\nBack to GitLab to structure our [CI/CD pipeline](https://gitlab.com/gitlab-com/alliances/google/sandbox-projects/demos/vertex-ai/-/ci/editor). 
It comprises three stages:\n\n**Test:** Quality and security checks.\n\n**Train:** Executes a Python script to train the model, outputting a .pkl  artifact.\n\n```\ntrain:\n stage: train\n script:\n   - apt-get update && apt-get install -y python3-pip python3-venv\n   - python3 -m venv venv\n   - source venv/bin/activate\n   - pip install --upgrade pip\n   - pip install pandas scikit-learn joblib imbalanced-learn google-cloud-storage\n   - python3 src/train.py\n artifacts:\n   paths:\n     - model.pkl\n```\n\n**Deploy:** Uses Google Cloud's Deep Learning platform container to deploy the trained model on Vertex AI.\n\n```\ndeploy:\n stage: deploy\n image:\n   name: us-docker.pkg.dev/vertex-ai/prediction/sklearn-cpu.1-5\n   entrypoint: [\"\"]\n script:\n   - python src/deploy.py\n dependencies:\n   - train\n only:\n   - main\n```\n\n### Step 4: Model training\nThe [training code](https://gitlab.com/gitlab-com/alliances/google/sandbox-projects/demos/vertex-ai/-/blob/main/src/train.py), running on GitLab CI/CD, fetches data from Vertex AI, processes it, trains our RandomForestClassifier model, and saves the model to Google Cloud Storage.\n\n### Step 5: Model deployment\nThe [deployment script](https://gitlab.com/gitlab-com/alliances/google/sandbox-projects/demos/vertex-ai/-/blob/main/src/deploy.py) creates an endpoint on Vertex AI and deploys our trained model there, utilizing the service account credentials we set initially.\n\n### Step 6: Prediction testing\n\n![Screenshot of Vertex AI](https://about.gitlab.com/images/blogimages/vertex-ai-screenshot.png)\n\nFinally, within Vertex AI, navigate to your model and test its predictions using an input JSON request. If all goes well, you'll get a response from your model.\n\nThere you have it: an AI pipeline with GitLab and Google Cloud's Vertex AI. 
This combination of GitLab's DevSecOps capabilities with Vertex AI's scalable ML platform is designed with the aim of rapid and secure AI deployments.\n",[703,725,704,9],{"slug":3012,"featured":6,"template":680},"training-and-deploying-ai-models-with-gitlab-and-vertex-ai","content:en-us:blog:training-and-deploying-ai-models-with-gitlab-and-vertex-ai.yml","Training And Deploying Ai Models With Gitlab And Vertex Ai","en-us/blog/training-and-deploying-ai-models-with-gitlab-and-vertex-ai.yml","en-us/blog/training-and-deploying-ai-models-with-gitlab-and-vertex-ai",{"_path":3018,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":3019,"content":3025,"config":3031,"_id":3033,"_type":14,"title":3034,"_source":16,"_file":3035,"_stem":3036,"_extension":19},"/en-us/blog/unreview-a-year-later-how-gitlab-is-being-transformed-by-ml-powered-code-review",{"title":3020,"description":3021,"ogTitle":3020,"ogDescription":3021,"noIndex":6,"ogImage":3022,"ogUrl":3023,"ogSiteName":693,"ogType":694,"canonicalUrls":3023,"schema":3024},"GitLab transforms code review with machine learning tools","Learn how last year's acquisition has resulted in impactful features for the One DevOps Platform.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749668002/Blog/Hero%20Images/pg-gear.jpg","https://about.gitlab.com/blog/unreview-a-year-later-how-gitlab-is-being-transformed-by-ml-powered-code-review","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"UnReview a year later: How GitLab is transforming DevOps code review with ML-powered functionality\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Taylor McCaslin\"}],\n        \"datePublished\": \"2022-06-02\"\n      }",{"title":3026,"description":3021,"authors":3027,"heroImage":3022,"date":3028,"body":3029,"category":1156,"tags":3030},"UnReview a year later: How GitLab is transforming DevOps code review with ML-powered 
functionality",[1410],"2022-06-02","\n\nA little over a year ago, [GitLab acquired UnReview](/press/releases/2021-06-02-gitlab-acquires-unreview-machine-learning-capabilities.html), a machine learning-based solution for automatically identifying [relevant code reviewers](/stages-devops-lifecycle/create/) and distributing review workloads and knowledge. Our goal is to integrate UnReview’s ML-powered code review features throughout GitLab, the One DevOps Platform. We checked in with Taylor McCaslin, principal product manager, ModelOps, at GitLab, to find out the impact UnReview has had so far and what comes next.\n\n**The idea of applying machine learning to code review was already underway at GitLab before the UnReview acquisition. What was it about ML/AI and automation that seemed a good fit for the code review process? How did the UnReview acquisition affect that strategy?**\n\nThe acquisition of UnReview gave GitLab a practical way to get started with a really focused value proposition that was obvious to the platform. ML/AI is a lot more than just having a useful algorithm. UnReview and its team gave GitLab talent with experience building MLOps pipelines and working with production DataOps workflows. As a source code management ([SCM](/solutions/source-code-management/)) and continuous integration ([CI](/topics/ci-cd/)) platform, MLOps and DataOps are key ambitions for our ModelOps stage. UnReview is the foundational anchor of our AI Assisted group, and we anticipate developing more ML-powered features with the base that we’ve built integrating UnReview into our One DevOps platform. If it’s something you manually set today within GitLab, we’ll consider suggestions and automations: suggested labels, assignees, issue relationships, etc. You can learn more about our plans on our [AI Assisted direction page](/direction/modelops/ai_assisted/).\n\n> You’re invited! 
Join us on June 23rd for the [GitLab 15 launch event](https://page.gitlab.com/fifteen) with DevOps guru Gene Kim and several GitLab leaders. They’ll show you what they see for the future of DevOps and The One DevOps Platform.\n\n**There were [three specific objectives with the UnReview project](/handbook/engineering/development/data-science/ai-assisted/projects/unreview/#overview) when you first started:**\n- **Eliminate the time wasted manually searching for an appropriate code reviewer to review code changes.**\n- **Make optimum recommendations that consider the reviewers’ experience and optimize the review load across the team, which additionally facilitates knowledge sharing.**\n- **Provide analytics on the state of code review in the project, explaining why a particular code reviewer is recommended.**\n\n**Have you had to change or add to these in any way?**\n\nWe now have Suggested Reviewers running for external beta customers as well as dogfooding it internally. We’ve learned a lot about what makes a good code reviewer. Some of the obvious things like context with the changed files and history of committing to that area of code are obvious. But there are less obvious things like what type of code someone has experience with (front-end or back-end).\n\nWe’re finding the concept of recency interesting: the idea that people who more recently interacted with files and functions may be better suited to review the code. Also, people leave companies, and that’s usually not something that can be inferred by the source graph, so we’re working on merging additional GitLab activity data with the recommendation engine.\n\nIn addition, we’re thinking a lot about bias in our recommendations. For example, a senior engineer likely has the most commits across a project, but we don’t always want to recommend a senior engineer. 
The more we work with the algorithm and recommendations, the more nuanced we find it.\n\nNot every organization does code review the same way, so we’re considering building different models for those that have no process versus organizations that have very rigid and hierarchical reviewer requirements. We also have to consider how recommendations interact with other features of the platform like code owners, maintainer roles, and commit access.\n\nWe’ve never been more excited about the potential of machine learning within GitLab. Some of the feedback we’ve had from beta customers are “this feels like magic” and that honestly encapsulates what we’re going for. Sometimes the right code reviewer is just a feeling that you can’t quite put your finger on. Through data and a little bit of magic, we may see Suggested Reviewers help speed up workflows, and cut down on back and forth and wasted time trying to find someone to do a great review of your code.\n\n**Introducing ML-powered features can come with challenges, especially being GitLab’s first data science feature. Can you speak to some of those challenges and how the team overcame them?**\n\nIt has been about a year since we closed the transaction. During that time period we’ve introduced a lot of new concepts to GitLab. Access to real-time data within the feature with DataOps extraction and cleaning of platform activity data. We have an end-to-end MLOps pipeline running 100% within GitLab CI that extracts, builds, trains, and deploys the UnReview model, and new observability metrics to know if the whole system is working. These are all foundational concepts that we’ve had to build from the ground up.\n\nAlso, we’ve introduced Python to the GitLab tech stack and have to develop new engineering standards and hiring interview practices to find the right talent for this team. 
We’re now turning the corner of this foundational work and I anticipate that relatively soon we’ll release Suggested Reviewers fully integrated with the platform and UI.\n\nMilestones have been part of the way we’ve sliced up the integration work. We have a variety of internal milestones we’ve been tracking against, including porting the model into GitLab SCM and CI, building the Dataops and MLOps pipelines, and internal and external customer betas. It’s helpful to have these milestones to know what’s most important at any given time and not to get overwhelmed with all the moving pieces. We’re paving a new path with ML-powered features at GitLab, and once we’re done we’ll have a repeatable process and template to replicate over and over with new data science-powered features.\n\n**What has been the most surprising thing you’ve encountered or learned since UnReview first debuted?**\n\nCode Reviewers are foundational to the software development lifecycle. We thought this would be a really straightforward feature, but it turns out people REALLY care about recommendations. People hate bad suggestions so when the recommendations are wrong, the feedback is fast and furious. But when it’s right, it feels like magic. That really surprised me how positively people respond to a great suggestion.\n\nA lot of GitLab users have asked me what our success metric is for Suggested Reviewers. It should just feel like magic. Maybe you don’t know why someone was chosen, but you just feel they were the right person to review the change. And hopefully that leads to a more thoughtful code review, reduces the back and forth of trying to find someone to review your code, and ultimately creates a better experience end-to-end. A lot of engineers dread code reviews; we want to change that. I hope Suggested Reviewers can take the pain out of the experience and make it something engineers look forward to. That’s the feeling we’re trying to create with our recommendations. 
Obvious but magic.\n\n**What’s next for UnReview specifically and DevOps code review more generally? Where do you see the next big advances happening?**\n\nWe’re just scratching the surface. There are so many opportunities for recommendations and automations across the platform. We have a lot of data at GitLab, from the source graph, contribution history, CI builds, test logs, security scans, and deployment data. We believe all of this can be integrated together. I’m particularly excited about what we’re calling [Intelligent Code Security](/direction/modelops/ai_assisted/#categories). The idea is that we will be able to look at your source code as you’re writing it, analyze it for security vulnerabilities, and not only suggest fixes to common security flaws, but also apply that change, run your CI, confirm the build succeeds, confirm the vulnerability was resolved, and possibly even deploy that change, all automatically.\n\nImagine the future where your code gets more secure automatically while you sleep. That sounds wild, but we have the data to power [a feature like this in the future](/direction/modelops/ai_assisted/#categories). Suggested Reviewers is just the beginning. We haven’t seen many DevOps platforms fully embrace the data, code, and activity data that they have in a material way. 
I think we’ll see a lot more in this space moving forward as development platforms identify the massive opportunities to drive efficiencies and remove the frustrating parts of software development from the process.\n",[849,786,1035,231,9],{"slug":3032,"featured":6,"template":680},"unreview-a-year-later-how-gitlab-is-being-transformed-by-ml-powered-code-review","content:en-us:blog:unreview-a-year-later-how-gitlab-is-being-transformed-by-ml-powered-code-review.yml","Unreview A Year Later How Gitlab Is Being Transformed By Ml Powered Code Review","en-us/blog/unreview-a-year-later-how-gitlab-is-being-transformed-by-ml-powered-code-review.yml","en-us/blog/unreview-a-year-later-how-gitlab-is-being-transformed-by-ml-powered-code-review",{"_path":3038,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":3039,"content":3045,"config":3051,"_id":3053,"_type":14,"title":3054,"_source":16,"_file":3055,"_stem":3056,"_extension":19},"/en-us/blog/use-gitlab-ai-features-out-of-the-box-in-a-gitlab-workspace",{"title":3040,"description":3041,"ogTitle":3040,"ogDescription":3041,"noIndex":6,"ogImage":3042,"ogUrl":3043,"ogSiteName":693,"ogType":694,"canonicalUrls":3043,"schema":3044},"Use GitLab AI features out-of-the-box in a GitLab Workspace","GitLab Workspaces now ships with the GitLab workflow extension preinstalled, providing access to powerful AI features like GitLab Duo Chat and Code Suggestions for increased productivity.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098843/Blog/Hero%20Images/Blog/Hero%20Images/securitylifecycle-light_securitylifecycle-light.png_1750098843047.png","https://about.gitlab.com/blog/use-gitlab-ai-features-out-of-the-box-in-a-gitlab-workspace","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Use GitLab AI features out-of-the-box in a GitLab Workspace\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Safwan Ahmed\"}],\n        
\"datePublished\": \"2024-07-24\"\n      }",{"title":3040,"description":3041,"authors":3046,"heroImage":3042,"date":3048,"body":3049,"category":10,"tags":3050},[3047],"Safwan Ahmed","2024-07-24","AI is transforming the way we get work done, from helping automate mundane tasks to optimizing aspects of our day-to-day workflow. Of particular relevance has been [generative AI’s ability](https://about.gitlab.com/the-source/ai/how-to-put-generative-ai-to-work-in-your-devsecops-environment/) to support developers in getting the job done, from code snippet suggestions to concise summaries of technical questions. These AI tools have been embedded in the development lifecycle through integrations with existing software like code editors and CI/CD platforms. Thanks to these integrations, particularly in the case of code editors, developers can have an AI assistant that complements their skills within their development environment.\n\nWhile these AI tools can help boost productivity, setting them up in an existing development environment may not be preferable. For example, you may not want to install a new dependency on your local workstation that could affect your setup, you may have security or privacy concerns about running AI tools on your computer, or you may find it hard to give the tooling context on your existing workflow. GitLab resolves these issues by providing a suite of tools that allow you to leverage the power of AI in [a remote development workspace](https://about.gitlab.com/blog/quick-start-guide-for-gitlab-workspaces/) right out of the box. In this blog, you'll learn about the GitLab features that make this possible and how to set up your [workspace environment](https://docs.gitlab.com/ee/user/workspace/) to get started.\n\n## GitLab workflow extension for VS Code\n\nGitLab workflow extension for VS Code integrates GitLab into the VS Code editor. It brings into scope key elements of your GitLab workflow such as issues, merge requests, and pipeline status. 
For more information, visit [the GitLab workflow extension documentation](https://docs.gitlab.com/ee/editor_extensions/visual_studio_code/).\n\n## GitLab Workspaces\n\nGitLab Workspaces provide an isolated development environment to make changes to your GitLab projects. Workspaces offer a platform to work on your projects without the complexity of setting up local dependencies. Workspaces also provide reproducible development setups, as a workspace environment configuration created by one developer can be shared with others. GitLab Workspaces are configured to use the VS Code editor and ship with the workflow extension preinstalled. To learn more, visit the [GitLab Workspaces documentation](https://docs.gitlab.com/ee/user/workspace/).\n\n## GitLab Duo Chat and Code Suggestions\n\n[GitLab Duo Chat](https://about.gitlab.com/blog/gitlab-duo-chat-now-generally-available) and [GitLab Duo Code Suggestions](https://about.gitlab.com/blog/gitlab-duo-code-suggestions-is-generally-available/) are part of the GitLab Duo suite of AI features enhancing developer productivity. Chat and Code Suggestions are integrated into the workflow extension and are GitLab context-aware. This allows you to ask GitLab Duo questions about items like issues and merge requests and to automatically have access to code suggestions and code completion. This integration requires [a GitLab Duo license](https://about.gitlab.com/gitlab-duo/). See the [GitLab Duo Chat documentation](https://docs.gitlab.com/ee/user/gitlab_duo_chat/) and [GitLab Duo Code Suggestions documentation](https://docs.gitlab.com/ee/user/project/repository/code_suggestions/) for more information.\n\n## How to set up the Workflow extension, Workspaces, and GitLab Duo to work together\n\nWhile these features are impressive on their own, when combined they deliver on the promise of an easy-to-spin-up, isolated, AI-driven development environment. 
Here are the steps to get this powerhouse up and running.\n\n## Create a workspace\n\nFollow this [comprehensive but easy-to-follow tutorial](https://about.gitlab.com/blog/quick-start-guide-for-gitlab-workspaces/) to create a remote development workspace.\n\n## Validate GitLab Workflow is active\n\nAfter your workspace is up and running, you should see a GitLab icon on the side of your editor like the following:\n\n![Arrow pointing to GitLab tanuki icon](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098853/Blog/Content%20Images/Blog/Content%20Images/image6_aHR0cHM6_1750098853108.png)\n\nYou can then use the workflow extension to bring up merge requests assigned to you in the current project in GitLab. To do this, access the command palette by hitting `command + shift + P` and entering `GitLab: Show Merge Requests Assigned to Me`. This will redirect you to GitLab and show your assigned MRs.\n\n![Arrow pointing to 'GitLab: Show Merge Requests Assigned to Me'](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098853/Blog/Content%20Images/Blog/Content%20Images/image1_aHR0cHM6_1750098853109.png)\n\nFor more tips and tricks, read [Visual Studio code editor: Eight tips for using GitLab VS Code](https://about.gitlab.com/blog/vscode-workflows-for-working-with-gitlab/).\n\n## Use GitLab Duo Chat\n\nYou should also see a second, smaller GitLab icon on your sidebar. This gives you access to GitLab Duo Chat. Feel free to ask it a question.\n\n![Arrow pointing to GitLab tanuki icon with sparkles around it](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098853/Blog/Content%20Images/Blog/Content%20Images/image5_aHR0cHM6_1750098853111.png)\n\n## Use GitLab Duo Code Suggestions\n\nOpen up any source file in your directory. You can begin typing code and have predictive suggestions, powered by GitLab Duo Code Suggestions, pop up –  you can insert them by hitting the tab key. 
The example below shows my attempt to write a string processing function. Code Suggestions has inferred I would want to split the passed string into spaces, which is indeed my intention.\n\n![Code Suggestions suggesting splitting the passed string into spaces](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098853/Blog/Content%20Images/Blog/Content%20Images/image2_aHR0cHM6_1750098853112.png)\n\nSuppose I have completed my string processing function above and would like to generate unit tests for it but want to avoid the chore of writing boilerplate code. You can provide a comment in your editor and have Code Suggestions generate code for you like the following:\n\n![Shows boilerplate code for generating unit tests](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098853/Blog/Content%20Images/Blog/Content%20Images/image3_aHR0cHM6_1750098853115.png)\n\nCode Suggestions implements a whole unit test for my function, covering happy and sad paths.\n\nFor more exciting uses of the GitLab Duo suite, check out these articles:\n* [10 best practices for using AI-powered GitLab Duo Chat](https://about.gitlab.com/blog/10-best-practices-for-using-ai-powered-gitlab-duo-chat/)\n* [Top tips for efficient AI-powered Code Suggestions with GitLab Duo](https://about.gitlab.com/blog/top-tips-for-efficient-ai-powered-code-suggestions-with-gitlab-duo/)\n* [\"Developing GitLab Duo\" blog series](https://about.gitlab.com/blog/developing-gitlab-duo-series/)\n\n# Next steps\n\nGitLab Workspaces is coming up with more exciting integrations and features that will enhance your remote development experience, be sure to check out the [category epic](https://gitlab.com/groups/gitlab-org/-/epics/7419) to know what’s coming next!\n\n> Sign up for [a free trial of GitLab Duo](https://about.gitlab.com/gitlab-duo/) 
today!\n",[9,746,231,704],{"slug":3052,"featured":91,"template":680},"use-gitlab-ai-features-out-of-the-box-in-a-gitlab-workspace","content:en-us:blog:use-gitlab-ai-features-out-of-the-box-in-a-gitlab-workspace.yml","Use Gitlab Ai Features Out Of The Box In A Gitlab Workspace","en-us/blog/use-gitlab-ai-features-out-of-the-box-in-a-gitlab-workspace.yml","en-us/blog/use-gitlab-ai-features-out-of-the-box-in-a-gitlab-workspace",{"_path":3058,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":3059,"content":3065,"config":3070,"_id":3072,"_type":14,"title":3073,"_source":16,"_file":3074,"_stem":3075,"_extension":19},"/en-us/blog/use-gitlab-duo-to-build-and-deploy-a-simple-quarkus-native-project",{"title":3060,"description":3061,"ogTitle":3060,"ogDescription":3061,"noIndex":6,"ogImage":3062,"ogUrl":3063,"ogSiteName":693,"ogType":694,"canonicalUrls":3063,"schema":3064},"Use GitLab Duo to build and deploy a simple Quarkus-native project","This tutorial shows how a Java application is compiled to machine code and deployed to a Kubernetes cluster using a CI/CD pipeline. 
See how AI makes the process faster and more efficient.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749666069/Blog/Hero%20Images/AdobeStock_639935439.jpg","https://about.gitlab.com/blog/use-gitlab-duo-to-build-and-deploy-a-simple-quarkus-native-project","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Use GitLab Duo to build and deploy a simple Quarkus-native project\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Cesar Saavedra\"}],\n        \"datePublished\": \"2024-10-17\"\n      }",{"title":3060,"description":3061,"authors":3066,"heroImage":3062,"date":3067,"body":3068,"category":10,"tags":3069},[720],"2024-10-17","In [“How to automate software delivery using Quarkus and GitLab,”](https://about.gitlab.com/blog/how-to-automate-software-delivery-using-quarkus-and-gitlab/) you learned how to develop and deploy a simple Quarkus-JVM application to a Kubernetes cluster using [GitLab Auto DevOps](https://docs.gitlab.com/ee/topics/autodevops/). Now, you'll learn how to use Quarkus-native to compile a Java application to machine code and deploy it to a Kubernetes cluster using a CI/CD pipeline. Follow our journey from development to deployment leveraging [GitLab Duo](https://about.gitlab.com/gitlab-duo/) as our AI companion, including the specific prompts we used.\n\n## What is Quarkus?\n\n[Quarkus](https://quarkus.io/), also known as the Supersonic Subatomic Java, is an open source, Kubernetes-native Java stack tailored to OpenJDK HotSpot and GraalVM. The Quarkus project recently moved to the [Commonhaus Foundation](https://www.commonhaus.org/), a nonprofit organization dedicated to the sustainability of open source libraries and frameworks that provides a balanced approach to governance and support.\n\n## Prerequisites\n\nThis tutorial assumes:\n\n- You have a running Kubernetes cluster, e.g. 
GKE.\n- You have access to the Kubernetes cluster from your local laptop via the `kubectl` command.\n- The cluster is connected to your GitLab project.\n- You have [Maven (Version 3.9.6 or later)](https://maven.apache.org/) installed on your local laptop.\n- You have Visual Studio Code installed on your local laptop.\n\nIf you’d like to set up a Kubernetes cluster connected to your GitLab project, you can follow the instructions in this [tutorial](https://about.gitlab.com/blog/eliminate-risk-with-feature-flags-tutorial/), up to but not including the “Creating an instance of MySQL database in your cluster via Flux” section (you do not need a database for this tutorial).\n\nYou will also need to install an nginx ingress in your Kubernetes cluster. Here are two ways to do this:\n1. You can follow the instructions in [“Creating and importing projects”](https://about.gitlab.com/blog/eliminate-risk-with-feature-flags-tutorial/#creating-and-importing-projects), up to the creation of the variable `KUBE_INGRESS_BASE_DOMAIN`.\n2. Or, just create an ingress in your Kubernetes cluster by following the instructions in our [Auto DevOps with GKE documentation](https://docs.gitlab.com/ee/topics/autodevops/cloud_deployments/auto_devops_with_gke.html#install-ingress).\n\n**NOTE:** For this article, we used the first method above to install an ingress and cert-manager in the Kubernetes cluster.\n\n## Creating necessary project files using GitLab Duo Chat\n\nWe started our endeavor from VS Code and an empty project called `quarkus-native`, which we had previously created in GitLab and had already cloned to our local laptop.\n\n1. 
We opened GitLab Duo Chat, within VS Code, and entered the following prompt:\n\n**_Create a “Hello World” Quarkus application that can be natively compiled_**\n\n![hello-world-prompt](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749675940/Blog/Content%20Images/hello-world-prompt.png)\n\nChat replied with the prerequisites and process of what to do to create a simple “Hello World” Quarkus application. At this point, we were after the generated body of the class `HelloResource.java`:\n\n```\npackage org.acme;\n\nimport javax.ws.rs.GET;\nimport javax.ws.rs.Path;\nimport javax.ws.rs.Produces;\nimport javax.ws.rs.core.MediaType;\n\n@Path(\"/hello\")\npublic class HelloResource {\n\n    @GET\n    @Produces(MediaType.TEXT_PLAIN)\n    public String hello() {\n        return \"Hello World\";\n    }\n}\n```\n\n2. We knew that for Quarkus we needed a special Dockerfile, named `Dockerfile.native`, as part of the creation of the image for the application executable. We then entered the following prompt in Chat:\n\n**_Create the Dockerfile.native file for the sample application above_**\n\n![create-dockerfile-prompt](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749675941/Blog/Content%20Images/create-dockerfile-prompt.png)\n\nChat replied with a very thorough answer about this file, its contents, and the containerization process. At this point, we were just after the generated body of the file `Dockerfile.native`:\n\n```\n####\n# This Dockerfile is used in order to build a container that runs the Quarkus application in native (no JVM) mode\n####\nFROM registry.access.redhat.com/ubi8/ubi-minimal:8.6\nWORKDIR /work/\nRUN chown 1001 /work \\\n    && chmod \"g+rwX\" /work \\\n    && chown 1001:root /work\nCOPY --chown=1001:root target/*-runner /work/application\n\nEXPOSE 8080\nUSER 1001\n\nCMD [\"./application\", \"-Dquarkus.http.host=0.0.0.0\"]\n```\n\n3. 
Next, we asked Chat to generate the `pom.xml` for this application by entering the following prompt:\n\n**_Create the pom.xml file for the sample application above_**\n\n![create-pom-prompt](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749675940/Blog/Content%20Images/create-pom-prompt.png)\n\nChat provided the content of the `pom.xml` file, an explanation of its contents, and how to use it. At this point, we were after the generated body of the file `pom.xml`:\n\n```\n\u003C?xml version=\"1.0\"?>\n\u003Cproject xsi:schemaLocation=\"http://maven.apache.org/POM/4.0.0 https://maven.apache.org/xsd/maven-4.0.0.xsd\" xmlns=\"http://maven.apache.org/POM/4.0.0\"\n    xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\">\n  \u003CmodelVersion>4.0.0\u003C/modelVersion>\n  \u003CgroupId>org.acme\u003C/groupId>\n  \u003CartifactId>hello-world-quarkus\u003C/artifactId>\n  \u003Cversion>1.0.0-SNAPSHOT\u003C/version>\n  \u003Cproperties>\n    \u003Ccompiler-plugin.version>3.10.1\u003C/compiler-plugin.version>\n    \u003Cmaven.compiler.release>11\u003C/maven.compiler.release>\n    \u003Cproject.build.sourceEncoding>UTF-8\u003C/project.build.sourceEncoding>\n    \u003Cproject.reporting.outputEncoding>UTF-8\u003C/project.reporting.outputEncoding>\n    \u003Cquarkus.platform.artifact-id>quarkus-bom\u003C/quarkus.platform.artifact-id>\n    \u003Cquarkus.platform.group-id>io.quarkus.platform\u003C/quarkus.platform.group-id>\n    \u003Cquarkus.platform.version>2.16.5.Final\u003C/quarkus.platform.version>\n    \u003CskipITs>true\u003C/skipITs>\n    \u003Csurefire-plugin.version>3.0.0-M7\u003C/surefire-plugin.version>\n  \u003C/properties>\n  \u003CdependencyManagement>\n    \u003Cdependencies>\n      \u003Cdependency>\n        \u003CgroupId>${quarkus.platform.group-id}\u003C/groupId>\n        \u003CartifactId>${quarkus.platform.artifact-id}\u003C/artifactId>\n        \u003Cversion>${quarkus.platform.version}\u003C/version>\n        \u003Ctype>pom\u003C/type>\n        
\u003Cscope>import\u003C/scope>\n      \u003C/dependency>\n    \u003C/dependencies>\n  \u003C/dependencyManagement>\n  \u003Cdependencies>\n    \u003Cdependency>\n      \u003CgroupId>io.quarkus\u003C/groupId>\n      \u003CartifactId>quarkus-arc\u003C/artifactId>\n    \u003C/dependency>\n    \u003Cdependency>\n      \u003CgroupId>io.quarkus\u003C/groupId>\n      \u003CartifactId>quarkus-resteasy\u003C/artifactId>\n    \u003C/dependency>\n    \u003Cdependency>\n      \u003CgroupId>io.quarkus\u003C/groupId>\n      \u003CartifactId>quarkus-junit5\u003C/artifactId>\n      \u003Cscope>test\u003C/scope>\n    \u003C/dependency>\n    \u003Cdependency>\n      \u003CgroupId>io.rest-assured\u003C/groupId>\n      \u003CartifactId>rest-assured\u003C/artifactId>\n      \u003Cscope>test\u003C/scope>\n    \u003C/dependency>\n  \u003C/dependencies>\n  \u003Cbuild>\n    \u003Cplugins>\n      \u003Cplugin>\n        \u003CgroupId>${quarkus.platform.group-id}\u003C/groupId>\n        \u003CartifactId>quarkus-maven-plugin\u003C/artifactId>\n        \u003Cversion>${quarkus.platform.version}\u003C/version>\n        \u003Cextensions>true\u003C/extensions>\n        \u003Cexecutions>\n          \u003Cexecution>\n            \u003Cgoals>\n              \u003Cgoal>build\u003C/goal>\n              \u003Cgoal>generate-code\u003C/goal>\n              \u003Cgoal>generate-code-tests\u003C/goal>\n            \u003C/goals>\n          \u003C/execution>\n        \u003C/executions>\n      \u003C/plugin>\n      \u003Cplugin>\n        \u003CartifactId>maven-compiler-plugin\u003C/artifactId>\n        \u003Cversion>${compiler-plugin.version}\u003C/version>\n        \u003Cconfiguration>\n          \u003CcompilerArgs>\n            \u003Carg>-parameters\u003C/arg>\n          \u003C/compilerArgs>\n        \u003C/configuration>\n      \u003C/plugin>\n      \u003Cplugin>\n        \u003CartifactId>maven-surefire-plugin\u003C/artifactId>\n        \u003Cversion>${surefire-plugin.version}\u003C/version>\n        
\u003Cconfiguration>\n          \u003CsystemPropertyVariables>\n            \u003Cjava.util.logging.manager>org.jboss.logmanager.LogManager\u003C/java.util.logging.manager>\n            \u003Cmaven.home>${maven.home}\u003C/maven.home>\n          \u003C/systemPropertyVariables>\n        \u003C/configuration>\n      \u003C/plugin>\n      \u003Cplugin>\n        \u003CartifactId>maven-failsafe-plugin\u003C/artifactId>\n        \u003Cversion>${surefire-plugin.version}\u003C/version>\n        \u003Cexecutions>\n          \u003Cexecution>\n            \u003Cgoals>\n              \u003Cgoal>integration-test\u003C/goal>\n              \u003Cgoal>verify\u003C/goal>\n            \u003C/goals>\n            \u003Cconfiguration>\n              \u003CsystemPropertyVariables>\n                \u003Cnative.image.path>${project.build.directory}/${project.build.finalName}-runner\u003C/native.image.path>\n                \u003Cjava.util.logging.manager>org.jboss.logmanager.LogManager\u003C/java.util.logging.manager>\n                \u003Cmaven.home>${maven.home}\u003C/maven.home>\n              \u003C/systemPropertyVariables>\n            \u003C/configuration>\n          \u003C/execution>\n        \u003C/executions>\n      \u003C/plugin>\n    \u003C/plugins>\n  \u003C/build>\n  \u003Cprofiles>\n    \u003Cprofile>\n      \u003Cid>native\u003C/id>\n      \u003Cactivation>\n        \u003Cproperty>\n          \u003Cname>native\u003C/name>\n        \u003C/property>\n      \u003C/activation>\n      \u003Cproperties>\n        \u003CskipITs>false\u003C/skipITs>\n        \u003Cquarkus.package.type>native\u003C/quarkus.package.type>\n      \u003C/properties>\n    \u003C/profile>\n  \u003C/profiles>\n\u003C/project>\n```\n\n4. 
We then asked Chat to generate the Quarkus test file for this application by entering the following prompt:\n\n**_Create the content for the Quarkus test for the application above_**\n\n![create-quarkus-test-prompt](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749675940/Blog/Content%20Images/create-quarkus-test-prompt.png)\n\nChat returned the body for the Quarkus test, how to use it, and the body of the Java class for the test needed for its native compilation. At this point, we were after the generated bodies of the file `HelloResourceTest.java` and `HelloResourceIT.java`.\n\nHere is the generated body for `HelloResourceTest.java`:\n\n```\npackage org.acme;\n\nimport io.quarkus.test.junit.QuarkusTest;\nimport org.junit.jupiter.api.Test;\n\nimport static io.restassured.RestAssured.given;\nimport static org.hamcrest.CoreMatchers.is;\n\n@QuarkusTest\npublic class HelloResourceTest {\n\n    @Test\n    public void testHelloEndpoint() {\n        given()\n          .when().get(\"/hello\")\n          .then()\n             .statusCode(200)\n             .body(is(\"Hello World\"));\n    }\n\n}\n```\n\nHere is the generated body for `HelloResourceIT.java`:\n\n```\npackage org.acme;\n\nimport io.quarkus.test.junit.QuarkusIntegrationTest;\n\n@QuarkusIntegrationTest\npublic class HelloResourceIT extends HelloResourceTest {\n    // Execute the same tests but in native mode.\n}\n```\n\n5. We needed to know how to organize these files in the GitLab project, so we asked about the directory structure for all these files by entering the following prompt in Chat:\n\n**_Give me the entire directory structure for this project including the location of each file, e.g. 
pom.xml, Dockerfile.native, application.properties, HelloResource.java, HelloResourceTest.java, and the location of the target directory_**\n\n![create-dir-struct-prompt](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749675940/Blog/Content%20Images/create-dir-struct-prompt.png)\n\nChat replied with a detailed diagram about the entire directory structure for the project and where all these files should be located as well as a description of the purpose of each of them. It even mentioned that the directory `target/` and its contents should not be version controlled since it was generated by the build process. Another interesting aspect of the reply was the existence of a file called `resources/application.properties` in the directory structure.\n\n![dir-struct-chat-response](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749675940/Blog/Content%20Images/dir-struct-chat-response.png)\n\nWith all this information in our hands, we were ready to start creating these files in our GitLab project.\n\n## Populating our project with the generated content for each file\n\nWe created each of the following files in their corresponding location and their generated content as provided by Chat:\n\n- `src/main/java/org/acme/HelloResource.java`\n- `resources/application.properties`\n- `src/test/java/org/acme/HelloResourceTest.java`\n- `src/test/java/org/acme/HelloResourceIT.java`\n- `pom.xml`\n- `Dockerfile.native`\n\n**NOTE:** We considered using GitLab Auto Deploy for this endeavor but later realized that it would not be a supported option. We are mentioning this because in the video at the end of this tutorial, you will see that we asked Chat: `How to set the service internalPort to 8080 for auto deploy`. Then we created a file named `.gitlab/auto-deploy-values.yaml` with the generated content from Chat. 
The creation of this file is not necessary for this tutorial.\n\nBefore we started tackling the pipeline to build, containerize, and deploy the application to our Kubernetes cluster, we decided to generate the executable locally on our Mac and test the application locally.\n\n## Testing the application locally\n\nHere is the process we went through to test the application on our local machine.\n\n1. To build the application on the local Mac laptop, from a Terminal window, we entered the following command:\n\n```\nmvn clean package -Pnative\n```\n\n![first-build](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749675940/Blog/Content%20Images/first-build.png)\n\nThe native compilation failed with the error message:\n\n`Cannot find the ‘native-image’ in the GRAALVM_HOME, JAVA_HOME and System PATH. Install it using ‘gu install native-image’`\n\n2. So, we used our trusty GitLab Duo Chat again and asked it the following:\n\n**_The command “mvn clean package -Pnative” is failing with error “java.lang.RuntimeException: Cannot find the ‘native-image’ in the GRAALVM_HOME, JAVA_HOME and System PATH. Install it using gu install native-image”. I’m using a MacOS Sonoma. How do I fix this error on my Mac?_**\n\n![how-to-fix-build-failure-prompt](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749675940/Blog/Content%20Images/how-to-fix-build-failure-prompt.png)\n\nChat replied with a detailed set of steps on how to install the necessary software and set the appropriate environment variables.\n\n3. 
We copied and pasted the following commands from the Chat window to a Terminal window:\n\n```\nbrew install --cask graalvm/tap/graalvm-ce-java17\nexport JAVA_HOME=/Library/Java/JavaVirtualMachines/graalvm-ce-java17-22.3.1\nexport GRAALVM_HOME=${JAVA_HOME}\nexport PATH=${GRAALVM_HOME}/bin:$PATH\nxattr -r -d com.apple.quarantine ${GRAALVM_HOME}/../..\ngu install native-image\n```\n\nThe commands above installed the community edition of GraalVM Version 22.3.1 that supported Java 17. We noticed, during the brew install, that the version of the GraalVM being installed was `java17-22.3.1`, so we had to update the pasted value for `JAVA_HOME` from `graalvm-ce-java17-22.3.0` to `graalvm-ce-java17-22.3.1`.\n\nWe also had to run the `xattr` command to get the GraalVM, which we had downloaded and installed on our Mac, out of quarantine so that it could run locally. Lastly, we installed the GraalVM native-image.\n\n4. At this point, we again, from a Terminal window, entered the following command to build the application on the local Mac laptop:\n\n```\nmvn clean package -Pnative\n```\n\nThis time the compilation was successful and an executable was generated in the `target` directory.\n\n![successful-local-compilation](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749675940/Blog/Content%20Images/successful-local-compilation.png)\n\n5. We ran the executable by entering the following commands from a Terminal window:\n\n```\ncd target\n./quarkus-native-1.0.0-SNAPSHOT-runner \"-Dquarkus.http.host=0.0.0.0\"\n```\n\n![executable-local-run](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749675940/Blog/Content%20Images/executable-local-run.png)\n\n6. 
With the application running, we opened a browser window, and in the URL field, we entered:\n\n```\nhttp://localhost:8080/hello\n```\n\n![app-running-locally](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749675940/Blog/Content%20Images/app-running-locally.png)\n\nThe application returned the string `Hello World`, which was displayed in the browser window.\n\nAt this point, we committed and pushed all the changes to our GitLab project and started working on creating a CI/CD pipeline that would build and deploy the application to a Kubernetes cluster running on the cloud.\n\nBut before continuing, we remembered to add, commit, and push a `.gitignore` file to our project that included the path `target/`, since this was the directory where the executable would be created and we didn’t need to keep it - or its contents - under version control.\n\n## Creating the pipeline with GitLab Duo Chat\n\nNow that we had already successfully tested the application locally on our Mac, we needed to create the CI/CD pipeline that would compile the application, containerize it, and deploy it to our Kubernetes cluster. We wanted to keep the pipeline simple, brief, and have a single environment in which to deploy it. To this end, the pipeline would not tackle multiple environments or feature branches, for example.\n\n1. To avoid manually creating a pipeline from scratch, we decided to once again leverage Chat. We entered the following prompt\n\n**_Create a .gitlab-ci.yml file with 3 stages: build, containerize, and deploy. Each of these stages should have a single job with the same name. The build job should compile the application natively using the -Pnative maven option and the builder image for mandrel jdk-22 for java17 and store the application executable and its Dockerfile as artifacts. The containerize job should use docker to build and push the image to the built-in container registry. 
The deploy job should rollout the containerized application, named quarkus-app, to the production environment in the Kubernetes cluster by creating a deployment resource, a service resource and an ingress rule with host ${KUBE_INGRESS_BASE_DOMAIN} and /hello prefix. The deploy job should also include the url for the production environment. When setting the Kubernetes context, use ${KUBE_CONTEXT}, and for application namespace, use ${KUBE_NAMESPACE}_**\n\n![create-pipeline-prompt](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749675940/Blog/Content%20Images/create-pipeline-prompt.png)\n\nThe prompt above provided some points that we thought would be useful and necessary for a good outcome. The requirements listed by the prompt were:\n\n- It specified that the pipeline needed to have three stages: build, containerize, and deploy.\n- Each stage would have a single job.\n- The build job should natively compile (not build) the application using the -Pnative option.\n- The build job should use the open source software `mandrel` and use the version that would support jdk-22 for Java 17. Remember that these were the versions that we tested on our local machine.\n- The build job would also need to pass the resulting executable and the project Dockerfile to the next job, i.e. 
containerize, in the pipeline.\n- The containerize job should build a docker image with the executable using the Dockerfile, both of which were passed as artifacts.\n- The containerize job should push the image to the GitLab container registry.\n- The deploy job should use the image from the GitLab container registry, name it `quarkus-app`, and deploy it to the Kubernetes cluster using deployment, service and ingress rule resources.\n- The deploy job should use the environment variables `${KUBE_INGRESS_BASE_DOMAIN}`, `${KUBE_CONTEXT}`, and `${KUBE_NAMESPACE}`, which had been defined as group/project variables, accordingly.\n\nChat responded with the following `.gitlab-ci.yml` example:\n\n```\n# Define the stages for the pipeline\nstages:\n  - build\n  - containerize\n  - deploy\n\n# Variables section to use GitLab CI/CD variables\nvariables:\n  MAVEN_OPTS: \"-Dmaven.repo.local=.m2/repository\"\n  MAVEN_CLI_OPTS: \"--batch-mode --errors --fail-at-end --show-version -DinstallAtEnd=true -DdeployAtEnd=true\"\n  CONTAINER_IMAGE: ${CI_REGISTRY_IMAGE}:${CI_COMMIT_SHA}\n\n# Cache configuration for Maven dependencies\ncache:\n  paths:\n    - .m2/repository\n\n# Build stage: Compile the application natively\nbuild:\n  stage: build\n  image:\n    name: quay.io/quarkus/ubi-quarkus-mandrel-builder-image:22.3-java17\n    entrypoint:\n      - \"\"\n  script:\n    - ./mvnw ${MAVEN_CLI_OPTS} clean package -Pnative\n  artifacts:\n    paths:\n      - target/*-runner\n      - src/main/docker/Dockerfile.native-micro\n  # This job runs only on the main branch\n  only:\n      - main\n\n# Containerize stage: Build and push Docker image\ncontainerize:\n  stage: containerize\n  image: docker:20.10.16\n  services:\n    - docker:20.10.16-dind\n  variables:\n    DOCKER_TLS_CERTDIR: \"/certs\"\n  before_script:\n    - docker login -u $CI_REGISTRY_USER -p $CI_REGISTRY_PASSWORD $CI_REGISTRY\n  script:\n    - docker build -f src/main/docker/Dockerfile.native-micro -t ${CONTAINER_IMAGE} .\n    - 
docker push ${CONTAINER_IMAGE}\n  # This job runs only on the main branch\n  only:\n      - main\n\n# Deploy stage: Deploy to Kubernetes\ndeploy:\n  stage: deploy\n  image: \n    name: bitnami/kubectl:latest\n    entrypoint: ['']\n  script:\n    - kubectl config use-context ${KUBE_CONTEXT}\n    - kubectl config set-context --current --namespace=${KUBE_NAMESPACE}\n    - |\n      cat \u003C\u003CEOF | kubectl apply -f -\n      apiVersion: apps/v1\n      kind: Deployment\n      metadata:\n        name: quarkus-app\n      spec:\n        replicas: 1\n        selector:\n          matchLabels:\n            app: quarkus-app\n        template:\n          metadata:\n            labels:\n              app: quarkus-app\n          spec:\n            containers:\n            - name: quarkus-app\n              image: ${CONTAINER_IMAGE}\n              ports:\n              - containerPort: 8080\n      ---\n      apiVersion: v1\n      kind: Service\n      metadata:\n        name: quarkus-app\n      spec:\n        selector:\n          app: quarkus-app\n        ports:\n        - port: 80\n          targetPort: 8080\n      ---\n      apiVersion: networking.k8s.io/v1\n      kind: Ingress\n      metadata:\n        name: quarkus-app\n      spec:\n        rules:\n        - host: ${KUBE_INGRESS_BASE_DOMAIN}\n          http:\n            paths:\n            - path: /hello\n              pathType: Prefix\n              backend:\n                service:\n                  name: quarkus-app\n                  port: \n                    number: 80\n      EOF\n    - echo \"Application deployed to https://${KUBE_INGRESS_BASE_DOMAIN}/hello\"\n  # This job runs only on the main branch\n  only:\n      - main\n```\n\n2. There were some things we needed to adjust in the sample `.gitlab-ci.yml` file above before we could commit it to our `main` branch. 
These are the updates we made to the file:\n\n- We deleted all occurrences of `only: -main` because we wanted to keep our pipeline definition file simple and with no branch-related rules.\n- We fixed the name of the file `Dockerfile.native-micro` to `Dockerfile.native`.\n\n3. At this point, we wanted to ensure that the deployment would be to the `production` environment so we asked Chat the following prompt:\n\n**_What is the syntax to specify an environment with its url in a pipeline?_**\n\n![how-to-add-env-prompt](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749675940/Blog/Content%20Images/how-to-add-env-prompt.png)\n\nThe response from Chat included an example of how to do this so we used this information to add the following environment block to our pipeline:\n\n```\n  environment:\n       name: production\n       url: http://${KUBE_INGRESS_BASE_DOMAIN}/hello\n```\n\n4. The example provided by Chat included a URL that started with `https` and we modified that to `http` since we didn’t really need a secure connection for this simple application.\n\n5. Lastly, we noticed that in the `build` job, there was a script `mvnw` that we didn’t have in our project. So, we asked Chat the following:\n\n**_How can I get the mvnw script for Quarkus?_**\n\n![how-to-add-mvnw-prompt](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749675940/Blog/Content%20Images/how-to-add-mvnw-prompt.png)\n\nChat responded with the command to execute to bootstrap and create this script. We executed this command from a Terminal window:\n\n```\nmvn wrapper:wrapper\n```\n\nWe were now ready to commit all of our changes to the `main` branch and have the pipeline executed. However, on our first attempt, our first pipeline failed at the build job.\n\n## Troubleshooting using GitLab Duo Root Cause Analysis\n\nOur first attempt at running our brand-new pipeline failed. 
So, we took advantage of [GitLab Duo Root Cause Analysis](https://about.gitlab.com/blog/developing-gitlab-duo-blending-ai-and-root-cause-analysis-to-fix-ci-cd/), which looks at the job logs and provides a thorough natural language explanation (with examples) of the root cause of the problem and, most importantly, how to fix it.\n\n![build-job-troubleshooting](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749675940/Blog/Content%20Images/build-job-troubleshooting.png)\n\nRoot Cause Analysis recommended we look at the compatibility of the command that was trying to be executed with the image of mandrel used in the build job. We were not using any command with the image so we concluded that it must have been the predefined `entrypoint` for the image itself. We needed to override this so we asked Chat the following:\n\n**_How do I override the entrypoint of an image using gitlab keywords?_**\n\n![how-to-override-entrypoint-prompt](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749675940/Blog/Content%20Images/how-to-override-entrypoint-prompt.png)\n\nChat replied with some use case examples of overriding an image entry point. We used that information to update the build job image definition:\n\n```\nbuild:\n    stage: build\n    image: quay.io/quarkus/ubi-quarkus-mandrel-builder-image:22.3-java17\n    entrypoint:\n        - “”\n```\n\nWe committed our changes to the `main` branch, which launched a new instance of the pipeline. This time the build job executed successfully but the pipeline failed at the `containerize` job.\n\n## Running a successful pipeline\n\nBefore drilling down into the log of the failed `containerize` job, we decided to drill into the log of the successfully completed build job first. Everything looked good in the log of the build job with the exception of this warning message at the very end of it:\n\n```\nWARNING: src/main/docker/Dockerfile.native: no matching files. 
Ensure that the artifact path is relative to the working directory …\n``` \n\nWe took notice of this warning and then headed to the log of the failed `containerize` job. In it, we saw that the `docker build` command had failed due to a non-existent Dockerfile. We ran Root Cause Analysis on the job and among its suggested fixes was for us to verify that the project structure matched the path of the specified `Dockerfile.native` file.\n\n![containerize-job-troubleshooting](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749675940/Blog/Content%20Images/containerize-job-troubleshooting.png)\n\nThis information confirmed our suspicion of the misplaced `Dockerfile.native` file. Instead of being at the directory `src/main/docker` as specified in the pipeline, it was located at the root directory of the project.\n\nSo, we went back to our project and updated every occurrence of the location of this file in our `.gitlab-ci.yml` file. We modified the two locations where this happened, one in the `build` job and one in the `containerize` job, as follows:\n\n```\nsrc/main/docker/Dockerfile.native\n```\n\nto\n\n```\nDockerfile.native\n```\n\nWe committed our updates to the `main` branch and this time our entire pipeline executed successfully!\n\n![pipeline-successful-run](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749675940/Blog/Content%20Images/pipeline-successful-run.png)\n\nOur last step was to check the running application in the `production` environment in our Kubernetes cluster.\n\n## Accessing the deployed application running in cluster\n\nOnce the pipeline ran successfully to completion, we drilled in the log file for the `deploy` job. Remember, this job printed the URL of the application at the end of its execution. 
We scrolled down to the bottom of the log and clicked on the `https` application link, which opened a browser window warning us that the connection was not private (we disabled `https` for the environment URL but forgot it for this string). We proceeded past the browser warning and then the string \"Hello World\" was displaced in the browser window indicating that the application was up and running in the Kubernetes cluster.\n\nFinally, to double-check our production deployment URL, we headed to the project **Operate > Environments** window, and clicked on the \"Open\" button for it, which immediately opened a browser window with the \"Hello World\" message.\n\n![app-running-on-k8s](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749675940/Blog/Content%20Images/app-running-on-k8s.png)\n\n## Try it \n\nWe created, compiled, built, and deployed a simple Quarkus application to a Kubernetes cluster using [GitLab Duo](https://about.gitlab.com/gitlab-duo/). This approach allowed us to be more efficient and productive in all the tasks that we performed and it helped us streamline our DevSecOps processes. We have shown only a small portion of how GitLab Duo's AI-powered capabilities can help you, namely Chat and Root Cause Analysis. 
There’s so much more you can leverage in GitLab Duo to help you create better software faster and more securely.\n\nWatch this whole use case in action:\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/xDpycxz3RPY?si=HHZrFt1O_8XoLATf\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\nAll the project assets we used are available [here](https://gitlab.com/gitlab-da/use-cases/ai/ai-applications/quarkusn/quarkus-native).\n\n> [Try GitLab Duo for free for 60 days](https://about.gitlab.com/solutions/gitlab-duo-pro/sales/?type=free-trial&toggle=gitlab-duo-pro) and get started on exciting projects like this.",[9,724,703,2358,704,725,109],{"slug":3071,"featured":91,"template":680},"use-gitlab-duo-to-build-and-deploy-a-simple-quarkus-native-project","content:en-us:blog:use-gitlab-duo-to-build-and-deploy-a-simple-quarkus-native-project.yml","Use Gitlab Duo To Build And Deploy A Simple Quarkus Native Project","en-us/blog/use-gitlab-duo-to-build-and-deploy-a-simple-quarkus-native-project.yml","en-us/blog/use-gitlab-duo-to-build-and-deploy-a-simple-quarkus-native-project",{"_path":3077,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":3078,"content":3084,"config":3089,"_id":3091,"_type":14,"title":3092,"_source":16,"_file":3093,"_stem":3094,"_extension":19},"/en-us/blog/use-gitlab-duo-workflow-to-improve-application-quality-assurance",{"title":3079,"description":3080,"ogTitle":3079,"ogDescription":3080,"noIndex":6,"ogImage":3081,"ogUrl":3082,"ogSiteName":693,"ogType":694,"canonicalUrls":3082,"schema":3083},"Use GitLab Duo Workflow to improve application quality assurance","Learn step-by-step how to add unit tests to a Java application using agentic AI (includes a video 
tutorial).","https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097617/Blog/Hero%20Images/Blog/Hero%20Images/Workflow%201800x945_2gQoQIbY9NvjLFpXtsxtXy_1750097616649.png","https://about.gitlab.com/blog/use-gitlab-duo-workflow-to-improve-application-quality-assurance","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Use GitLab Duo Workflow to improve application quality assurance\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Cesar Saavedra\"}],\n        \"datePublished\": \"2025-04-10\",\n      }",{"title":3079,"description":3080,"authors":3085,"heroImage":3081,"date":3086,"body":3087,"category":10,"tags":3088},[720],"2025-04-10","Assuring the quality of your applications via test-driven design, good code coverage, and issue detection is critically important to your customers and your reputation, but it can also be a time-consuming endeavor. [GitLab Duo Workflow](https://about.gitlab.com/gitlab-duo/agent-platform/), agentic AI built on top of the most comprehensive DevSecOps platform, can help you quickly complete development tasks such as adding unit tests to a Java application. This tutorial demonstrates how by using this sample [Java project](https://gitlab.com/gitlab-da/playground/csaavedra/gdw/prodmgr-gdw).\n\n> GitLab Duo Workflow is currently in private beta. Join the [waitlist](https://about.gitlab.com/gitlab-duo/agent-platform/) to see what’s possible with AI agents that understand your entire SDLC.\n\n## Opening your project in VS Code\n\n1. Open the Java project in Visual Studio Code (after cloning it to your local machine). Ensure that you’re in a feature branch (not the main or default branch) before you start. If you’re already working on a merge request, it will have its own associated feature branch.\n\n2. (This step is optional.) 
Navigate to the file that defines the Java class for which you’d like to have GitLab Duo Workflow create unit tests. Inspect it so that you can later confirm that the generated unit tests do cover its class members. This is what you would see:\n\n![File that defines the Java class for which you’d like to have GitLab Duo Workflow create unit tests](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097627/Blog/Content%20Images/Blog/Content%20Images/image2_aHR0cHM6_1750097627482.png)\n\n**Note:** We are assuming that you already enabled the GitLab Duo Workflow extension in your VS Code. If not, please refer to the [setup documentation](https://docs.gitlab.com/user/duo_workflow/#use-workflow-in-vs-code).\n\n3. Launch GitLab Duo Workflow by opening the VS Code command palette [Ctrl + Shift + P] and entering \"GitLab Duo Workflow\" in it and selecting **GitLab: Show Duo Workflow**. A tab will appear that looks like this:\n\n![Launching GitLab Duo Workflow with VS Code](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097628/Blog/Content%20Images/Blog/Content%20Images/image1_aHR0cHM6_1750097627483.png)\n\n4. The next step is to add tests for the default constructor, the verification of the object creation, and the initial state of the properties of the Product class. To accomplish this, enter the following prompt in the text area in GitLab Duo Workflow:\n\n```unset\nCreate unit tests for class defined in the Product.java file and store the unit tests in its own file titled ProductTest.java\n```\n\n![Prompt area in GitLab Duo Workflow](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097628/Blog/Content%20Images/Blog/Content%20Images/image4_aHR0cHM6_1750097627484.png)\n\n5. Click the **Start** button in the GitLab Duo Workflow window. Two new windows will appear: one in the center of the screen and one to the right. 
The one on the right displays the analysis that GitLab Duo Workflow is performing to come up with a plan that will achieve the goal as specified in your prompt. The plan is displayed in the center window. After the analysis and the plan are finished, you should see an output like this:\n\n![Analysis and plan generated by GitLab Duo Workflow](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097627/Blog/Content%20Images/Blog/Content%20Images/image8_aHR0cHM6_1750097627486.png)\n\n6. Review the analysis and plan and, if you are satisfied with them, click **Approve plan** at the bottom of the window.\n\n7. GitLab Duo Workflow will start executing the approved plan and making modifications to your project accordingly.\n\n8. Once the execution of the plan is finished, you will see a new directory `src/test/java/csaa/jspring/ProductManager` in the project with a new file in it named `ProductTest.java`, which contains all the unit tests for the `Product.java` class.\n\n![New directory in the project iwth a new file name `ProductTest.java`](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097628/Blog/Content%20Images/Blog/Content%20Images/image7_aHR0cHM6_1750097627488.png)\n\n9. Navigate to the newly created file `ProductTest.java` and you will see that it has some import statements underlined in red indicating some import errors:\n\n![`ProductTest.java` include imports statement and error indicators in red](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097628/Blog/Content%20Images/Blog/Content%20Images/image3_aHR0cHM6_1750097627489.png)\n\nLet’s have GitLab Duo Workflow fix these for us.\n\n**Note:** We could have also asked GitLab Duo Workflow in our first prompt to update the `pom.xml` file accordingly. But since we didn’t, let’s fix these errors in a new workflow.\n\n## Launching a GitLab Duo Workflow to fix errors in generated code\n\n10. 
Start a new workflow by clicking on the **New workflow** button at the bottom of the analysis window on the right side of your screen.\n\n![New workflow button](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097628/Blog/Content%20Images/Blog/Content%20Images/image6_aHR0cHM6_1750097627491.png)\n\n11. In the prompt text area, enter the following:\n\n```unset\nThe file ProductTest.java has an error “The import org.junit cannot be resolved”. Please fix it\n```\n\n12. After you approve the proposed plan, GitLab Duo Workflow starts its analysis by reading the current `pom.xml` file. It then edits it and removes the outdated JUnit dependency, and follows that with the addition of the correct dependency and version for JUnit. Lastly, it reads the `ProductTest.java` file to clear all the dependency errors.\n\n![GitLab Duo Workflow carrying out analysis by reading pom.xml](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097627/Blog/Content%20Images/Blog/Content%20Images/image5_aHR0cHM6_1750097627492.png)\n\n## Watch the tutorial\n\nThrough the execution of this plan, GitLab Duo Workflow is effectively making updates to the project to achieve what was requested in the prompt, saving time and effort, and increasing productivity so that developers can spend more time innovating and creating value for their organization.\n\nIf you’d like to see what you read above in action, watch the following video:\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/Tuj7TgqY81Q?si=RReuL1pUsLafvAzs\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\n> Sign up for the [GitLab Duo Workflow private beta waitlist](https://about.gitlab.com/gitlab-duo/agent-platform/) to see what’s possible with AI agents that understand your entire SDLC.\n\n## Read more about GitLab Duo Workflow and agentic AI\n\n- [GitLab Duo Workflow: Enterprise visibility and 
control for agentic AI](https://about.gitlab.com/blog/gitlab-duo-workflow-enterprise-visibility-and-control-for-agentic-ai/)\n- [GitLab Duo Workflow documentation](https://docs.gitlab.com/user/duo_workflow/)\n- [GitLab Duo](https://about.gitlab.com/gitlab-duo/)\n- [Agentic AI: Unlocking developer potential at scale (The Source)](https://about.gitlab.com/the-source/ai/agentic-ai-unlocking-developer-potential-at-scale/)\n",[9,482,724,725,704],{"slug":3090,"featured":6,"template":680},"use-gitlab-duo-workflow-to-improve-application-quality-assurance","content:en-us:blog:use-gitlab-duo-workflow-to-improve-application-quality-assurance.yml","Use Gitlab Duo Workflow To Improve Application Quality Assurance","en-us/blog/use-gitlab-duo-workflow-to-improve-application-quality-assurance.yml","en-us/blog/use-gitlab-duo-workflow-to-improve-application-quality-assurance",{"_path":3096,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":3097,"content":3103,"config":3107,"_id":3109,"_type":14,"title":3110,"_source":16,"_file":3111,"_stem":3112,"_extension":19},"/en-us/blog/what-is-a-large-language-model-llm",{"title":3098,"description":3099,"ogTitle":3098,"ogDescription":3099,"noIndex":6,"ogImage":3100,"ogUrl":3101,"ogSiteName":693,"ogType":694,"canonicalUrls":3101,"schema":3102},"What is a large language model (LLM)?","Learn how large language models work, their applications, and their impact on the DevSecOps world.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749660057/Blog/Hero%20Images/LLM.jpg","https://about.gitlab.com/blog/what-is-a-large-language-model-llm","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"What is a large language model (LLM)?\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Itzik Gan Baruch\"}],\n        \"datePublished\": \"2025-05-29\",\n      
}",{"title":3098,"description":3099,"authors":3104,"heroImage":3100,"date":1679,"body":3105,"category":10,"tags":3106},[1735],"Large language models (LLMs) are revolutionizing DevOps and DevSecOps approaches by simplifying complex tasks, such as code creation, log analysis, and vulnerability detection.\n\nIn this article, you will learn how LLMs work, their practical applications, and the main challenges to overcome in order to fully harness their potential.\n\n## What is an LLM?\n\nLLMs are artificial intelligence (AI) systems that can process and generate text autonomously. They are trained by analyzing vast amounts of data from a variety of sources, enabling them to master the linguistic structures, contextual relationships, and nuances of language.\n\nLLMs are a major breakthrough in the field of AI. Their ability to process, generate, and interpret text relies on sophisticated machine learning and natural language processing (NLP) techniques. These systems do not just process individual words; they analyze complex sequences to capture the overall meaning, subtle contexts, and linguistic nuances.\n\n## How do LLMs work?\n\nTo better understand how they work, let's explore some of the key features of large language models.\n\n### Supervised and unsupervised learning\n\nLLMs are trained using two complementary approaches: supervised learning and unsupervised learning. These two approaches to machine learning maximize their ability to analyze and generate text.\n\n* **Supervised learning** relies on labeled data, where each input is associated with an expected output. The model learns to associate these inputs with the correct outputs by adjusting its internal parameters to reduce prediction errors. Through this approach, the model acquires precise knowledge about specific tasks, such as text classification or named entity recognition.\n\n* **Unsupervised learning (or machine learning)**, on the other hand, does not require labeled data. 
The model explores large volumes of text to discover hidden structures and identify semantic relationships. The model is therefore able to learn recurring patterns, implicit grammatical rules in the text, and contextualization of sentences and concepts. This method allows LLMs to be trained on large corpora of data, greatly accelerating their progress without direct human action.\n\nBy combining these two approaches, large language models gain the advantages of both precise, human-guided learning and unlimited autonomous exploration. This complementarity allows them to develop rapidly, while continuously improving their ability to understand and generate text coherently and contextually.\n\n### Learning based on a large volume of data\n\nLLMs are trained on billions of sentences from a variety of sources, such as news articles, online forums, technical documentation, scientific studies, and more. This variety of sources allows them to acquire a broad and nuanced understanding of natural language, ranging from everyday expressions to specialized terminology.\n\nThe richness of the data used is a key factor in LLMs' performance. Each source brings different writing styles, cultural contexts, and levels of technicality. \n\nFor example:\n\n* **News articles** to master informative and factual language \n* **Online forums** to understand specialized communities' informal conversations and technical language  \n* **Technical documentation and scientific studies** to assimilate complex concepts and specific terminology, particularly in areas such as DevOps and DevSecOps\n\nThis diversity of content allows LLMs to recognize complex linguistic structures, interpret sentences in different contexts, and adapt to highly technical domains. 
In DevSecOps, this means understanding commands, configurations, security protocols, and even concepts related to the development and maintenance of computer systems.\n\nWith this large-scale training, LLMs can accurately answer complex questions, write technical documentation, or identify vulnerabilities in computer systems.\n\n### Neural network architecture and \"deep learning\"\n\nLLMs are based on advanced neural network architectures. These networks are specially designed to process large sequences of text while maintaining an accurate understanding of the context. This deep learning-based training is a major asset in the field of NLP.\n\nThe best-known of these structures is the architecture of sequence-to-sequence models (transformers). This architecture has revolutionized NLP with its ability to simultaneously analyze all parts of a text, unlike sequential approaches that process words one by one.\n\nSequence-to-sequence models excel at processing long texts. For example, in a conversation or a detailed technical document, they are able to link distant information in the text to produce precise and well-reasoned answers. This context management is essential in a DevSecOps approach, where instructions can be complex and spread over multiple lines of code or configuration steps.\n\n### Predictive text generation\n\nWhen the user submits a text, query, or question, an LLM uses its predictive ability to generate the most likely sequence, based on the context provided.\n\nThe model analyzes each word, studies grammatical and semantic relationships, and then selects the most suitable terms to produce a coherent and informative text. 
This approach makes it possible to generate precise, detailed responses adapted to the expected tone.\n\nIn DevSecOps environments, this capability becomes particularly useful for:\n\n* **Coding assistance:** generation of code blocks or scripts adapted to specific configurations\n* **Technical problem solving:** proposing solutions based on descriptions of bugs or errors\n* **Drafting technical documentation:** automatic creation of guides, manuals, or instructions\n\nPredictive text generation thus makes it possible to automate many repetitive tasks and speed up technical teams' work.\n\n## Applications of large language models in a DevSecOps approach\n\nWith the rise of automation, LLMs have become indispensable allies for technical teams. Their ability to understand and generate text contextually enables them to effectively operate in complex environments such as [DevSecOps](https://about.gitlab.com/topics/devsecops/).\n\nWith their analytical power and ability to adapt to specific needs, these models offer tailored solutions to streamline processes and lighten technical teams' workload.\n\nDevelopment teams can leverage LLMs to automatically transform functional specifications into source code. \n\nWith this capability, they can perform the following actions:\n- generate complex automation scripts\n- create CI/CD pipelines tailored to specific business processes\n- produce customized security patches\n- generate code explanation and create documentation\n- refactor code by improving code structure and readability without changing functionality\n- generate tests\n\nBy relying on LLMs, teams are able to accelerate the development of their software while reducing the risk of human error.\n\n### Improved documentation and knowledge sharing\n\nThese powerful tools make it easy to create customized user manuals, API descriptions, and tutorials that are perfectly tailored to each user's level of expertise. 
By leveraging existing knowledge bases, LLMs create contextual answers to frequently asked questions. This enhances knowledge sharing within teams, speeds up onboarding of new members, and helps centralize best practices.\n\n### Incident management and troubleshooting\nDuring an incident, LLMs play a crucial role in analyzing logs and [trace files](https://docs.gitlab.com/ee/development/tracing.html) in real time. Thanks to their ability to cross-reference information from multiple sources, they identify anomalies and propose solutions based on similar past incidents. This approach significantly reduces diagnosis time. In addition, LLMs can automate the creation of detailed incident reports and recommend specific corrective actions.\n\n### Creating and improving CI/CD pipelines\n\nLLMs are revolutionizing the configuration of [CI/CD pipelines](https://about.gitlab.com/topics/ci-cd/cicd-pipeline/). They can not only help create pipelines, but also automate this process and suggest optimal configurations based on industry standards. By adapting workflows to your specific needs, they ensure perfect consistency between different development environments. Automated testing is enhanced by relevant suggestions, limiting the risk of failure. LLMs also continuously monitor the efficiency of pipelines and adjust processes to ensure smooth and uninterrupted rollout.\n\n### Security and compliance\n\nIn a DevSecOps environment, large language models become valuable allies for security and compliance. They parse the source code for potential vulnerabilities and generate detailed patch recommendations. LLMs can also monitor the application of security standards in real time, produce comprehensive compliance reports, and automate the application of security patches as soon as a vulnerability is identified. 
This automation enhances overall security and ensures consistent compliance with legal and industry requirements.\n\n## What are the benefits of large language models?\n\nLLMs are radically reshaping DevOps and DevSecOps approaches, bringing substantial improvements in productivity, security, and software quality. By integrating with existing workflows, LLMs are disrupting traditional approaches by automating complex tasks and providing innovative solutions.\n\n### Improved productivity and efficiency\n\nLLMs play a central role in improving technical teams' productivity and efficiency. By automating a wide range of repetitive tasks, they free development teams from routine operations, allowing them to focus on strategic activities with higher added value.\n\nIn addition, LLMs act as intelligent technical assistants capable of instantly providing relevant code snippets, tailored to the specific context of each project. In this way, they significantly reduce research time by offering ready-to-use solutions to assist teams in their work. This targeted assistance speeds up problem solving and reduces disruptions in workflows.\nAs a result, productivity increases and projects move forward more quickly. Technical teams can take on more tasks without compromising the quality of deliverables.\n\n### Improved code quality and security\n\nThe use of large language models in software development is a major lever for improving both code quality and application security. With their advanced analytical capabilities, LLMs can scan source code line by line and instantly detect syntax errors, logical inconsistencies, and potential vulnerabilities. Their ability to recognize defective code allows them to recommend appropriate fixes that comply with industry best practices.\n\nLLMs also play a key preventive role. They excel at identifying complex security flaws that are often difficult for humans to detect. 
By analyzing dependencies, they can flag obsolete or vulnerable libraries and recommend more secure, up-to-date versions. This approach contributes to maintaining a secure environment that complies with current security standards.\n\nBeyond fixing existing errors, LLMs offer improvements by suggesting optimized coding practices and project structures. They can generate code that meets the most advanced security standards from the earliest stages of development.\n\n### Accelerating development lifecycles\n\nLarge language models play a key role in accelerating software development lifecycles by automating key tasks that would otherwise tie up valuable human resources. Complex and repetitive tasks, such as writing functions, creating unit tests, or implementing standard components, are automated in a matter of moments.\n\nLLMs also speed up the validation phase with their ability to suggest complete and appropriate test cases. They ensure broader test coverage in less time, reducing the risk of errors and enabling early detection of anomalies. This preventive approach shortens the correction cycle and limits delays related to code quality issues.\n\nBy simplifying technical tasks and providing fast and tailored solutions, large language models enable businesses to respond to market demands in a more agile way. This acceleration of the development lifecycle results in more frequent updates, faster iterations, and a better ability to adapt products to users' changing needs.\n\nDevelopment lifecycles are becoming shorter, providing a critical strategic advantage in an increasingly demanding technology landscape.\n\n## What are the challenges of using LLMs?\n\nDespite their many benefits, large language models have certain limitations that require careful management. Their effectiveness depends heavily on the quality of the data used during their training and regular updates to their knowledge bases. 
In addition, issues related to algorithmic bias, data security, and privacy can arise, exposing companies to operational and legal risks. Rigorous human oversight remains essential in order to ensure the reliability of results, maintain regulatory compliance, and prevent critical errors.\n\n### Data privacy and security\n\nTraining LLMs relies on large volumes of data, often from diverse sources, raising questions about the protection of confidential information. Sensitive data shared with cloud platforms can therefore be exposed to potential breaches. This is of particular concern to companies operating in regulated sectors. \n\nIn Europe, where strict regulations like GDPR govern data management, many companies are reluctant to transfer their information to external services. Regulatory requirements, coupled with the fear of unauthorized exploitation of sensitive data, have led some companies to opt for self-hosted solutions to maintain complete control over their systems.\n\nProviders like GitLab have put in place robust security guarantees, such as intentional non-retention of personal data and end-to-end encryption. However, this may not be enough for the most demanding customers, who prefer complete control of their environments. Implementing hybrid or on-premises solutions then becomes a strategic necessity to meet the security requirements of certain companies.\n\nLearn more about GitLab Duo Self-Hosted by clicking on the image below to access our product tour.\n\n[![GitLab Duo Self-Hosted tour](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749673815/Blog/Content%20Images/Screenshot_2025-05-29_at_8.29.30%C3%A2__AM.png)](https://gitlab.navattic.com/gitlab-duo-self-hosted)\n\n### Accuracy and reliability\n\nAlthough large language models are capable of producing impressive results, their performance is not infallible. They can produce incorrect, incomplete, or inconsistent answers. 
This inaccuracy becomes particularly problematic in the context of critical tasks such as generating security code or analyzing sensitive data.\n\nIn addition, LLMs operate on the basis of probabilistic models, which means that they do not truly \"understand\" the content they process, but produce predictions based on statistical probabilities. This can lead to technically incorrect or even dangerous recommendations when used without human validation.\n\nTo avoid these pitfalls, it is essential to maintain constant oversight and establish rigorous validation processes. The results provided by LLMs must always be reviewed by humans before being integrated into critical systems.\n\nA strategy of regular model updates, combined with proactive human oversight, can reduce errors and gradually improve the reliability of results.\n\n## How GitLab uses LLMs for GitLab Duo features\n\n[GitLab Duo](https://about.gitlab.com/gitlab-duo/) harnesses the power of large language models to transform DevSecOps processes by integrating AI-powered capabilities throughout the software development lifecycle. This approach aims to improve productivity, strengthen security, and automate complex tasks so that development teams can focus on high added-value tasks.\n\n### AI-assisted software development\n\nGitLab Duo provides continuous support throughout the software development lifecycle with real-time recommendations. Development teams can automatically generate unit tests, get detailed explanations of complex code segments, and benefit from suggestions to improve the quality of their code.\n\n### Proactive CI/CD failure analysis\n\nOne of the key features of GitLab Duo is its assistance in analyzing CI/CD job failures. With LLM and AI, teams are able to quickly identify sources of errors in their continuous integration and deployment pipelines. \n\n### Enhanced code security\n\nGitLab Duo incorporates AI-based security features. 
The system detects vulnerabilities in the source code and proposes detailed patches to reduce the risks. Teams receive clear explanations of the nature of the vulnerabilities identified and can apply automated patches via [merge requests](https://docs.gitlab.com/ee/user/project/merge_requests/) generated directly by GitLab Duo. This feature helps secure development without slowing down development lifecycles.\n\nLearn more about GitLab Duo Vulnerability Explanation and Resolution by clicking on the image below to access our product tour.\n\n[![Vulnerability report interactive tour](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749673816/Blog/Content%20Images/Screenshot_2025-05-29_at_8.32.15%C3%A2__AM.png)](https://gitlab.navattic.com/ve-vr-short)\n\n### Key features of GitLab Duo\n\n* [GitLab Duo Chat](https://about.gitlab.com/blog/10-best-practices-for-using-ai-powered-gitlab-duo-chat/): This conversational feature processes and generates text and code intuitively. It allows users to quickly search for relevant information in large volumes of text, including in tickets, [epics](https://docs.gitlab.com/ee/user/group/epics/), source code, and [GitLab documentation](https://docs.gitlab.com/).\n\n* [GitLab Duo Self-Hosted](https://about.gitlab.com/blog/gitlab-duo-self-hosted-enterprise-ai-built-for-data-privacy/): GitLab Duo Self-Hosted allows companies with strict data privacy requirements to benefit from GitLab Duo's AI capabilities with flexibility in choosing deployment and LLMs from a list of supported options.\n\n* [GitLab Duo Code Suggestions](https://about.gitlab.com/direction/create/code_creation/code_suggestions/): Development teams benefit from automated code suggestions, allowing them to write secure code faster. Repetitive and routine coding tasks are automated, significantly speeding up software development lifecycles.\n\nGitLab Duo is not limited to these features. 
It offers a wide range of features designed to simplify and optimize software development. Whether it's automating testing, improving collaboration between teams, or strengthening project security, GitLab Duo is a complete solution for smart and efficient DevSecOps processes.\n\nLearn more about GitLab Duo Enterprise by clicking on the image below to access our product tour. \n\n[![GitLab Duo Enterprise interactive tour](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749673816/Blog/Content%20Images/Screenshot_2025-05-29_at_8.33.40%C3%A2__AM.png)](https://gitlab.navattic.com/duo-enterprise)",[9,703],{"slug":3108,"featured":6,"template":680},"what-is-a-large-language-model-llm","content:en-us:blog:what-is-a-large-language-model-llm.yml","What Is A Large Language Model Llm","en-us/blog/what-is-a-large-language-model-llm.yml","en-us/blog/what-is-a-large-language-model-llm",{"_path":3114,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":3115,"content":3120,"config":3125,"_id":3127,"_type":14,"title":3128,"_source":16,"_file":3129,"_stem":3130,"_extension":19},"/en-us/blog/what-the-ml-ai",{"title":3116,"description":3117,"ogTitle":3116,"ogDescription":3117,"noIndex":6,"ogImage":820,"ogUrl":3118,"ogSiteName":693,"ogType":694,"canonicalUrls":3118,"schema":3119},"What the ML is up with DevSecOps and AI?","AI will revolutionize DevSecOps platforms. 
Learn about where GitLab is today and what we're working on.","https://about.gitlab.com/blog/what-the-ml-ai","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"What the ML is up with DevSecOps and AI?\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Taylor McCaslin\"}],\n        \"datePublished\": \"2023-03-16\",\n      }",{"title":3116,"description":3117,"authors":3121,"heroImage":820,"date":3122,"body":3123,"category":10,"tags":3124},[1410],"2023-03-16","\n\nGitLab believes at our core that [AI will revolutionize the power of DevSecOps platforms](//topics/devops/the-role-of-ai-in-devops/) to bring to life a software development experience that feels straight out of science fiction. GitLab users already benefit from a step-function increase in productivity when they adopt our platform: streamlined collaboration, operational efficiencies, and massive acceleration in time to delivery. But by introducing machine learning (ML) and other artificial intelligence (AI) capabilities into the fabric of The DevSecOps Platform feature set, we aim to take those gains to a whole new level. \n\nGitLab occupies a unique seat in relation to defining how AI and ML will impact DevSecOps into the future. As the creators of the DevSecOps platform category, we are the founders behind a successful philosophy for bringing DevSecOps principles into practice. By virtue of curating the entire software development lifecycle, our platform also has an unrivaled level of visibility into the code, configuration, testing, deployment, and operation of the applications it produces. It is in the rich data set underpinning that curated experience where unbounded opportunity lurks. 
These opportunities include the ability to deliver:\n\n- **Faster deployments**: By automating various aspects of the software development lifecycle, including testing and deployment, [AI/ML can help DevSecOps](/blog/why-ai-in-devops-is-here-to-stay/) teams deliver software faster and more reliably.\n- **Improved security**: AI/ML can help identify and mitigate potential security threats by analyzing data patterns and behavior. It can also automate security testing and analysis, leading to faster and more accurate detection and remediation of vulnerabilities.\n- **Enhanced quality assurance**: AI/ML can help automate quality assurance processes by analyzing data patterns and identifying potential issues in code, leading to faster testing, fewer bugs, and higher quality software.\n- **Intelligent monitoring and alerting**: AI/ML can help monitor systems in real time, analyzing data from logs, alerts, and other sources to detect anomalous behavior and potential security threats.\n- **Predictive analytics**: AI/ML can help DevSecOps teams predict potential issues, identify patterns, and make data-driven decisions to improve their software before issues become critical.\n\nGitLab will focus on incorporating AI/ML capabilities that leverage our unique strengths to deliver unique value. 
In particular, we plan to:\n\n- incorporate generative AI into GitLab to massively simplify, accelerate, or entirely obviate parts of the software development process,\n- use the unique data set at our disposal to make novel connections and surface insights to users about their teams, their processes, and the software they are building - insights that are otherwise lost to the voids of piecemeal DevOps toolchains.\n- build the MLOps and DataOps plumbing that will enable organizations to build and deploy AI/ML workloads using GitLab.\n\n## AI/ML in GitLab today\n\nHere are some of the ways we are using AI/ML in GitLab today.\n\n### AI/ML for automation\n\nFirst, we are applying ML and AI to automate mundane tasks and reduce the cognitive load for our customers. We are currently developing [AI Assisted capabilities](/direction/modelops/ai_assisted/) to improve productivity and efficiency for everyone in the software delivery workflow. Here are some AI Assisted capabilities available in GitLab today:\n\n![Suggested Review Screenshot](https://about.gitlab.com/images/15_4/create-code-review-suggested-reviewers.png){: .shadow.col-sm-4.right.wrap-text}\n\n- **Suggested Reviewers**, [released last September](/releases/2022/09/22/gitlab-15-4-released/#suggested-reviewers-open-beta), automatically suggests the best available reviewer for a merge request. This capability removes the guesswork by ensuring the right reviewer with the right contextual knowledge is reviewing code changes so that developers can deploy software more efficiently. Early users have told us that Suggested Reviewers minimizes delays and leads to better reviews. They now have more confidence in the code they deploy. The tool has generated tens of thousands of suggested reviewers to more efficiently and securely review code on our platform. 
You can learn more about the feature and how to enable it in [our Suggested Reviewers documentation](https://docs.gitlab.com/ee/user/project/merge_requests/reviews/#suggested-reviewers).\n\n- **GitLab Code Suggestions**, [released in closed beta this past February](/releases/2023/02/22/gitlab-15-9-released/#code-suggestions-available-in-closed-beta), aims to increase developer speed and productivity by providing code suggestions in GitLab’s VS Code IDE plugin. We’re actively building this into GitLab’s new Web IDE and Remote Development solution as well. Ultimate customers interested in joining the beta of Code Suggestions [can fill out this form](https://forms.gle/cbjqJhLGV1i7t6Sd8). Additional information about Code Suggestions can be found on [our direction page](/direction/modelops/ai_assisted/code_suggestions/). \n\n![Animated gif image of code suggestions](https://about.gitlab.com/images/15_9/DemoFastApi.gif){: .shadow}\n\n### Protecting customer source code\n\nWe’ve heard from many of our enterprise DevSecOps customers that their organizations care deeply about the privacy of their source code. They understandably want control over who processes their data and if their code is used to train code generation AI models. That’s why we’ve built our code suggestions feature to work natively within GitLab. Customer source code does not leave the GitLab instance and it is not used to retrain generic multi-customer code generation models.\n\n## The road ahead for GitLab and AI\n\nWe plan to add many AI capabilities throughout our DevSecOps platform, including: \n\n- automating mundane tasks across the software development lifecycle with [Workflow Automation](/direction/modelops/ai_assisted/workflow_automation/) including assigning, labeling, and summarizing. 
\n- reducing the risk due to insecure coding practices by automatically detecting and help remediating code quality and security vulnerabilities with [Intelligent Code Security](/direction/modelops/ai_assisted/intelligent_code_security/). \n- augmenting developers with generative [Code Suggestions](/direction/modelops/ai_assisted/code_suggestions/) while writing, reviewing, and fixing code.\n\nWe also want to make it easier for customers to build and deploy amazing AI/ML-backed applications to their customers faster. We are working to integrate [ModelOps](/direction/modelops/) features into the GitLab DevSecOps Platform to better support data science workloads and extend DevSecOps workflows to AI and ML workloads. This includes: \n\n- enabling data science teams to work seamlessly within the Gitlab platform with [better support for python notebooks](https://docs.gitlab.com/ee/user/project/repository/jupyter_notebooks/) and [GPU runners](https://docs.gitlab.com/runner/configuration/gpus.html).\n- improving handoffs between data science teams and DevSecOps teams with a native [GitLab Model Registry](https://gitlab.com/groups/gitlab-org/-/epics/9423).\n\n## Follow along\n\nThis blog is the first in [an ongoing series](/blog/ai-ml-in-devsecops-series/) about GitLab’s journey to [build and integrate ML/AI into our DevSecOps platform. Throughout the series, we’ll feature blogs from our product, engineering, and UX teams that will showcase how we’re infusing AI/ML into GitLab. \n\nWe believe AI is going to dramatically change the way teams work and the way organizations develop, secure, and operate software. We’re using our core value of iteration and our experience building the most comprehensive DevSecOps platform to bring the power of AI/ML to bear on the software development lifecycle. \n\nWant to continue reading about AI? 
Check out the next blog in this series:[How AI-assisted code suggestions will advance DevSecOps](https://about.gitlab.com/blog/ai-assisted-code-suggestions/)!\n\n\n_Disclaimer: This blog contains information related to upcoming products, features, and functionality. It is important to note that the information in this blog post is for informational purposes only. Please do not rely on this information for purchasing or planning purposes. As with all projects, the items mentioned in this blog and linked pages are subject to change or delay. The development, release, and timing of any products, features, or functionality remain at the sole discretion of GitLab._\n",[9,704],{"slug":3126,"featured":6,"template":680},"what-the-ml-ai","content:en-us:blog:what-the-ml-ai.yml","What The Ml Ai","en-us/blog/what-the-ml-ai.yml","en-us/blog/what-the-ml-ai",{"_path":3132,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":3133,"content":3139,"config":3144,"_id":3146,"_type":14,"title":3147,"_source":16,"_file":3148,"_stem":3149,"_extension":19},"/en-us/blog/whats-next-for-devsecops",{"title":3134,"description":3135,"ogTitle":3134,"ogDescription":3135,"noIndex":6,"ogImage":3136,"ogUrl":3137,"ogSiteName":693,"ogType":694,"canonicalUrls":3137,"schema":3138},"GitLab’s 2023 predictions: What’s next for DevSecOps?","Check out insights on securing the supply chain, new uses for AI/ML, and more.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749663820/Blog/Hero%20Images/prediction.jpg","https://about.gitlab.com/blog/whats-next-for-devsecops","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"GitLab’s 2023 predictions: What’s next for DevSecOps?\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Sandra Gittlen\"}],\n        \"datePublished\": \"2023-01-26\",\n      
}",{"title":3134,"description":3135,"authors":3140,"heroImage":3136,"date":3141,"body":3142,"category":1156,"tags":3143},[2934],"2023-01-26","\nIn 2023, organizations will focus their time and resources on the continued shift left of security, completing the evolution from DevOps to [DevSecOps](/topics/devsecops/). GitLab Chief Marketing and Strategy Officer [Ashley Kramer](https://gitlab.com/akramer) says that every company will need to have security tightly integrated into DevOps to combat the increased threats throughout the software development lifecycle. In addition, DevSecOps teams will have to continue to focus on supply chain security, make optimal use of artificial intelligence and machine learning, and expand their use of value stream analytics. GitLab leaders from across disciplines share these predictions and more about how the industry will change this year.\n\n## Prediction 1: Protecting the supply chain will be the top priority\n\nSecurity will continue to be an organization-wide responsibility, shifting further left and spanning from [the IDE](/blog/get-ready-for-new-gitlab-web-ide/) to applications running in production, according to  [David DeSanto](https://gitlab.com/david), Chief Product Officer.\n\nIn our [2022 Global DevSecOps survey](https://about.gitlab.com/developer-survey/previous/2022/), 57% of sec team members said their orgs have either shifted security left or are planning to this year. Half of security professionals report that developers are failing to identify security issues – to the tune of 75% of vulnerabilities.\n\nThe shift left will be driven in part by the need for [tighter security for software supply chains](/blog/the-ultimate-guide-to-software-supply-chain-security/). 
“As remote development becomes more and more commonplace, software supply chain security will play a more expansive role across the software development lifecycle,” DeSanto says.\n\n[Francis Ofungwu](https://gitlab.com/fofungwu), Global Field CISO, predicts this supply chain security evolution will happen in three key ways:\n\n- The engineering frontlines will take on more ownership of managing threats in their day-to-day operations. In order to accomplish this, developers will need real-time context on vulnerabilities and remediation strategies in each phase of the software development lifecycle (SDLC), consequently reducing the likelihood of painful incidents in production environments.\n\n- Security and compliance teams will invest in transcribing their software assurance expectations into policy-as-code to reduce the manual and time-consuming security review processes that reduce development velocity.\n\n- As a result of headline-grabbing incidents highlighting enterprise risks in modern software development, organizations will build audit programs to better assess and report SDLC risks. This will require organizations to design how to deliver artifacts that prove the immutability of the controls deployed across all aspects of their development toolchain. \n\nOrganizations should also expect that “what have been best practices for supply chain security for many years, will now become regulatory requirements,” says [Corey Oas](https://gitlab.com/corey-oas), Manager, Security Compliance (Dedicated Markets). He points to [artifact attestation and software bill of materials (SBOM) generation](/blog/the-ultimate-guide-to-sboms/) as examples of best practices that will soon become federal government or industry mandates. 
“Both of these are integral to developer workflows.” \n\n[Sam White](https://gitlab.com/sam.white), Group Manager, Product - Govern, doubles down on the SBOM and artifact attestation prediction, saying both SBOMs and attestations will need ongoing attention from DevSecOps teams. “Expect to see a shift from looking at these as one-time events to them becoming part of a continuous evaluation process,” he says, adding that organizations will need deeper visibility into software dependencies (e.g. open source packages) and more centralization of software build information.\n\nAnother element of software supply chain security is [zero trust](/blog/why-devops-and-zero-trust-go-together/). “Organizations have considered zero trust strategies for a while, and it will be an implementation focus for them going forward,” predicts [Joel Krooswyk](https://gitlab.com/jkrooswyk), GitLab Federal CTO. “One reason for this movement, at least among federal agencies and their suppliers, is the recent release of the Department of Defense zero trust architecture strategy and roadmap and the inclusion of zero trust principles in several National Institute of Standards and Technology publications such as [800-207](https://csrc.nist.gov/publications/detail/sp/800-207/final).”\n\n> Get more public sector predictions with our webcast [“2022 Lookback & 2023 Predictions in Cybersecurity & Zero Trust with GitLab”](https://page.gitlab.com/2022_devsecopsusecase_Lookback_Predictions_PubSec_RegistrationPage.html)\n\n## Prediction 2: Security will burrow deep into DevOps education\n\nTo mirror the transformation of DevOps to DevSecOps, [DevOps training and education](/blog/5-ways-to-bring-devops-to-your-campus/) will include security as a key part of the curricula, White says. 
“Organizations will have to provide access to the training that developers need to get a baseline security knowledge, including why certain vulnerabilities are important and should be addressed right away,” he says.\n\n[Pj Metz](https://gitlab.com/PjMetz), Education Evangelist, believes 2023 will be the year that “Shift Left principles will show up in university classrooms.”\n\n“Already, the GitLab for Education team has seen more and more requests for information on DevSecOps, and not just in computer science and programming. Information systems students are looking to learn more about DevSecOps as well,” he says. ”Integrating security education directly into DevOps curricula will ensure that future professionals will be prepared for all aspects of DevSecOps.”\n\nAnd he encourages DevOps students to [ask for security to be added into their education](https://about.gitlab.com/the-source/security/the-future-of-devops-education-needs-to-include-security/) so they will be properly prepared for the workforce. \n\n## Prediction 3: AI/ML will be used throughout the SDLC\n\n“AI will become essential for productivity,” Kramer says. “For example, DevOps teams will integrate AI/ML to automate repetitive and difficult tasks. Ideally, this would ease the burden on developers by removing their cognitive load, decreasing the amount of context-switching they have to do, and enabling them to stay in the flow of development.\"\n\nAccording to our 2022 Global DevSecOps survey, 62% of respondents practice ModelOps, while 51% use AI/ML to check code.\n\n“Combining digital transformation with business analytics and AI - real transformations are possible,” says [Christina Hupy](https://gitlab.com/c_hupy), Sr. Manager, Community Programs. 
“As more of their data is input, businesses can draw actual insights and use AI to continuously improve their systems.”\n\nDeSanto agrees and predicts that [AI-assisted workflows will gain popularity](/blog/why-ai-in-devops-is-here-to-stay/) in application development. “AI/ML will further enable rapid development, security remediation, improved test automation, and better observability,” he says.\n\n[Taylor McCaslin](https://gitlab.com/tmccaslin), Group Manager of Product for Data Science, says that while AI/ML certainly isn’t new, making technologies such as open-ended AI accessible to consumers, set an expectation to figure out how it could be better used in software development (think code completion and other such tasks).\n\nHe predicts that while AI/ML will be used all along the SDLC, organizations will grapple with privacy concerns, preserving intellectual property (such as AI-generated code ownership) and permissiveness of licenses for training data sets and algorithms.\n\nAt the same time, he says to look for “more rapid development in the MLOps and DataOps spaces to help developers manage, maintain, and iterate on production software systems that leverage ML and AI.” (Note: GitLab is investing in our ModelOps stage to help support the development of data science-enriched software within the GitLab platform.)\n\n## Prediction 4: Value stream analytics will take on a greater role in organizations\n\nThe digital transformation that organizations will undergo this year will require a deeper commitment to [examining value streams](/blog/the-gitlab-quarterly-how-our-latest-beta-releases-support-developers/). 
“Value stream analytics will extend past development workflows to provide a more holistic view of the value organizations deliver to their users (both internal and external),” DeSanto says.\n\nExecutive leadership will seek out metrics that give insight into how digital transformation and technological investments are delivering value and driving business results. This is a shift from solely focusing on development efficiencies. The 2022 Global DevSecOps survey found that 75% of respondents are either using a DevOps platform or plan to move to one within a year with one of the drivers of this change being metrics and observability.\n\n## Prediction 5: Observability will shift left for efficient DevSecOps \n\n[Observability](/direction/monitor/platform-insights/) will also move further left in the SDLC, according to [Michael Friedrich](https://gitlab.com/dnsmichi), Senior Developer Evangelist. “Observability-driven development will enable everyone to become more efficient and inspire innovation,\" he says.\n\nNew observability-enabling technologies like [eBPF](https://ebpf.io/what-is-ebpf) will help developers with automated code instrumentation instead of adding more workload with manual code instrumentation. eBPF also supports better observability and security workflows in cloud-native environments.\n\nObservability will play a critical role in improving the efficiency of DevSecOps workflows, including CI/CD, infrastructure cost analysis, and trending/forecasting for better capacity planning.\n\n_What do you think will be the big DevSecOps technology advancements this year? Let us know your predictions in the comments below._\n\n## Engage with DevSecOps experts\n\nWant to dig deeper into how to innovate while still keeping an eye on cost efficiencies? 
Sign up for our webcast [“GitLab’s DevSecOps Innovations and Predictions for 2023”](https://page.gitlab.com/webcast-gitlab-devsecops-innovations-predictions-2023.html?utm_medium=blog&utm_source=gitlab&utm_campaign=devopsgtm&utm_content=fy23q4release) on Jan. 31 to get expert advice and insights about this era of DevSecOps transformation and the tools and strategies you’ll need to meet this challenge. \n[Register](https://page.gitlab.com/webcast-gitlab-devsecops-innovations-predictions-2023.html?utm_medium=blog&utm_source=gitlab&utm_campaign=devopsgtm&utm_content=fy23q4release) today!\n\nCover image by [Drew Beamer](https://unsplash.com/@dbeamer_jpg?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText) on [Unsplash](https://www.unsplash.com/)\n{: .note}\n",[849,678,9,704],{"slug":3145,"featured":6,"template":680},"whats-next-for-devsecops","content:en-us:blog:whats-next-for-devsecops.yml","Whats Next For Devsecops","en-us/blog/whats-next-for-devsecops.yml","en-us/blog/whats-next-for-devsecops",{"_path":3151,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":3152,"content":3157,"config":3162,"_id":3164,"_type":14,"title":3165,"_source":16,"_file":3166,"_stem":3167,"_extension":19},"/en-us/blog/why-hackerone-gets-love-letters-from-developers",{"title":3153,"description":3154,"ogTitle":3153,"ogDescription":3154,"noIndex":6,"ogImage":2275,"ogUrl":3155,"ogSiteName":693,"ogType":694,"canonicalUrls":3155,"schema":3156},"Why HackerOne gets love letters from its developers","Learn how HackerOne is making developers more productive while ensuring that security is built into their software development processes.","https://about.gitlab.com/blog/why-hackerone-gets-love-letters-from-developers","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Why HackerOne gets love letters from its developers\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Sharon Gaudin\"}],\n        
\"datePublished\": \"2023-11-14\",\n      }",{"title":3153,"description":3154,"authors":3158,"heroImage":2275,"date":3159,"body":3160,"category":2729,"tags":3161},[1334],"2023-11-14","At [HackerOne](/customers/hackerone/), a cybersecurity company, using the GitLab DevSecOps Platform has changed developers’ team experience and culture so much they’re basically sending the company love letters about their jobs.\n\nThis is no exaggeration, according to HackerOne Senior Director of Platform and Infrastructure Russell Owen, who spoke to an audience at GitLab’s DevSecOps World Tour in Mountain View, California, this fall. The 11-year-old company adopted GitLab in 2018 for source code and issues management, CI/CD, and security and compliance – features that didn’t exist in its previous tooling system. Since then, developers have become more productive — and happier.\n\nHackerOne isn’t alone here. According to GitLab’s [2023 Global DevSecOps Survey](https://about.gitlab.com/developer-survey/previous/2023/), 28% of the more than 5,000 respondents said using DevOps made their developers happier. Productivity, efficiency, and automation go a long way to making developers’ jobs easier and more enjoyable. \n\n## Measuring for DevSecOps impact\n\n“We run surveys every quarter. Are the developers happy? What do they find frustrating? Where's the friction?” Owen said in an on-stage interview with [Sherrod Patching](https://about.gitlab.com/company/team/#spatching), vice president of customer success management at GitLab. “We compare our metrics across the industry to see how we're doing.”\n\nAnd they’re doing well. “For instance, just making our [CI/CD pipelines](/blog/how-to-learn-ci-cd-fast/) as tight as possible makes people more effective. We've done a lot of work on optimizing that inside of GitLab,” Owen added. “And we get what count as love letters from our developers. 
People are so excited because the pipeline times have come down substantially, and it has a direct impact on the productivity of the team.”\n\nWhile Owen said he definitely wants his DevSecOps team members to be happy, he also wants productivity to be high. And he noted the importance of evaluating a variety of metrics — from happiness to the number of merge requests and releases — to enable teams to quantify their impact on the company. And those numbers are good, too.\n\nWith GitLab, merge requests per engineer are up by 50% over the last year, and they’ve cut their code release time by 50% over the last two years, according to Owen, who added that the number of quarterly releases to production jumped by 73% since two years ago.\n\n“A lot of that is from just shaving time off the CI/CD pipeline,” he said. “That’s efficiency. That’s productivity. It’s important because I need to be able to justify our work and expenses. These kinds of metrics make it very easy for me to say: ‘Investing in this area has a measurable return on investment.’”\n\n## Using GitLab for security and compliance\n\nWhen a business is known around the world for cybersecurity, assuring the safety and reliability of its own code has to be a priority. HackerOne uses GitLab to ensure that security is built into their software products and processes, according to Owen.\n\nHe explained their need, for example, to keep the number of people who have access to their production systems to a minimum. The fewer people who can touch the system, the fewer potential entry points to introduce vulnerabilities. The platform enables the team to build in guidelines mandating that any change needs to be reviewed and approved by the infrastructure team. So when teams make a change to the infrastructure, it’s only applied to the system once it’s been accepted. \n\n“Changes are being reviewed by people who have expertise in the area,” said Owen. “This really allows efficiency, but also maintains security. 
And it's all done inside GitLab.”\n\nHackerOne also relies on the GitLab platform to make sure they are staying in line with strict [industry compliance requirements](/blog/meet-regulatory-standards-with-gitlab/). Limiting the number of people who have access to the system is an example of that.\n\nOwen said they also use GitLab to [stay compliant](/blog/top-5-compliance-features-to-leverage-in-gitlab/) with FedRAMP, the Federal Risk and Authorization Management Program, a set of standards the U.S. federal government requires for moving sensitive information into the cloud. \n\n“We do more and more work with containers and FedRAMP requires that they be scanned before being moved into production,” he added. “You can’t just bring them into your infrastructure directly. We stage and scan everything in GitLab — all the containers, particularly third-party containers. Then, if they meet our criteria, we move them on so they’re eligible for production.”\n\nWith GitLab, HackerOne remains FedRAMP compliant for all of its federal customers.  \n\n## Looking to an AI future\n\nWhen asked what he thought they’d be focused on over the next five years, Owen replied, “AI, AI, AI.”\n\nFor HackerOne, which brings in immense amounts of data, Owen said he’s looking to artificial intelligence to help them cull that flood of information to find patterns and pull out what will be useful to their customers.\n\n“Whatever business you're in, for a long time, there has been a lot of data,” Owen said. “And you can't help but wonder how you can use it to provide value to your customers, right? There has been a sea change in the last few years, from AI being something that was kind of theoretically interesting to something that is operationally useful. So if you're not doing AI... Well, I'm sure we’re all doing AI. You have to do it now. 
And the tools are just incredible.”\n\n_HackerOne’s Owen is a software developer, designer, and IT leader with more than 20 years of experience building advanced systems. He previously worked at Google as an engineering manager in the company’s privacy and security department, as well as at Research in Motion, where he was responsible for the Blackberry’s infrastructure design._\n\n_Read more GitLab customer stories on our [customers page](https://about.gitlab.com/customers/)._",[703,482,9,2731],{"slug":3163,"featured":91,"template":680},"why-hackerone-gets-love-letters-from-developers","content:en-us:blog:why-hackerone-gets-love-letters-from-developers.yml","Why Hackerone Gets Love Letters From Developers","en-us/blog/why-hackerone-gets-love-letters-from-developers.yml","en-us/blog/why-hackerone-gets-love-letters-from-developers",{"_path":3169,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":3170,"content":3175,"config":3180,"_id":3182,"_type":14,"title":3183,"_source":16,"_file":3184,"_stem":3185,"_extension":19},"/en-us/blog/write-terraform-plans-faster-with-gitlab-duo-code-suggestions",{"title":3171,"description":3172,"ogTitle":3171,"ogDescription":3172,"noIndex":6,"ogImage":1522,"ogUrl":3173,"ogSiteName":693,"ogType":694,"canonicalUrls":3173,"schema":3174},"Write Terraform plans faster with GitLab Duo Code Suggestions","Follow this tutorial to learn how to use AI-powered code creation to manage your infrastructure with Terraform.","https://about.gitlab.com/blog/write-terraform-plans-faster-with-gitlab-duo-code-suggestions","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Write Terraform plans faster with GitLab Duo Code Suggestions\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Abubakar Siddiq Ango\"}],\n        \"datePublished\": \"2024-01-24\",\n      
}",{"title":3171,"description":3172,"authors":3176,"heroImage":1522,"date":3177,"body":3178,"category":10,"tags":3179},[1622],"2024-01-24","[Terraform](https://www.terraform.io/) is an industry-standard for infrastructure orchestration. It can, however, be daunting and time-consuming to learn how to create infrastructure resources, especially when using Terraform providers you are unfamiliar with. That is where GitLab Duo Code Suggestions and AI-powered code creation comes in.\n\n[GitLab Duo Code Suggestions](https://about.gitlab.com/solutions/code-suggestions/) accelerates your coding in up to [15 supported programming languages](https://docs.gitlab.com/ee/user/project/repository/code_suggestions/index.html#supported-languages-in-ides), including infrastructure as code (IaC) using Terraform.\n\nBy combining IaC with Terraform, infrastructure teams can adopt new Terraform providers/modules quickly and move changes to production faster. The combination also reduces the onboarding time for new users of Terraform or developers who are new to a code base by maintaining the context of the code base and dependencies in its suggestions. Instead of spending hours reading through documentation, you only need to review the given suggestions and update as necessary.\n\nIn this post, you will learn how to set up GitLab Duo Code Suggestions for Terraform.\n\n## How to set up GitLab Duo Code Suggestions for Terraform\n\nFollow these 6 steps to get started:\n\n1. Install the GitLab extension for your IDE of choice (read about [supported IDE extensions](https://docs.gitlab.com/ee/user/project/repository/code_suggestions/index.html#supported-editor-extensions).\n2. Authenticate the GitLab extension with GitLab.com or your GitLab self-hosted instance.\n3. Enable Code Suggestions in your Profile settings under the Preferences > Preferences menu. Search for Code Suggestions, toggle the checkbox, and click Save.\n4. 
Confirm that Code Suggestions is also enabled in your GitLab extensions settings in the IDE.\n5. If you are using Visual Studio Code and Neovim, third-party extension support is needed to use Terraform with Code Suggestions. For VS Code, you can install the [official Terraform extension](https://marketplace.visualstudio.com/items?itemName=HashiCorp.terraform) from HashiCorp.\n6. To test and apply your Terraform plans, [install Terraform](https://developer.hashicorp.com/terraform/install?product_intent=terraform) in your terminal and any necessary provider SDKs.\n\n## Creating your plans\n\nOnce you have all the prerequisites in place, all you need to do next is create a Terraform file ending with `.tf` and prompt GitLab Duo with comments describing what you want. Suggestions will be displayed while maintaining context. Even when no prompt is provided, GitLab Duo will suggest other Terraform resources you might be interested in based on the current context in your plans.\n\nLet's see this in action with GitLab Duo helping with a Terraform plan for provisioning a load balancer with four instances and other necessary resources on the Google Cloud Platform.\n\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/saa2JJ57UaQ\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\n## Get started with Code Suggestions and Terraform today\n\n[Try Code Suggestions](https://about.gitlab.com/solutions/code-suggestions/) today and accelerate your software development. Code Suggestions is also available for [self-managed GitLab](https://docs.gitlab.com/ee/user/project/repository/code_suggestions/self_managed.html) (needs to be enabled by an administrator). 
Code Suggestions uses best-in-class large language models, and non-public customer data is never used as training data.",[482,9,724],{"slug":3181,"featured":91,"template":680},"write-terraform-plans-faster-with-gitlab-duo-code-suggestions","content:en-us:blog:write-terraform-plans-faster-with-gitlab-duo-code-suggestions.yml","Write Terraform Plans Faster With Gitlab Duo Code Suggestions","en-us/blog/write-terraform-plans-faster-with-gitlab-duo-code-suggestions.yml","en-us/blog/write-terraform-plans-faster-with-gitlab-duo-code-suggestions",{"_path":3187,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":3188,"content":3194,"config":3199,"_id":3201,"_type":14,"title":3202,"_source":16,"_file":3203,"_stem":3204,"_extension":19},"/en-us/blog/10-best-practices-for-using-ai-powered-gitlab-duo-chat",{"title":3189,"description":3190,"ogTitle":3189,"ogDescription":3190,"noIndex":6,"ogImage":3191,"ogUrl":3192,"ogSiteName":693,"ogType":694,"canonicalUrls":3192,"schema":3193},"10 best practices for using AI-powered GitLab Duo Chat","Explore tips and tricks for integrating GitLab Duo Chat into your AI-powered DevSecOps workflows. 
Plus, expert advice on how to refine chat prompts for the best results.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097639/Blog/Hero%20Images/Blog/Hero%20Images/blog-image-template-1800x945%20%281%29_77JeTV9gAmbXM0224acirV_1750097638765.png","https://about.gitlab.com/blog/10-best-practices-for-using-ai-powered-gitlab-duo-chat","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"10 best practices for using AI-powered GitLab Duo Chat\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Michael Friedrich\"}],\n        \"datePublished\": \"2024-04-02\",\n      }",{"title":3189,"description":3190,"authors":3195,"heroImage":3191,"date":3196,"body":3197,"category":10,"tags":3198},[1217],"2024-04-02","Getting into a conversation with AI can be challenging. What question do you start with? How do you frame the question? How much context is needed? Will the conversation provide the best and most efficient results?\n\nIn this tutorial, we explore 10 tips and best practices to integrate GitLab Duo Chat into your AI-powered DevSecOps workflows and refine your prompts for the best results.\n\n[Get started: Keep GitLab Duo Chat open and in sight](#get-started-keep-gitlab-duo-chat-open-and-in-sight)\n\n[10 best practices for using GitLab Duo Chat](#10-best-practices-for-using-gitlab-duo-chat)\n\n1. [Have a conversation](#1.-have-a-conversation)\n2. [Refine the prompt for more efficiency](#2.-refine-the-prompt-for-more-efficiency)\n3. [Follow prompt patterns](#3.-follow-prompt-patterns)\n4. [Use low-context communication](#4.-use-low-context-communication)\n5. [Repeat yourself](#5.-repeat-yourself)\n6. [Be patient](#6.-be-patient)\n7. [Reset and start anew](#7.-reset-and-start-anew)\n8. [Gain efficiency with slash commands in the IDE](#8.-gain-efficiency-with-slash-commands-in-the-ide)\n9. [Refine the prompt for slash commands](#9.-refine-the-prompt-for-slash-commands)\n10. 
[Get creative with slash commands](#10.-get-creative-with-slash-commands)\n\nBonus content:\n- [Shortcuts](#shortcuts)\n- [Fun exercises](#fun-exercises)\n- [Learn more](#learn-more)\n\n> Live demo! Discover the future of AI-driven software development with our GitLab 17 virtual launch event. [Register today!](https://about.gitlab.com/seventeen/)\n\n## Get started: Keep GitLab Duo Chat open and in sight\n\n[GitLab Duo Chat](https://docs.gitlab.com/ee/user/gitlab_duo_chat.html) is available in the GitLab UI, Web IDE, and supported programming IDEs, for example, VS Code. \n\nIn VS Code, you can open GitLab Duo Chat in the default left pane. You can also drag and drop the icon into the right pane. This allows you to keep Chat open while you write code and navigate the file tree, perform Git actions, etc. To reset the Chat location, open the command palette (by pressing the `Command+Shift+P` (on macOS) or `Ctrl+Shift+P` (on Windows/Linux) keyboard shortcut and then type `View: Reset View Locations`. The following short video shows you how to do it.\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/foZpUvWPRJQ\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\nThe Web IDE and VS Code share the same framework – the same method works in the Web IDE for more efficient workflows.\n\n![Chat in Web IDE](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097645/Blog/Content%20Images/Blog/Content%20Images/image1_aHR0cHM6_1750097645344.png)\n\n## 10 best practices for using GitLab Duo Chat\n\n### 1. Have a conversation\n\nChats are conversations, not search forms.\n\nFor the first conversation icebreaker, you can start with the same search terms similar to a browser search and experiment with the response and output. In this example, let's start with a C# project and best practices. 
\n\n> c# start project best practices\n\n![Chat prompt for C# start project best practices and response](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097646/Blog/Content%20Images/Blog/Content%20Images/image11_aHR0cHM6_1750097645345.png)\n\nThe response is helpful to understand a broad scope of C#, but does not kickstart immediate best practices. Let's follow up with a more focused question in the same context. \n\n> Please show the project structure for the C# project.\n\n![Chat prompt for project structure for the C# project and response](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097645/Blog/Content%20Images/Blog/Content%20Images/image9_aHR0cHM6_1750097645346.png)\n\nThis answer is helpful. Next, let's follow up with a Git question, and use the same question structure: Direct request to show something.\n\n> Show an example for a .gitignore for C#\n\n![Chat prompt for a .gitignore for C# and response](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097645/Blog/Content%20Images/Blog/Content%20Images/image25_aHR0cHM6_1750097645347.png)\n\nContinue with CI/CD and ask how to build the C# project.\n\n> Show a GitLab CI/CD configuration for building the C# project\n\n![Chat prompt for GitLab CI/CD configuration for building C# project and response](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097645/Blog/Content%20Images/Blog/Content%20Images/image16_aHR0cHM6_1750097645349.png)\n\nIn this example, Chat encouraged us to request specific changes. Let's ask to use the .NET SDK 8.0 instead of 6.0. \n\n> In the above example, please use the .NET SDK 8.0 image\n\n![Chat prompt to use .NET SDK 8.0 image and response](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097645/Blog/Content%20Images/Blog/Content%20Images/image32_aHR0cHM6_1750097645350.png)\n\nThe CI/CD configuration uses the .NET command line interface (CLI). 
Maybe we can use that for more efficient commands to create the projects and tests structure, too? \n\n> Explain how to create projects and test structure on the CLI \n\n![Chat prompt to explain how to create projects and test structure on the CLI and response](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097645/Blog/Content%20Images/Blog/Content%20Images/image14_aHR0cHM6_1750097645351.png)\n\nOf course, we could execute these commands in the terminal, but what if we wanted to stay in VS Code? Let's ask Chat.\n\n> Explain how to open a new terminal in VS Code\n\n![Chat prompt to explain how to open a new terminal in VS Code and response](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097645/Blog/Content%20Images/Blog/Content%20Images/image5_aHR0cHM6_1750097645351.png)\n\n### 2. Refine the prompt for more efficiency\n\nThink of GitLab Duo Chat as a human, and engage with full sentences that provide as much context into your thoughts and questions. \n\nExperienced browser search users might know this approach to queries: Build up the question, add more terms to refine the scope, and restart the search after opening plenty of tabs. \n\nIn a browser search, this probably would result in four to five different search windows. \n\n```markdown\nc# start project best practices\nc# .gitignore\nc# gitlab cicd \nc# gitlab security scanning \nc# solutions and projects, application and tests\n``` \n\nYou can follow this strategy in a chat conversation, too. It requires adding more context, making it a conversational approach. GitLab Duo Chat enables you to ask multiple questions in one conversation request. Example: You need to start with a new C# project, apply best practices, add a `.gitignore` file, and configure CI/CD and security scanning, just like in the above search. In Chat, you can combine the questions into one request.\n\n> How can I get started creating an empty C# console application in VS Code? 
Please show a .gitignore and .gitlab-ci.yml configuration with steps for C#, and add security scanning for GitLab. Explain how solutions and projects in C# work, and how to add a test project on the CLI.\n\n![Chat prompt adding more context and response](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097645/Blog/Content%20Images/Blog/Content%20Images/image37_aHR0cHM6_1750097645352.png)\n\nIn this response, Chat suggests to ask for specific configuration examples in follow-up questions in the conversation. Async practice: Create follow-up questions. You can omit `C#` as context in the same chat session.\n\n> Please show an example for a .gitignore. Please show a CI/CD configuration. Include the SAST template.\n\n### 3. Follow prompt patterns \n\nFollow the pattern: `Problem statement, ask for help, provide additional requests`. Not everything comes to mind when asking the first question – don't feel blocked, and instead start with `Problem statement, ask for help` in the first iteration. \n\n> I need to fulfill compliance requirements. How can I get started with Codeowners and approval rules?\n\n![Chat prompt to get started with Codeowners and approval rules and response](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097645/Blog/Content%20Images/Blog/Content%20Images/image19_aHR0cHM6_1750097645352.png)\n\nThe answer is helpful but obviously generic. Now, you may want to get specific help for your team setup. \n\n> Please show an example for Codeowners with different teams: backend, frontend, release managers.\n\n![Chat prompt to show an example for Codeowners with different teams: backend, frontend, release managers and reponse ](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097645/Blog/Content%20Images/Blog/Content%20Images/image31_aHR0cHM6_1750097645353.png)\n\nAn alternative is to describe the situation you are in and to ask for input. 
It can feel a bit like a conversation to follow the STAR model (Situation, Task, Action, Results). \n\n> I have a Kubernetes cluster integrated in GitLab. Please generate a Yaml configuration for a Kubernetes service deployment. Explain how GitOps works as a second step. How to verify the results?\n\n![Chat prompt with multiple questions and response](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097645/Blog/Content%20Images/Blog/Content%20Images/image27_aHR0cHM6_1750097645354.png)\n\n### 4. Use low-context communication \n\nProvide as much context as needed to provide an answer. Sometimes, the previous history or opened source code does not provide that helpful context. To make questions more efficient, apply a pattern of [low-context communication](https://handbook.gitlab.com/handbook/company/culture/all-remote/effective-communication/#understanding-low-context-communication), which is used in all-remote communication at GitLab.\n\nThe following question did not provide enough context in a C++ project.\n\n> Should I use virtual override instead of just override?\n\n![Chat prompt asking if the users should use virtual override instead of just override and response](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097645/Blog/Content%20Images/Blog/Content%20Images/image34_aHR0cHM6_1750097645354.png)\n\nInstead, try to add more context:\n\n> When implementing a pure virtual function in an inherited class, should I use virtual function override, or just function override? Context is C++. \n\n![Chat prompt with more detail and response](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097645/Blog/Content%20Images/Blog/Content%20Images/image36_aHR0cHM6_1750097645355.png)\n\nThe example is also shown in the [GitLab Duo Coffee Chat: Refactor C++ functions into OOP classes for abstract database handling](https://youtu.be/Z9EJh0J9358?t=2190). \n\n### 5. Repeat yourself\n\nAI is not predictable. 
Sometimes, it may not answer with the expected results, or does not produce source code examples or configuration snippets because it lacked context. It is recommended to repeat the question and refine the requirements.\n\nIn the following example, we want to create a C# application. In the first attempt, we did not specify the application type – C# can be used to create console/terminal but also UI applications. The result also does not provide an empty example source code. The second, repeated prompt adds two more words - `console` and `empty`. \n\n> How can I get started creating an C# application in VSCode?\n> \n> How can I get started creating an empty C# console application in VSCode?\n\nThe results in the prompt differ. The first response is helpful to get started by following the instructions in the VS Code window, but it does not tell us where the source code is located and how to modify it. The repeated prompt with refinements modifies the response and provides instructions how to override the default template with some “hello world” code.\n\n![Chat prompt with repeated prompt with modifications and response](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097645/Blog/Content%20Images/Blog/Content%20Images/image28_aHR0cHM6_1750097645355.png)\n\nYou can also combine repeat and refine strategies, and ask Chat to show an example for application code and tests.\n\n> How can I get started creating an empty C# console application in VSCode? Please show an example for application and tests.\n\n![Chat prompt that asks for example for application and tests and response](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097645/Blog/Content%20Images/Blog/Content%20Images/image3_aHR0cHM6_1750097645356.png)\n\n#### Repeat yourself after generic questions \n\nWhen asking generic technology questions, GitLab Duo Chat might not be able to help. 
In the following scenario, I wanted to get a suggestion for Java build tools and framework, and it did not work. There could be many answers: Maven, Gradle, etc., as build tools, and [100+ Java frameworks](https://en.wikipedia.org/wiki/List_of_Java_frameworks), depending on the technology stack and requirements.\n\n![Chat prompt for Java build tools and framework and response](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097645/Blog/Content%20Images/Blog/Content%20Images/image2_aHR0cHM6_1750097645356.png)\n\nLet's assume that we want to focus on a customer environment with [Java Spring Boot](https://spring.io/projects/spring-boot). \n\n> I want to create a Java Spring Boot application. Please explain the project structure and show a hello world example.\n\n![Chat prompt that asks for more, including a hello world example and response](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097645/Blog/Content%20Images/Blog/Content%20Images/image26_aHR0cHM6_1750097645357.png)\n\nThis provides great results already. As an async exercise, repeat the prompt, and ask how to deploy the application, adding more refinements in each step. Alternatively, you can make it a follow-up conversation.\n\n> I want to create a Java Spring Boot application. Please explain the project structure and show a hello world example. Show how to build and deploy the application in CI/CD.\n> \n> I want to create a Java Spring Boot application. Please explain the project structure and show a hello world example. Show how to build and deploy the application in CI/CD, using container images.\n> \n> I want to create a Java Spring Boot application. Please explain the project structure and show a hello world example. Show how to build and deploy the application in CI/CD, using container images. Use Kubernetes and GitOps in GitLab.\n\n### 6. 
Be patient\n\nSingle words or short sentences might not generate the desired results, [as shown in this video example](https://youtu.be/JketELxLNEw?t=1220). Sometimes, GitLab Duo Chat is able to guess from available data, but sometimes also might insist on providing more context.\n\nExample: `labels` matches the GitLab documentation content.\n\n![Chat prompt about labels and response](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097645/Blog/Content%20Images/Blog/Content%20Images/image12_aHR0cHM6_1750097645357.png)\n\nRefine the question to problem statements and more refinements for issue board usage.\n\n> Explain labels in GitLab. Provide an example for efficient usage with issue boards.\n\n![Chat prompt that includes asking for an example and response](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097645/Blog/Content%20Images/Blog/Content%20Images/image21_aHR0cHM6_1750097645358.png)\n\nOr use a problem statement, followed by a question and the ask for additional examples.\n\n> I don't know how to use labels in GitLab. Please provide examples, and how to use them for filters in different views. Explain these views with examples.\n\n![Chat prompt with problem statement and response](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097645/Blog/Content%20Images/Blog/Content%20Images/image10_aHR0cHM6_1750097645358.png)\n\nAlso, avoid `yes/no` questions and instead add specific context.\n\n> Can you help me fix performance regressions?\n\n![Chat promptt that asks for help with fixing performance regressions and response](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097645/Blog/Content%20Images/Blog/Content%20Images/image18_aHR0cHM6_1750097645359.png)\n\nInstead, provide the context of the performance regression, including the programming languages, frameworks, technology stack, and environments. 
The following example uses an environment from some years ago, which can still be accurate today.\n\n> My PHP application encounters performance regressions using PHP 5.6 and MySQL 5.5. Please explain potential root causes, and how to address them. The app is deployed on Linux VMs.\n\n![Chat prompt that includes more detail and response](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097645/Blog/Content%20Images/Blog/Content%20Images/image24_aHR0cHM6_1750097645360.png)\n\n### 7. Reset and start anew\n\nSometimes, the chat history shows a different learning curve and provides the wrong context for follow-up questions. Or, you asked specific questions where GitLab Duo Chat cannot provide answers. Since generative AI is not predictable, it might also lack the ability to provide certain examples, but think it gave them in a future response (observed in Chat Beta). The underlying large language models, or LLMs, sometimes might insist on giving a specific response, in an endless loop.\n\n> How can I get started creating an empty C# console application in VSCode? Please show a .gitignore and .gitlab-ci.yml configuration with steps for C#, and add security scanning for GitLab. Explain how solutions and projects in C# work, and how to add a test project on the CLI.\n\nAfter asking the question above with an example configuration, I wanted to reduce the scope of the question to get a more tailored response. It did not work as expected, since Chat knows about the chat history in context, and refers to previous answers.\n\n> How can I get started creating an empty C# console application in VSCode? 
Please show a .gitignore and .gitlab-ci.yml configuration with steps for C#.\n\n![Chat prompt that asks for configuration examples and response](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097645/Blog/Content%20Images/Blog/Content%20Images/image23_aHR0cHM6_1750097645360.png)\n\nTo force Chat into a new context, use `/reset` as slash command to reset the session, and repeat the question to get better results. You can also use `/clean` or `/clear` to delete all messages in the conversation.\n\n### 8. Gain efficiency with slash commands in the IDE \n\n#### Explain code\n\n- Q: Generated code? Existing code? Legacy code?\n- A: Use the [`/explain` slash command in the IDE](https://docs.gitlab.com/ee/user/gitlab_duo_chat.html#explain-code-in-the-ide).\n- A2: Refine the prompt with more focused responses, for example: `/explain focus on potential shortcomings or bugs`. \n\n![Chat prompt with /explain slash command](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097645/Blog/Content%20Images/Blog/Content%20Images/gitlab_duo_chat_slash_commands_explain_01_aHR0cHM6_1750097645361.png)\n\n![Chat prompt with refined prompt](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097645/Blog/Content%20Images/Blog/Content%20Images/image6_aHR0cHM6_1750097645361.png)\n\n#### Refactor code \n\n- Q: Unreadable code? Long spaghetti code? Zero test coverage?\n- A: Use the [`/refactor` slash command in the IDE](https://docs.gitlab.com/ee/user/gitlab_duo_chat.html#refactor-code-in-the-ide). \n- A2: Refine the prompt for more targeted actions, for example object-oriented patterns: `/refactor into object-oriented classes with methods and attributes`. 
\n\n![Chat prompt with /refactor slash command](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097645/Blog/Content%20Images/Blog/Content%20Images/image35_aHR0cHM6_1750097645362.png)\n\n![Chat prompt with refined prompt](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097645/Blog/Content%20Images/Blog/Content%20Images/image30_aHR0cHM6_1750097645362.png)\n\n#### Generate tests\n\n- Q: Testable code but writing tests takes too much time?\n- A: Use the [`/tests` slash command in the IDE](https://docs.gitlab.com/ee/user/gitlab_duo_chat.html#write-tests-in-the-ide).\n- A2: Refine the prompt for specific test frameworks, or test targets. You can also instruct the prompt to focus on refactoring, and then generate tests: `/tests focus on refactoring the code into functions, and generate tests`.\n\n![Chat prompt with /tests slash command](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097645/Blog/Content%20Images/Blog/Content%20Images/image29_aHR0cHM6_1750097645363.png)\n\n![Chat prompt with refined prompt](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097645/Blog/Content%20Images/Blog/Content%20Images/image4_aHR0cHM6_1750097645363.png)\n\nMore practical examples in complete development workflows are available in the [GitLab Duo examples](https://docs.gitlab.com/ee/user/gitlab_duo_examples.html) documentation.\n\n### 9. Refine the prompt for slash commands \n\nYou will see refined prompts tips in this blog post a lot. It is one of the ingredients for better AI-powered workflow efficiency. Slash commands are no different, and allow for better results in GitLab Duo Chat.\n\nA customer recently asked: \"Can code explanations using `/explain` create comments in code?\" The answer is: no. But you can use the Chat prompt to ask follow-up questions, and ask for a summary in a code comment format. It requires the context of the language. 
\n\nThe following example with a [C++ HTTP client code using the curl library](https://gitlab.com/gitlab-da/use-cases/ai/ai-workflows/gitlab-duo-prompts/-/blob/5cc9bdd65ee8ee16c548bea0402c18f8209d4d06/chat/slash-commands/c++/cli.cpp) needs more documentation. You can refine the `/explain` prompt by giving more refined instructions to explain the code by adding code comments, and then copy-paste that into the editor.\n\n> /explain add documentation, rewrite the code snippet\n\n![Chat prompt to add documentation and rewrite code snippet and response](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097645/Blog/Content%20Images/Blog/Content%20Images/image13_aHR0cHM6_1750097645363.png)\n\nAlternatively, you can ask Chat to `/refactor` the source code, and generate missing code comments through a refined prompt.\n\n> /refactor add code comments and documentation\n\n![Chat prompt to refactor source code and generate code comments](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097645/Blog/Content%20Images/Blog/Content%20Images/image15_aHR0cHM6_1750097645364.png)\n\n### 10. Get creative with slash commands\n\nWhen the Chat prompt does not know an answer to a question about the source code or programming language, look into the slash commands `/explain`, `/refactor`, and `/tests` and how much they can help in the context.\n\nIn the following example, an SQL query string in C++ is created in a single line. To increase readability, and also add more database columns in the future, it can be helpful to change the formatting into a multi-line string.\n\n> std::string sql = \"CREATE TABLE IF NOT EXISTS users (id INTEGER PRIMARY KEY AUTOINCREMENT, name TEXT NOT NULL, email TEXT NOT NULL)\";\n\nYou can ask GitLab Duo Chat about it, for example, with the following question:\n\n> How to create a string in C++ using multiple lines?\n\nChat may answer with an explanation and optional, a source code example. 
In this context, it can interpret the question to create a C++ string value with multiple lines, for example, using the `\\n` character, assigned to a variable. \n\nThe requirement instead is to only format the written code, and variable value assignment in multiple lines. The string value itself does not need to contain a multi-line string representation. \n\nThere is an alternative for additional context in VS Code and the Web IDE: Select the source code in question, right-click, and navigate into `GitLab Duo Chat > Refactor`. This opens the Chat prompt and fires the `/refactor` code task immediately.\n\nAlthough, the code task might not bring the expected results. Refactoring a single-line SQL string can mean a lot of things: Use multiple lines for readability, create constants, etc.\n\nCode tasks provide an option to refine the prompt. You can add more text after the `/refactor` command, and instruct GitLab Duo Chat to use a specific code type, algorithm, or design pattern. \n\nLet's try it again: Select the source code, change focus into Chat, and type the following prompt, followed by `Enter`. \n\n> /refactor into a multi-line written string. Show different approaches for all C++ standards.\n\n![Chat prompt to refactor into a multi-line written string and response](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097645/Blog/Content%20Images/Blog/Content%20Images/image17_aHR0cHM6_1750097645364.png)\n\n**Tip:** You can use GitLab Duo Code Suggestions to refine the source code even more after refactoring, or use alternative `/refactor` prompt refinements.\n\n>/refactor into a multi-line written string, show different approaches\n>\n> /refactor into multi-line string, not using raw string literals\n>\n> /refactor into a multi-line written string. 
Make the table name parametrizable\n\nAn alternative approach with the `stringstream` type is shown in the [GitLab Duo Coffee Chat: Refactor C++ functions into OOP classes for abstract database handling](https://www.youtube.com/watch?v=Z9EJh0J9358), [MR diff](https://gitlab.com/gitlab-da/use-cases/ai/gitlab-duo-coffee-chat/gitlab-duo-coffee-chat-2024-01-23/-/commit/7ea233138aed46d77e6ce0d930dd8e10560134eb#4ce01e4c84d4b62df8eed159c2db3768ad4ef8bf_33_35). \n\n#### Explain vulnerabilities\n\nIt might not always work, but the `/explain` slash command can be asked about security vulnerability explanations, too. In this example, the [C code](https://gitlab.com/gitlab-da/use-cases/ai/ai-workflows/gitlab-duo-prompts/-/blob/5a5f293dfbfac7222ca4013d8f9ce9b462e4cd3a/chat/slash-commands/c/vuln.c) contains multiple vulnerabilities for strcpy() buffer overflows, world writable file permissions, race condition attacks, and more.\n\n> /explain why this code has multiple vulnerabilities\n\n![Chat prompt about the code's multiple vulnerabilities](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097645/Blog/Content%20Images/Blog/Content%20Images/image20_aHR0cHM6_1750097645365.png)\n\n#### Refactor C code into Rust\n\nRust provides memory safety. You can ask Duo Chat to refactor the vulnerable [C code](https://gitlab.com/gitlab-da/use-cases/ai/ai-workflows/gitlab-duo-prompts/-/blob/5a5f293dfbfac7222ca4013d8f9ce9b462e4cd3a/chat/slash-commands/c/vuln.c) into Rust, using `/refactor into Rust`. Practice with more refined prompts to get better results.\n\n> /refactor into Rust and use high level libraries\n\n![Chat prompt](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097645/Blog/Content%20Images/Blog/Content%20Images/image8_aHR0cHM6_1750097645366.png)\n\n### Shortcuts \n\nGive these shortcuts a try in your environment, and practice async using GitLab Duo Chat.\n\n1. 
Inspect vulnerable code from CVEs, and ask what it does, and how to fix it, using `/explain why is this code vulnerable`. \n**Tip:** Import open-source projects in GitLab to take advantage of GitLab Duo Chat code explanations.\n1. Try to refactor code into new programming languages to help legacy code migration plans.\n1. You can also try to refactor Jenkins configuration into GitLab CI/CD, using `/refactor into GitLab CI/CD configuration`. \n\n### Fun exercises \n\nTry to convince Chat to behave like Clippy.\n![Chat prompt](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097645/Blog/Content%20Images/Blog/Content%20Images/image22_aHR0cHM6_1750097645366.png)\n\nAsk about GitLab's mission: \"Everyone can contribute.\"\n\n![Chat prompt](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097645/Blog/Content%20Images/Blog/Content%20Images/image33_aHR0cHM6_1750097645367.png)\n\n### Learn more\n\nThere are many different environments and challenges out there. We have updated the [GitLab Duo Chat documentation](https://docs.gitlab.com/ee/user/gitlab_duo_chat.html) with more practical examples, and added a new [GitLab Duo examples](https://docs.gitlab.com/ee/user/gitlab_duo_examples.html) section with deep dives into AI-powered DevSecOps workflows, including Chat.\n\n> Want to get going with GitLab Duo Chat? [Start your free trial today](https://about.gitlab.com/solutions/gitlab-duo-pro/self-managed-and-gitlab-dedicated-trial/).\n",[9,724,482,704],{"slug":3200,"featured":91,"template":680},"10-best-practices-for-using-ai-powered-gitlab-duo-chat","content:en-us:blog:10-best-practices-for-using-ai-powered-gitlab-duo-chat.yml","10 Best Practices For Using Ai Powered Gitlab Duo Chat","en-us/blog/10-best-practices-for-using-ai-powered-gitlab-duo-chat.yml","en-us/blog/10-best-practices-for-using-ai-powered-gitlab-duo-chat",15,[666,686,711,732,753,773,794,815,835],1754424513644]