[{"data":1,"prerenderedAt":1332},["ShallowReactive",2],{"/en-us/blog/tags/gitops/":3,"navigation-en-us":20,"banner-en-us":438,"footer-en-us":453,"GitOps-tag-page-en-us":662},{"_path":4,"_dir":5,"_draft":6,"_partial":6,"_locale":7,"content":8,"config":11,"_id":13,"_type":14,"title":15,"_source":16,"_file":17,"_stem":18,"_extension":19},"/en-us/blog/tags/gitops","tags",false,"",{"tag":9,"tagSlug":10},"GitOps","gitops",{"template":12},"BlogTag","content:en-us:blog:tags:gitops.yml","yaml","Gitops","content","en-us/blog/tags/gitops.yml","en-us/blog/tags/gitops","yml",{"_path":21,"_dir":22,"_draft":6,"_partial":6,"_locale":7,"data":23,"_id":434,"_type":14,"title":435,"_source":16,"_file":436,"_stem":437,"_extension":19},"/shared/en-us/main-navigation","en-us",{"logo":24,"freeTrial":29,"sales":34,"login":39,"items":44,"search":375,"minimal":406,"duo":425},{"config":25},{"href":26,"dataGaName":27,"dataGaLocation":28},"/","gitlab logo","header",{"text":30,"config":31},"Get free trial",{"href":32,"dataGaName":33,"dataGaLocation":28},"https://gitlab.com/-/trial_registrations/new?glm_source=about.gitlab.com&glm_content=default-saas-trial/","free trial",{"text":35,"config":36},"Talk to sales",{"href":37,"dataGaName":38,"dataGaLocation":28},"/sales/","sales",{"text":40,"config":41},"Sign in",{"href":42,"dataGaName":43,"dataGaLocation":28},"https://gitlab.com/users/sign_in/","sign in",[45,89,185,190,296,356],{"text":46,"config":47,"cards":49,"footer":72},"Platform",{"dataNavLevelOne":48},"platform",[50,56,64],{"title":46,"description":51,"link":52},"The most comprehensive AI-powered DevSecOps Platform",{"text":53,"config":54},"Explore our Platform",{"href":55,"dataGaName":48,"dataGaLocation":28},"/platform/",{"title":57,"description":58,"link":59},"GitLab Duo (AI)","Build software faster with AI at every stage of development",{"text":60,"config":61},"Meet GitLab Duo",{"href":62,"dataGaName":63,"dataGaLocation":28},"/gitlab-duo/","gitlab duo 
ai",{"title":65,"description":66,"link":67},"Why GitLab","10 reasons why Enterprises choose GitLab",{"text":68,"config":69},"Learn more",{"href":70,"dataGaName":71,"dataGaLocation":28},"/why-gitlab/","why gitlab",{"title":73,"items":74},"Get started with",[75,80,85],{"text":76,"config":77},"Platform Engineering",{"href":78,"dataGaName":79,"dataGaLocation":28},"/solutions/platform-engineering/","platform engineering",{"text":81,"config":82},"Developer Experience",{"href":83,"dataGaName":84,"dataGaLocation":28},"/developer-experience/","Developer experience",{"text":86,"config":87},"MLOps",{"href":88,"dataGaName":86,"dataGaLocation":28},"/topics/devops/the-role-of-ai-in-devops/",{"text":90,"left":91,"config":92,"link":94,"lists":98,"footer":167},"Product",true,{"dataNavLevelOne":93},"solutions",{"text":95,"config":96},"View all Solutions",{"href":97,"dataGaName":93,"dataGaLocation":28},"/solutions/",[99,124,146],{"title":100,"description":101,"link":102,"items":107},"Automation","CI/CD and automation to accelerate deployment",{"config":103},{"icon":104,"href":105,"dataGaName":106,"dataGaLocation":28},"AutomatedCodeAlt","/solutions/delivery-automation/","automated software delivery",[108,112,116,120],{"text":109,"config":110},"CI/CD",{"href":111,"dataGaLocation":28,"dataGaName":109},"/solutions/continuous-integration/",{"text":113,"config":114},"AI-Assisted Development",{"href":62,"dataGaLocation":28,"dataGaName":115},"AI assisted development",{"text":117,"config":118},"Source Code Management",{"href":119,"dataGaLocation":28,"dataGaName":117},"/solutions/source-code-management/",{"text":121,"config":122},"Automated Software Delivery",{"href":105,"dataGaLocation":28,"dataGaName":123},"Automated software delivery",{"title":125,"description":126,"link":127,"items":132},"Security","Deliver code faster without compromising security",{"config":128},{"href":129,"dataGaName":130,"dataGaLocation":28,"icon":131},"/solutions/security-compliance/","security and 
compliance","ShieldCheckLight",[133,136,141],{"text":134,"config":135},"Security & Compliance",{"href":129,"dataGaLocation":28,"dataGaName":134},{"text":137,"config":138},"Software Supply Chain Security",{"href":139,"dataGaLocation":28,"dataGaName":140},"/solutions/supply-chain/","Software supply chain security",{"text":142,"config":143},"Compliance & Governance",{"href":144,"dataGaLocation":28,"dataGaName":145},"/solutions/continuous-software-compliance/","Compliance and governance",{"title":147,"link":148,"items":153},"Measurement",{"config":149},{"icon":150,"href":151,"dataGaName":152,"dataGaLocation":28},"DigitalTransformation","/solutions/visibility-measurement/","visibility and measurement",[154,158,162],{"text":155,"config":156},"Visibility & Measurement",{"href":151,"dataGaLocation":28,"dataGaName":157},"Visibility and Measurement",{"text":159,"config":160},"Value Stream Management",{"href":161,"dataGaLocation":28,"dataGaName":159},"/solutions/value-stream-management/",{"text":163,"config":164},"Analytics & Insights",{"href":165,"dataGaLocation":28,"dataGaName":166},"/solutions/analytics-and-insights/","Analytics and insights",{"title":168,"items":169},"GitLab for",[170,175,180],{"text":171,"config":172},"Enterprise",{"href":173,"dataGaLocation":28,"dataGaName":174},"/enterprise/","enterprise",{"text":176,"config":177},"Small Business",{"href":178,"dataGaLocation":28,"dataGaName":179},"/small-business/","small business",{"text":181,"config":182},"Public Sector",{"href":183,"dataGaLocation":28,"dataGaName":184},"/solutions/public-sector/","public sector",{"text":186,"config":187},"Pricing",{"href":188,"dataGaName":189,"dataGaLocation":28,"dataNavLevelOne":189},"/pricing/","pricing",{"text":191,"config":192,"link":194,"lists":198,"feature":283},"Resources",{"dataNavLevelOne":193},"resources",{"text":195,"config":196},"View all resources",{"href":197,"dataGaName":193,"dataGaLocation":28},"/resources/",[199,232,255],{"title":200,"items":201},"Getting 
started",[202,207,212,217,222,227],{"text":203,"config":204},"Install",{"href":205,"dataGaName":206,"dataGaLocation":28},"/install/","install",{"text":208,"config":209},"Quick start guides",{"href":210,"dataGaName":211,"dataGaLocation":28},"/get-started/","quick setup checklists",{"text":213,"config":214},"Learn",{"href":215,"dataGaLocation":28,"dataGaName":216},"https://university.gitlab.com/","learn",{"text":218,"config":219},"Product documentation",{"href":220,"dataGaName":221,"dataGaLocation":28},"https://docs.gitlab.com/","product documentation",{"text":223,"config":224},"Best practice videos",{"href":225,"dataGaName":226,"dataGaLocation":28},"/getting-started-videos/","best practice videos",{"text":228,"config":229},"Integrations",{"href":230,"dataGaName":231,"dataGaLocation":28},"/integrations/","integrations",{"title":233,"items":234},"Discover",[235,240,245,250],{"text":236,"config":237},"Customer success stories",{"href":238,"dataGaName":239,"dataGaLocation":28},"/customers/","customer success stories",{"text":241,"config":242},"Blog",{"href":243,"dataGaName":244,"dataGaLocation":28},"/blog/","blog",{"text":246,"config":247},"Remote",{"href":248,"dataGaName":249,"dataGaLocation":28},"https://handbook.gitlab.com/handbook/company/culture/all-remote/","remote",{"text":251,"config":252},"TeamOps",{"href":253,"dataGaName":254,"dataGaLocation":28},"/teamops/","teamops",{"title":256,"items":257},"Connect",[258,263,268,273,278],{"text":259,"config":260},"GitLab 
Services",{"href":261,"dataGaName":262,"dataGaLocation":28},"/services/","services",{"text":264,"config":265},"Community",{"href":266,"dataGaName":267,"dataGaLocation":28},"/community/","community",{"text":269,"config":270},"Forum",{"href":271,"dataGaName":272,"dataGaLocation":28},"https://forum.gitlab.com/","forum",{"text":274,"config":275},"Events",{"href":276,"dataGaName":277,"dataGaLocation":28},"/events/","events",{"text":279,"config":280},"Partners",{"href":281,"dataGaName":282,"dataGaLocation":28},"/partners/","partners",{"backgroundColor":284,"textColor":285,"text":286,"image":287,"link":291},"#2f2a6b","#fff","Insights for the future of software development",{"altText":288,"config":289},"the source promo card",{"src":290},"/images/navigation/the-source-promo-card.svg",{"text":292,"config":293},"Read the latest",{"href":294,"dataGaName":295,"dataGaLocation":28},"/the-source/","the source",{"text":297,"config":298,"lists":300},"Company",{"dataNavLevelOne":299},"company",[301],{"items":302},[303,308,314,316,321,326,331,336,341,346,351],{"text":304,"config":305},"About",{"href":306,"dataGaName":307,"dataGaLocation":28},"/company/","about",{"text":309,"config":310,"footerGa":313},"Jobs",{"href":311,"dataGaName":312,"dataGaLocation":28},"/jobs/","jobs",{"dataGaName":312},{"text":274,"config":315},{"href":276,"dataGaName":277,"dataGaLocation":28},{"text":317,"config":318},"Leadership",{"href":319,"dataGaName":320,"dataGaLocation":28},"/company/team/e-group/","leadership",{"text":322,"config":323},"Team",{"href":324,"dataGaName":325,"dataGaLocation":28},"/company/team/","team",{"text":327,"config":328},"Handbook",{"href":329,"dataGaName":330,"dataGaLocation":28},"https://handbook.gitlab.com/","handbook",{"text":332,"config":333},"Investor relations",{"href":334,"dataGaName":335,"dataGaLocation":28},"https://ir.gitlab.com/","investor relations",{"text":337,"config":338},"Trust Center",{"href":339,"dataGaName":340,"dataGaLocation":28},"/security/","trust 
center",{"text":342,"config":343},"AI Transparency Center",{"href":344,"dataGaName":345,"dataGaLocation":28},"/ai-transparency-center/","ai transparency center",{"text":347,"config":348},"Newsletter",{"href":349,"dataGaName":350,"dataGaLocation":28},"/company/contact/","newsletter",{"text":352,"config":353},"Press",{"href":354,"dataGaName":355,"dataGaLocation":28},"/press/","press",{"text":357,"config":358,"lists":359},"Contact us",{"dataNavLevelOne":299},[360],{"items":361},[362,365,370],{"text":35,"config":363},{"href":37,"dataGaName":364,"dataGaLocation":28},"talk to sales",{"text":366,"config":367},"Get help",{"href":368,"dataGaName":369,"dataGaLocation":28},"/support/","get help",{"text":371,"config":372},"Customer portal",{"href":373,"dataGaName":374,"dataGaLocation":28},"https://customers.gitlab.com/customers/sign_in/","customer portal",{"close":376,"login":377,"suggestions":384},"Close",{"text":378,"link":379},"To search repositories and projects, login to",{"text":380,"config":381},"gitlab.com",{"href":42,"dataGaName":382,"dataGaLocation":383},"search login","search",{"text":385,"default":386},"Suggestions",[387,389,393,395,399,403],{"text":57,"config":388},{"href":62,"dataGaName":57,"dataGaLocation":383},{"text":390,"config":391},"Code Suggestions (AI)",{"href":392,"dataGaName":390,"dataGaLocation":383},"/solutions/code-suggestions/",{"text":109,"config":394},{"href":111,"dataGaName":109,"dataGaLocation":383},{"text":396,"config":397},"GitLab on AWS",{"href":398,"dataGaName":396,"dataGaLocation":383},"/partners/technology-partners/aws/",{"text":400,"config":401},"GitLab on Google Cloud",{"href":402,"dataGaName":400,"dataGaLocation":383},"/partners/technology-partners/google-cloud-platform/",{"text":404,"config":405},"Why GitLab?",{"href":70,"dataGaName":404,"dataGaLocation":383},{"freeTrial":407,"mobileIcon":412,"desktopIcon":417,"secondaryButton":420},{"text":408,"config":409},"Start free 
trial",{"href":410,"dataGaName":33,"dataGaLocation":411},"https://gitlab.com/-/trials/new/","nav",{"altText":413,"config":414},"Gitlab Icon",{"src":415,"dataGaName":416,"dataGaLocation":411},"/images/brand/gitlab-logo-tanuki.svg","gitlab icon",{"altText":413,"config":418},{"src":419,"dataGaName":416,"dataGaLocation":411},"/images/brand/gitlab-logo-type.svg",{"text":421,"config":422},"Get Started",{"href":423,"dataGaName":424,"dataGaLocation":411},"https://gitlab.com/-/trial_registrations/new?glm_source=about.gitlab.com/compare/gitlab-vs-github/","get started",{"freeTrial":426,"mobileIcon":430,"desktopIcon":432},{"text":427,"config":428},"Learn more about GitLab Duo",{"href":62,"dataGaName":429,"dataGaLocation":411},"gitlab duo",{"altText":413,"config":431},{"src":415,"dataGaName":416,"dataGaLocation":411},{"altText":413,"config":433},{"src":419,"dataGaName":416,"dataGaLocation":411},"content:shared:en-us:main-navigation.yml","Main Navigation","shared/en-us/main-navigation.yml","shared/en-us/main-navigation",{"_path":439,"_dir":22,"_draft":6,"_partial":6,"_locale":7,"title":440,"button":441,"image":445,"config":448,"_id":450,"_type":14,"_source":16,"_file":451,"_stem":452,"_extension":19},"/shared/en-us/banner","is now in public beta!",{"text":68,"config":442},{"href":443,"dataGaName":444,"dataGaLocation":28},"/gitlab-duo/agent-platform/","duo banner",{"config":446},{"src":447},"https://res.cloudinary.com/about-gitlab-com/image/upload/v1753720689/somrf9zaunk0xlt7ne4x.svg",{"layout":449},"release","content:shared:en-us:banner.yml","shared/en-us/banner.yml","shared/en-us/banner",{"_path":454,"_dir":22,"_draft":6,"_partial":6,"_locale":7,"data":455,"_id":658,"_type":14,"title":659,"_source":16,"_file":660,"_stem":661,"_extension":19},"/shared/en-us/main-footer",{"text":456,"source":457,"edit":463,"contribute":468,"config":473,"items":478,"minimal":650},"Git is a trademark of Software Freedom Conservancy and our use of 'GitLab' is under 
license",{"text":458,"config":459},"View page source",{"href":460,"dataGaName":461,"dataGaLocation":462},"https://gitlab.com/gitlab-com/marketing/digital-experience/about-gitlab-com/","page source","footer",{"text":464,"config":465},"Edit this page",{"href":466,"dataGaName":467,"dataGaLocation":462},"https://gitlab.com/gitlab-com/marketing/digital-experience/about-gitlab-com/-/blob/main/content/","web ide",{"text":469,"config":470},"Please contribute",{"href":471,"dataGaName":472,"dataGaLocation":462},"https://gitlab.com/gitlab-com/marketing/digital-experience/about-gitlab-com/-/blob/main/CONTRIBUTING.md/","please contribute",{"twitter":474,"facebook":475,"youtube":476,"linkedin":477},"https://twitter.com/gitlab","https://www.facebook.com/gitlab","https://www.youtube.com/channel/UCnMGQ8QHMAnVIsI3xJrihhg","https://www.linkedin.com/company/gitlab-com",[479,502,557,586,620],{"title":46,"links":480,"subMenu":485},[481],{"text":482,"config":483},"DevSecOps platform",{"href":55,"dataGaName":484,"dataGaLocation":462},"devsecops platform",[486],{"title":186,"links":487},[488,492,497],{"text":489,"config":490},"View plans",{"href":188,"dataGaName":491,"dataGaLocation":462},"view plans",{"text":493,"config":494},"Why Premium?",{"href":495,"dataGaName":496,"dataGaLocation":462},"/pricing/premium/","why premium",{"text":498,"config":499},"Why Ultimate?",{"href":500,"dataGaName":501,"dataGaLocation":462},"/pricing/ultimate/","why ultimate",{"title":503,"links":504},"Solutions",[505,510,513,515,520,525,529,532,536,539,541,544,547,552],{"text":506,"config":507},"Digital transformation",{"href":508,"dataGaName":509,"dataGaLocation":462},"/topics/digital-transformation/","digital transformation",{"text":134,"config":511},{"href":129,"dataGaName":512,"dataGaLocation":462},"security & compliance",{"text":123,"config":514},{"href":105,"dataGaName":106,"dataGaLocation":462},{"text":516,"config":517},"Agile 
development",{"href":518,"dataGaName":519,"dataGaLocation":462},"/solutions/agile-delivery/","agile delivery",{"text":521,"config":522},"Cloud transformation",{"href":523,"dataGaName":524,"dataGaLocation":462},"/topics/cloud-native/","cloud transformation",{"text":526,"config":527},"SCM",{"href":119,"dataGaName":528,"dataGaLocation":462},"source code management",{"text":109,"config":530},{"href":111,"dataGaName":531,"dataGaLocation":462},"continuous integration & delivery",{"text":533,"config":534},"Value stream management",{"href":161,"dataGaName":535,"dataGaLocation":462},"value stream management",{"text":9,"config":537},{"href":538,"dataGaName":10,"dataGaLocation":462},"/solutions/gitops/",{"text":171,"config":540},{"href":173,"dataGaName":174,"dataGaLocation":462},{"text":542,"config":543},"Small business",{"href":178,"dataGaName":179,"dataGaLocation":462},{"text":545,"config":546},"Public sector",{"href":183,"dataGaName":184,"dataGaLocation":462},{"text":548,"config":549},"Education",{"href":550,"dataGaName":551,"dataGaLocation":462},"/solutions/education/","education",{"text":553,"config":554},"Financial services",{"href":555,"dataGaName":556,"dataGaLocation":462},"/solutions/finance/","financial 
services",{"title":191,"links":558},[559,561,563,565,568,570,572,574,576,578,580,582,584],{"text":203,"config":560},{"href":205,"dataGaName":206,"dataGaLocation":462},{"text":208,"config":562},{"href":210,"dataGaName":211,"dataGaLocation":462},{"text":213,"config":564},{"href":215,"dataGaName":216,"dataGaLocation":462},{"text":218,"config":566},{"href":220,"dataGaName":567,"dataGaLocation":462},"docs",{"text":241,"config":569},{"href":243,"dataGaName":244,"dataGaLocation":462},{"text":236,"config":571},{"href":238,"dataGaName":239,"dataGaLocation":462},{"text":246,"config":573},{"href":248,"dataGaName":249,"dataGaLocation":462},{"text":259,"config":575},{"href":261,"dataGaName":262,"dataGaLocation":462},{"text":251,"config":577},{"href":253,"dataGaName":254,"dataGaLocation":462},{"text":264,"config":579},{"href":266,"dataGaName":267,"dataGaLocation":462},{"text":269,"config":581},{"href":271,"dataGaName":272,"dataGaLocation":462},{"text":274,"config":583},{"href":276,"dataGaName":277,"dataGaLocation":462},{"text":279,"config":585},{"href":281,"dataGaName":282,"dataGaLocation":462},{"title":297,"links":587},[588,590,592,594,596,598,600,604,609,611,613,615],{"text":304,"config":589},{"href":306,"dataGaName":299,"dataGaLocation":462},{"text":309,"config":591},{"href":311,"dataGaName":312,"dataGaLocation":462},{"text":317,"config":593},{"href":319,"dataGaName":320,"dataGaLocation":462},{"text":322,"config":595},{"href":324,"dataGaName":325,"dataGaLocation":462},{"text":327,"config":597},{"href":329,"dataGaName":330,"dataGaLocation":462},{"text":332,"config":599},{"href":334,"dataGaName":335,"dataGaLocation":462},{"text":601,"config":602},"Sustainability",{"href":603,"dataGaName":601,"dataGaLocation":462},"/sustainability/",{"text":605,"config":606},"Diversity, inclusion and belonging (DIB)",{"href":607,"dataGaName":608,"dataGaLocation":462},"/diversity-inclusion-belonging/","Diversity, inclusion and 
belonging",{"text":337,"config":610},{"href":339,"dataGaName":340,"dataGaLocation":462},{"text":347,"config":612},{"href":349,"dataGaName":350,"dataGaLocation":462},{"text":352,"config":614},{"href":354,"dataGaName":355,"dataGaLocation":462},{"text":616,"config":617},"Modern Slavery Transparency Statement",{"href":618,"dataGaName":619,"dataGaLocation":462},"https://handbook.gitlab.com/handbook/legal/modern-slavery-act-transparency-statement/","modern slavery transparency statement",{"title":621,"links":622},"Contact Us",[623,626,628,630,635,640,645],{"text":624,"config":625},"Contact an expert",{"href":37,"dataGaName":38,"dataGaLocation":462},{"text":366,"config":627},{"href":368,"dataGaName":369,"dataGaLocation":462},{"text":371,"config":629},{"href":373,"dataGaName":374,"dataGaLocation":462},{"text":631,"config":632},"Status",{"href":633,"dataGaName":634,"dataGaLocation":462},"https://status.gitlab.com/","status",{"text":636,"config":637},"Terms of use",{"href":638,"dataGaName":639,"dataGaLocation":462},"/terms/","terms of use",{"text":641,"config":642},"Privacy statement",{"href":643,"dataGaName":644,"dataGaLocation":462},"/privacy/","privacy statement",{"text":646,"config":647},"Cookie preferences",{"dataGaName":648,"dataGaLocation":462,"id":649,"isOneTrustButton":91},"cookie preferences","ot-sdk-btn",{"items":651},[652,654,656],{"text":636,"config":653},{"href":638,"dataGaName":639,"dataGaLocation":462},{"text":641,"config":655},{"href":643,"dataGaName":644,"dataGaLocation":462},{"text":646,"config":657},{"dataGaName":648,"dataGaLocation":462,"id":649,"isOneTrustButton":91},"content:shared:en-us:main-footer.yml","Main 
Footer","shared/en-us/main-footer.yml","shared/en-us/main-footer",{"allPosts":663,"featuredPost":1310,"totalPagesCount":1330,"initialPosts":1331},[664,688,711,732,753,776,796,820,841,860,880,898,917,938,958,978,1000,1021,1041,1063,1085,1107,1130,1151,1171,1192,1211,1230,1248,1271,1291],{"_path":665,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":666,"content":674,"config":681,"_id":684,"_type":14,"title":685,"_source":16,"_file":686,"_stem":687,"_extension":19},"/en-us/blog/announcing-gitlab-devsecops",{"title":667,"description":668,"ogTitle":667,"ogDescription":668,"noIndex":6,"ogImage":669,"ogUrl":670,"ogSiteName":671,"ogType":672,"canonicalUrls":670,"schema":673},"Announcing GitLab for DevSecOps","GitLab brings development, security, and operations into a single application.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749671668/Blog/Hero%20Images/integrated.jpg","https://about.gitlab.com/blog/announcing-gitlab-devsecops","https://about.gitlab.com","article","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Announcing GitLab for DevSecOps\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Erica Lindberg\"}],\n        \"datePublished\": \"2019-06-20\",\n      }",{"title":667,"description":668,"authors":675,"heroImage":669,"date":677,"body":678,"category":299,"tags":679},[676],"Erica Lindberg","2019-06-20","\n\n[DevOps](/topics/devops/) adoption continues to mature, and as organizations get better at breaking down silos in the development and delivery process to ship software faster, security is moving to the forefront. 
In fact, [Forrester predicted that 2019 would be the year of security](https://www.forrester.com/report/Predictions+2019+DevOps/-/E-RES144579): “Many organizations have succeeded in automating continuous release and deployment for some applications but face increasing risk from lack of governance and fragmented toolchains.”\n\n[DevSecOps](/topics/devsecops/) is the natural next iteration of DevOps and today we are excited to announce that we are bringing developers, operations professionals, and the security team together in the first single application for the entire DevSecOps lifecycle. Building upon the [security features](/pricing/feature-comparison/) we’ve added over the past 12 months (SAST, DAST, dependency scanning, and container scanning), we’ve also released Auto Remediation, Security Dashboards, and will release [Security Approvals](https://gitlab.com/gitlab-org/gitlab-ee/issues/9928) in 12.1, creating an application with security and compliance built in. Additionally, we’ve expanded our operations capabilities: In addition to our Kubernetes-native integrations and multicloud deployment support, we’ve added [Feature Flags](/direction/release/feature_flags/), an [Operations Dashboard](https://docs.gitlab.com/ee/user/operations_dashboard/), and will release [Incident Management](/direction/service_management/incident_management/) in 12.1.\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/zRUwU6ZE-QA\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\nWith GitLab 12.0, releasing on June 22, we continue to support DevSecOps with the addition of Visual Review Tools, project dependency list, and Merge Trains. 
As our CEO, [Sid Sijbrandij](/company/team/#sytses), says, “GitLab 12.0 marks a key step in our journey to create an inclusive approach to DevSecOps.” And users are already seeing the benefits:\n\n“We already have a frontend team, an SDK and native app team, an SRE team, and a services team all collaborating on development, security, and operations in GitLab,” said Cillian Dwyer, site reliability engineer, Glympse. “Because we're together, we're able to collaborate and ship faster.”\n\n“As a project, we’re excited to see all of the updates GitLab has made. GitLab makes it easy for us to work on community contributions and CI in general,” said Eduardo Silva, maintainer of [Fluent Bit](https://fluentbit.io/) (a Fluentd sub-project), the cloud native, open source logging solution to unify data collection and consumption.\n\n## DevSecOps in a single application\n\nThe [advantages of a single application](/handbook/product/single-application/) are numerous: A single sign-on eliminates the need to request access to each separate tool, context switching is reduced which improves cycle time, and work is tracked in one place so you don’t have to do detective work to find the information you need. According to Forrester’s [Manage Your Toolchain Before It Manages You report](/resources/whitepaper-forrester-manage-your-toolchain/), over 40% of enterprises anticipate improved quality, security, and developer productivity by using an out-of-the-box solution. For security professionals, this means that balancing velocity with security is possible.\n\nSecurity has traditionally been the “final hurdle” in the development lifecycle, tacked on at the end and often reviewed long after the developer committed their code. When security is separate from the DevOps workflow, it becomes a potential bottleneck to delivery. DevSecOps aims to integrate security best practices in the DevOps workflow to ensure every piece of code is tested upon commit. 
GitLab takes that a step further by building [security capabilities](/direction/secure/#auto-remediation) into the [CI/CD workflow](/topics/ci-cd/), empowering developers to identify vulnerabilities and remove them early, and by providing the security team with their own dashboard to view items not resolved by the developers.\n\n![GitLab DevSecOps diagram](https://about.gitlab.com/images/secure/security-diagram.svg){: .medium.center}\n\nGitLab integrates and automates security into the CI/CD pipeline.\n{: .note.text-center}\n\nThe approach is to support decision makers with actionable tools that make it simpler to take the right action and learn from it. Instead of security features automatically blocking a pipeline or preventing a new version from being released to production, users can interact with the tool and perform a risk assessment based on the information provided. When triaging vulnerabilities, users can confirm by creating an issue to solve the problem or dismiss them in the case of false positives.\n\n### Secure features in GitLab\n\nWe started our DevSecOps journey by empowering developers to spot potential vulnerabilities while coding by embedding [Static Application Security Testing (SAST)](https://docs.gitlab.com/ee/user/application_security/sast/) and [Dynamic Application Security Testing (DAST)](https://docs.gitlab.com/ee/user/application_security/dast/), [Container Scanning](https://docs.gitlab.com/ee/user/application_security/container_scanning/), and [Dependency Scanning](https://docs.gitlab.com/ee/user/application_security/dependency_scanning/) into the CI/CD pipeline.\n\n![GitLab static application security testing feature](https://about.gitlab.com/images/secure/sast.png){: .shadow.medium.center}\n\nDetected vulnerabilities are shown directly in the merge request.\n{: .note.text-center}\n\nSAST scans the application source code and binaries to spot potential vulnerabilities before deployment, and DAST analyzes your running web application for 
runtime vulnerabilities and runs live attacks against the review app. Information is shown directly in the merge request and results are sorted by the priority of the vulnerability.\n\nLikewise, Dependency Scanning and Container Scanning are built into GitLab CI/CD and available as part of [Auto DevOps](https://docs.gitlab.com/ee/topics/autodevops/) to provide security by default. Container Scanning runs a security scan to ensure your environment does not have any known vulnerability. Dependency scanning analyzes external dependencies (e.g. Ruby gem libraries), alerting the developer if vulnerable dependencies need updating. Results are shown in both the merge request and pipeline views.\n\n> “Having automated security scans built into GitLab merge requests spanning across the entire DevOps lifecycle go together perfectly with Rancher and K3s. Our joint customers have more confidence that new vulnerabilities are not being introduced into their code before, during and after deployment.” - Shannon Williams, co-founder at Rancher\n\nThe next iteration of our security journey included making GitLab a viable product for the security team by automating what can be automated and ensuring security teams have the view they need to understand the current security status of their applications.\n\n![GitLab Security Dashboard](https://about.gitlab.com/images/secure/security-dashboard.png){: .shadow.medium.center}\nThe group-level Security Dashboard in GitLab gives an overview of vulnerabilities for all projects within the group and sub-groups.\n{: .note.text-center}\n\nThe [Security Dashboard](https://docs.gitlab.com/ee/user/application_security/security_dashboard/) is available at both the group and product level and can be used as a primary tool for security teams. 
In addition to providing an overview of security status, the Security Dashboard can be used to start a remediation process and provides data visualizations for easy consumption of performance information.\n\nFinally, we are rapidly iterating on Auto Remediation to automate vulnerability fixes. Auto Remediation aims to automate vulnerability solution flow, and automatically create a fix. The fix is then tested, and if it passes all the tests already defined for the application, it is deployed to production. GitLab can then monitor performances of the deployed app, and revert all the changes in case performances are decreasing dramatically, warning the user about the entire process and reducing the need for manual actions. You can [learn more about Auto Remediation and our progress here](https://gitlab.com/groups/gitlab-org/-/epics/759).\n\n> \"By adding automated security processes into code delivery, GitLab is furthering productivity by allowing organizations to focus on getting their applications to market not only quickly but securely. We look forward to continuing to work with GitLab as they build out more capabilities in their application, and help companies using GitLab further their multicloud strategy.\" - Bassam Tabbara, CEO of Upbound\n\nOverall, with security automated throughout the developer workflow and DevSecOps delivered in a single application, we believe companies will continue to advance the way they deliver code, shortening release cycles and focusing on the innovation they will bring to market. 
For more information on what shipped with 12.0, watch out for the release post on June 22.\n\nCover image by [Katie Burkhart](https://unsplash.com/@katieanalyzes) on [Unsplash](https://unsplash.com/photos/ks_e5Rf-Cn0)\n{: .note}\n",[9,680],"DevSecOps",{"slug":682,"featured":6,"template":683},"announcing-gitlab-devsecops","BlogPost","content:en-us:blog:announcing-gitlab-devsecops.yml","Announcing Gitlab Devsecops","en-us/blog/announcing-gitlab-devsecops.yml","en-us/blog/announcing-gitlab-devsecops",{"_path":689,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":690,"content":695,"config":706,"_id":708,"_type":14,"title":691,"_source":16,"_file":709,"_stem":710,"_extension":19},"/en-us/blog/being-a-better-ally",{"title":691,"description":691,"ogTitle":691,"ogDescription":691,"noIndex":6,"ogImage":692,"ogUrl":693,"ogSiteName":671,"ogType":672,"canonicalUrls":693,"schema":694},"Being A Better Ally","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749679611/Blog/Hero%20Images/cook-county-blog-unsplash.jpg","https://about.gitlab.com/blog/being-a-better-ally","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Being A Better Ally\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"David O'Regan\"}],\n        \"datePublished\": \"2020-09-09\",\n      }",{"title":691,"description":691,"authors":696,"heroImage":692,"date":698,"body":699,"category":700,"tags":701},[697],"David O'Regan","2020-09-09","\n\n{::options parse_block_html=\"true\" /}\n\n\n\nI’ve been at GitLab five months now - with every merge request committed and milestone met, GitLab team members collaborate on innovative and efficient methods of delivering some of the world's best software. Besides this, I’ve also noticed that GitLab team members do a superb job of creating an environment that allows each of our peers to contribute, be heard, and bring their whole selves to work. 
Once you’ve gotten a taste of being part of that kind of community you get to see how much it matters in writing good code - talented and inclusive teams are more creative, efficient, and happier in the workplace.\n\nThe Gitlab Handbook offers amazing explanations about our values and resources for living them to your best ability. One of our values ‘Diversity, Inclusion, and Belonging’ has the following note on this value that I really appreciate:\n\n> “... Diversity is having a seat at the table, Inclusion is having a voice and feeling empowered to use it, and Belonging is acknowledgement of your voice being heard along with creating an environment where team members feel secure to be themselves...”\n\nAnyone can, and everyone should strive to nurture these values. For myself, as a person with intersecting levels of privilege who is not a member of an underrepresented group, allyship is a fantastic way to help build a better working environment. Though, being honest, if you asked me 6 months ago what allyship meant, I couldn't have told you. Going over the Handbook was a great start and I highly encourage it, as was working through my own [diversity training](https://gitlab.com/gitlab-com/diversity-and-inclusion/-/issues/127) but I also wanted to put together a small piece for anyone else getting started on their journey in allyship. Something my old bodybuilding coach [Blue Shinners](https://www.independent.ie/lifestyle/big-and-beautiful-26331156.html) used to tell me came to mind when I started the learning process;\n\n> It's not complicated, but that doesn't make it easy - Blue Shinners\n\nFor context on this piece, I want to let you know the following:\nI am a white, cisgender male (pronouns: he|him|his). I also have a lot of privilege along other lines of intersectionality (e.g. 
heterosexual, neurotypical, a citizen of an affluent, peaceful nation, etc).\n\nI am far from an expert in the field of allyship or building inclusive work environments, so I’m relying on my lived experiences and what I’ve learned from reading and listening to others. Regarding scope, I am committed to using my position within GitLab to help foster an inclusive and diverse environment aligned with GitLab’s core values.\n\nI’ve got to say - for me, becoming a better ally looks a lot more like a patchwork of small bursts of reading, learning little bits in social interactions, being corrected here and there, apologizing, and using what I’ve learned to do better. It isn’t always tidy, but if I were to lay my journey out in general steps, it would look a little something like this:\nGetting excited about learning and listening, making space, and making spaces inclusive. Contributing to a better culture, where and how you can. If you see something, (and it’s safe for you to do so,) say something. Though most of all accept that you will make mistakes, and strive for course correction.\n\nI personally make a lot of mistakes. The uncomfortable truth about making mistakes is they are part of lifeand more part of living honestly. Alan Watts very famously said that you do not know where your choices come from when you live honestly, and this can cause you to fumble as you explore like a toddler taking their first steps. One of the most difficult things I have ever done is to honestly level the question at myself;\n\n> If you make mistakes in all other areas of life, is it possible you also make mistakes in this area?\n\nThe natural instinct is to pull back in ~~anger~~(fear), claiming you are a good person and would never intentionally set out to treat people differently based on something as shallow as how they look or present themseleves. 
Regardless of how you feel, making mistakes is inevitable purely due to the fact that without a well thought out dose of empathy, you simply cannot assume someone else's situation nor experience.\n\n### Getting excited about learning and listening\n\nSearch for answers - you don’t have to know everything about the historical and cultural basis of social injustice, but I know that even a few articles or YouTube videos here and there have made the difference in giving me a better foundation for understanding, open-mindedness, kindness, and better conduct.\n\nListen to your peers, partners, and colleagues when they share their experiences or important pieces of news with you - and be willing to share yours, if asked.\n\nKnow your [‘-isms’](https://en.wikipedia.org/wiki/-ism) and learn about unconscious biases - especially your own. If you haven’t experienced certain kinds of prejudice or discrimination it may be while until you learn about them or how you unconsciously maintain them. Knowing about them lets you make an active choice in reducing toxic behaviors in shared spaces.\n\nIf you can’t find what you’re looking for in your research, don’t be afraid to ask for help. When possible, ask the appropriate person for help, like your team supervisor, or even ask in the GitLab diversity slack channel (I personally learn a lot from this channel each day).\n\nWhen the context is more specific, ask your peers - but leave room for individuals to say no or for groups to leave anonymous feedback.\n\nKnowledge is necessary to good allyship. And it’s sufficient when paired with inclusive, affirming actions. 
Like with any good piece of code, go for the minimum viable product - learn more, incorporate it into your daily actions, be willing to keep adding to that knowledge base and growing.\n\n### Making space, and making spaces inclusive\n\nLearn about the space you’re in and space you occupy - for myself, this journey meant coming through the understanding that software companies are overwhelmingly composed of people that look just like me. Learning more about how and why some groups are underrepresented, even in companies like GitLab, is another important step in allyship.\n\nLooking at GitLab’s values, we’re encouraged to see others succeed and help where we can. Good allyship is about doing my best to ensure that underrepresented voices are given at least as much space as my own, both by letting people know you want their contribution with affirming and inclusive language and by showing your appreciation for those contributions by giving credit and offering praise centered on their hard work.\n\nUse common sense and be kind in your interactions with your colleagues. Don’t make assumptions. Be flexible and open-minded. Be respectful of others’ privacy and get excited about what they’re willing to share with you, from their quirks to their life story and family album. These interactions create the bond GitLab team memebers share and makes this all-remote team that you love to work with.\n\n### Contribute to a better culture, where and how you can\n\nImproving the work we do at GitLab is often about your contributions, but it’s also about how good a job we do as allies to ensure that all of our ideas and contributions receive time, consideration, and credit. As a good ally, this means remembering to uplift and make space for the most marginal voices.\n\nCelebrate intersectional as well as cross-functional collaboration by considering paired-programming or mentorship with someone new. 
The benefit of working at GitLab is that it is teeming with talented developers from all backgrounds. When you issue or consider a request for paired programming or mentorship, center the goals and timeline to confirm you both have the time and skills to get it all in. Be willing to meet the other person where they’re at: be flexible, respectful, and accommodating of their needs in the workplace, ask them about their experiences, be willing to share yours.\n\nShare your time and your love of code with your local community - hundreds of cities worldwide have organizations and education programs that promote programming for marginalized groups and youth. Even if it’s during traditional working hours, at GitLab, we have the flexibility of working things out with our supervisor to support the events and people that won’t wait until ‘after work’. A great example of this is the [Vue Vixens](https://www.vuevixens.org/) which are one of my personal favorites.\n\nDonate to an organization or cause that is able to do the work you can’t on your own. Did you know that GitLab has a pretty great [donation matching program proposal in the makes?](https://gitlab.com/gitlab-com/diversity-and-inclusion/-/issues/91) It’s nice to know they’ll back you up on the causes you support.\n\nIf you see something (and it’s safe for you to do so), say something. Call out discrimination - address the behavior, without labeling the person. This comes back to empathy in the workplace. Be in the shoes of the person or group experiencing the discriminatory behaviour, and be in the shoes of the person behind the behaviour. Support the marginalized person or group, to reinforce the equal value of everyone of GitLab. Addressing someone’s discriminatory behaviour and holding them accountable gives them an opportunity to adopt, adapt, and improve.\n\nWe have a lot of options in how we respond to discriminatory behavior. 
I go into all interactions with my team assuming good intent, and keep in mind that we are each more than our work or individual actions. I also keep in mind that while discriminatory behavior can be addressed directly and in the moment or context, it can also be addressed indirectly, or in a 1-on-1 afterwards, which can offer a more approachable context for difficult feedback.\n\nI recognize that in an ideal world, everyone would feel comfortable calling out discriminatory behavior, but it isn’t always safe for everyone to do so - especially members of the groups being discriminated against. That’s where ally’s like myself come in - inclusive spaces are about shared work, and I have more opportunities than many to help build that.\n\nAccept that you will make mistakes, and strive for course correction in all areas.\n\n“Many would-be allies fear making mistakes that could have them labeled as “-ist” or “-ic” (racist, sexist, transphobic, homophobic, etc). But as an ally, you’re also affected by a system of oppression. This means that as an ally, there is much to unlearn and learn—mistakes are expected. You need to own this as fact and should be willing to embrace the daily work of doing better.”\n\nI’ve mentioned before that this is a core takeaway for developers and team leaders alike. Whether we’re creating a merge request, bringing our true selves to work, or becoming a better ally, we should do so with a low sense of shame and no ego.\n\nYou will make mistakes. I promise. We all will. But when it comes to allyship, it won’t just be a blow to the ego. 
It will be to the part of ourselves that loves GitLab for the people we get to work with every day and hates the idea of hurting anyone here.\n\nSo here are some notes for getting through those sticky occasions, and iterating better when someone calls out that you haven’t been the best ally:\n\n> In simple terms: say thank you, say sorry, iterate and do better.\n\nDon't:\n\n- center around yourself\n- prioritize your intention above the impact of your actions\n- deny the other person’s lived experience, derail or deflect from the issue in your apology\n- avoid arguing semantics on how the issue was brought to your attention\n- ask that person to accept inequality or microaggressions as a fact of life\n- blame them or their actions for what happened\n- retaliate against the person either actively or passively\n\nDo:\n\n- ask if they’re okay and center their experience\n- listen to what they have to say, acknowledge what happened and your role in it\n- apologize as gracefully as possible\n- understand and learn from what happened, do your homework\n- stop the behavior and modify the pattern that led to it.\n\nKeep in mind that it’s okay to ask for the person’s feedback on what you can do to be a better ally or for a clarification on what happened was problematic, but remember to center their experience and leave the other person space to refuse (fixing the behaviour is contribution you make to a more inclusive space going forward). Where I come from, it’s mandatory to add that your owe the person a pint of Guiness down at the pub after a workplace chat... maybe a coffee chat is a better call for an all-remote company though.\n\nRemember that we are more than our work or our individual behaviors. But over time, we do become associated with a track record comprising both of those things. When our colleagues do code review or call us out, it’s an opportunity for us to grow and build better habits. 
And as long as we continue to iterate better, our contributions to GitLab will be more meaningful and people will see us in the light of the changes we’ve made (not the small slips along the way).\n\nTL;DR\n\nBeing an ally is an ongoing journey where we have many opportunities to contribute, collaborate, learn, get feedback, and iterate better... so pretty much the same as everything else we do at GitLab. And with this one, we grow better interactions with some of the most talented developers we’ll ever get to work with.\n\nAs with every other post, this is also a collaboration... so whether it’s further resources, suggested additions, punctuation edits, or even a few callouts that I should look out for, it’s all very welcome. It's how we all grow, it’s how I hope I am becoming a better ally.\n\nCover image by [Element5 Digital](https://unsplash.com/@element5digital) on [Unsplash](https://unsplash.com)\n{: .note}\n\n[Join us](/jobs/) at GitLab! Or consider [trying us out](/free-trial/) for free.\n\n","unfiltered",[9,702,703,704,705],"security","design","releases","agile",{"slug":707,"featured":6,"template":683},"being-a-better-ally","content:en-us:blog:being-a-better-ally.yml","en-us/blog/being-a-better-ally.yml","en-us/blog/being-a-better-ally",{"_path":712,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":713,"content":719,"config":726,"_id":728,"_type":14,"title":729,"_source":16,"_file":730,"_stem":731,"_extension":19},"/en-us/blog/better-code-reviews",{"title":714,"description":715,"ogTitle":714,"ogDescription":715,"noIndex":6,"ogImage":716,"ogUrl":717,"ogSiteName":671,"ogType":672,"canonicalUrls":717,"schema":718},"Better Code Reviews GitLab Style","Better Code Reviews - A selection of tools for your tool-belt when it comes to code reviews.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749663502/Blog/Hero%20Images/paperclips.jpg","https://about.gitlab.com/blog/better-code-reviews","\n                        {\n        \"@context\": 
\"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Better Code Reviews GitLab Style\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"David O'Regan\"}],\n        \"datePublished\": \"2020-06-08\",\n      }",{"title":714,"description":715,"authors":720,"heroImage":716,"date":721,"body":722,"category":700,"tags":723},[697],"2020-06-08","\n\n{::options parse_block_html=\"true\" /}\n\n\n\n> A love letter to anyone that's ever reviewed or been reviewed.\n\nThis blog post originally started as a thank-you message inside the GitLab slack channel `#thanks`, however, the scope of the message grew to such a degree that I wanted to take it a step further and see if I could not only thank the amazing people this post is dedicated to, but also hopefully share some of the amazing things they taught me to help *you*, dear reader.\n\nI have always been rather passionate about feedback. For as long as I can remember, I have always sought feedback on everything I was interested in. It's as true for me in software as it is for my non computer related hobbies like bodybuilding or grammar.....**cough cough**. Feedback is so important for every aspect of life, and in software it is no different. Feedback matters and in GitLab, we deliver most if not all of our feedback to one another via the code review.\n\nThis post is designed to deliver a selection of the most fantastic things I have seen in code reviews here at GitLab, with two goals:\n\n1. Acknowledge the people who work hard to ensure the feedback cycle they provide is as good as it can be, because at GitLab we like to [say thanks](https://handbook.gitlab.com/handbook/values/#say-thanks).\n1. 
Offer you, the reader, a selection of tools for your toolbelt when it comes to code reviews.\n\nEnter - **Better Code Reviews**.\n\n## Self Reviews - Details Matter\n\n> Before assigning MRs to the reviewer I practice a self-review to help the reviewer and the maintainer understand quirks and caveats of the MR. I am trying to anticipate their concerns/questions. As a maintainer I find it also very valuable. - Peter Leitzen ([@splattael](https://gitlab.com/splattael))\n\nWe often take for granted that details are hard. Moreover, we often take for granted that details in software are even harder. The majority of software consists of layers upon layers of deep abstractions and obscure logic that can be difficult, if not impossible, to really understand without spending a significant amount of time parsing it line by line.\n\nThis process is made even harder when the details or context are incorrect. Though it's natural for this to happen, humans are not spell checkers, nor do the majority of us like to revisit a piece of work a fourth or fifth time to ensure it's as correct as it can be. If we all did this, nothing would ever be delivered.\n\nBut - there is a sweet spot to be found for this dilemma in software where we can keep the velocity of delivery high, and also reduce the feedback cycle time through a small amount of dedicated effort to the details. We talk about some of the details [here in the responsibility of the merge request author](https://docs.gitlab.com/ee/development/code_review.html#the-responsibility-of-the-merge-request-author).\n\nFor the merge request author, step through a checklist. Here is mine. 
If you can't read my chicken-scratch handwriting, I'll type it out too:\n\n![merge-checklist](https://about.gitlab.com/images/blogimages/merge-checklist.png)\n\nBefore every feedback cycle:\n\n- Re-read every line\n- Test your code locally\n- Write a test for every change (or as many as you can)\n- Write a clear description and update it after each feedback cycle\n- Include at least one screenshot per change. More is better\n- Check, re-check and re-check your labels\n- Consider using a `~\"workflow::refinement\"` label for issues ahead of time like we do in the Monitor:Health team\n- Review the code as if you were the reviewer. Be proactive, answer the likely questions, and open followup issues ahead of time\n\nIf you want to see the last and most important part in action, check out one of our frontend maintainers Natalia Tepluhina([@ntepluhina](https://gitlab.com/ntepluhina)) pre-answer a question she knew would be asked in [one of her merge requests](https://gitlab.com/gitlab-org/gitlab/-/merge_requests/33587#note_353564612).\n\n## Conventional Comments - Communicate Intent\n\n>  **Shaming** This is horrible code. How about re-writing all of it so that it stops being that bad? - Frédéric Caplette ([@f_caplette](https://gitlab.com/f_caplette))\n\nOne of the hardest parts of getting a code review right is communicating the human touch. When we offer feedback and receive feedback, human habit creates cognitive distortion by defaulting to the most negative aspects of that feedback. At GitLab, we try to highlight that in our [value system](https://handbook.gitlab.com/handbook/values/#assume-positive-intent).\n\nIn the world of psychology, this is called **mental filtering**, and it's something that all humans have a tendency to do. 
Though in software this affliction can be more common, as working in software goes hand-in-hand with valuing yourself based on how intelligent others think you are.\n\nEnter [conventional comments](https://conventionalcomments.org/) by Paul Slaughter ([@pslaughter](https://gitlab.com/pslaughter)) - a well thought-out system for leaving comments in a useful way for both the reviewer and author of the merge request. It's so popular one amazing person made a [browser extension (chrome, firefox)](https://gitlab.com/conventionalcomments/conventional-comments-button) for it!\n\nSo why does adding a single bolded word to the top of a comment help with the human touch? Well, it's all about intent.\n\nWhen you start the comment with an eye-catching single word that defines the intent and tone for the comment, it gives the reader a chance to understand where your comment is coming from.\n\nLet's try an experiment. If you had submitted code for review, which comment would you prefer to read?\n\nOption one:\n\n```bash\nWhat do you think about X instead?\n```\n\nor option two:\n\n```bash\n**suggestion (non-blocking)**\n\nWhat do you think about X instead?\n```\n\nNow if you're anything like me, you took a preference to option two. It had context, communicated empathy, and was an invitation to try something different rather than a command.\n\nThe magic part of this comment is the first line `**suggestion (non-blocking)**`. Straightaway, before you even read the comment, you know the two most important things about it:\n\n1. It's a suggestion from the reviewer\n1. It's non-blocking, communicating it's more of a friendly suggestion then a hard change that's needed\n\nAnother massive advantage this style of commenting has: it allows merge request authors to understand the reviewer is neither trying to block nor act as a gatekeeper for their work. 
By highlighting what counts as a blocking and a non-blocking comment, merge authors get the full context of what the reviewer is trying to communicate.\n\nTo demonstrate this, let's try another thought experiment! You have submitted a merge request for review and your review comes back with eight comments.\n\n- **Scenario A: No context in comments.** All comments are treated equally because they lack context for what counts as a blocker and what doesn't.\n- **Scenario B:** Context added via conventional comments system.\n\nThe comments can be treated via priority:\n\n1. Blockers => What's needed to get the merge over the line.\n1. Non-blockers => What can be a separate merge or perhaps a discussion.\n\nNext time you're reviewing code, try using conventional comments and watch how it affects not only the way the merge request author feels about the review, but the way **you**, the reviewer, feel leaving the review. My guess is you'll feel a lot better.\n\nWe're currently looking at [integrating this feature directly into GitLab](https://gitlab.com/gitlab-org/gitlab/-/issues/26891) because we believe in making GitLab the best possible place for code reviews, and want you to have the best experience possible.\n\nIf you want to see a real-life example of some of Paul Slaughter's ([@pslaughter](https://gitlab.com/pslaughter)) awesome work using conventional comments, check out [his reviews of my community contributions](https://gitlab.com/gitlab-org/gitlab/-/merge_requests/24897) here at GitLab. That empathy shines through.\n\n## The Patch File\n\n> Here's a patch file to better explain - Denys Mishunov ([@dmishunov](https://gitlab.com/dmishunov))\n\nWanna know a `git` secret? Patch files are the stuff of magic. 
If you want to read about them, [check the Git documentation for patches](https://git-scm.com/docs/git-format-patch).\n\n### How To Make A Patch File\n\nYou can make a patch file via your editor, or via the command line.\n\n#### Via The Editor\n\nRocking a nice fancy IDE or text editor? Most of them support patch files via plugins, or out of the box!\n\n- [VSCode](https://github.com/paragdiwan/vscode-git-patch)\n- [Webstorm](https://www.jetbrains.com/help/webstorm/using-patches.html)\n- [Atom](https://atom.io/packages/git-plus)\n- [Vim](https://vim.fandom.com/wiki/How_to_make_and_submit_a_patch) …life is what happens when you're trying to exit `vim`?\n\n#### Via The CLI\n\nOkay, you’ve made some commits, here’s your `git log`:\n\n```plaintext\ngit log --pretty=oneline -3\n* da33d1k - (feature_branch) Reviewer Commit 1 (7 minutes ago)\n* 66a84ah - (feature_branch) Developer 1 Commit (12 minutes ago)\n* adsc8cd - (REL-0.5.0, origin/master, origin/HEAD, master) Release 13.0 (2 weeks ago)\n```\n\nThis command creates a new file, `reviewer_commit.patch`, with all changes from the reviewer's latest commit against the feature branch:\n\n```plaintext\ngit format-patch HEAD~1 --stdout > reviewer_commit.patch\n```\n\n### Apply The Patch\n\nFirst, take a look at what changes are in the patch. You can do this easily with `git apply`:\n\n```plaintext\ngit apply --stat reviewer_commit.patch\n```\n\nJust a heads up! Despite the name, this command won't actually apply the patch. It will just show the statistics about what the patch will do.\n\nSo now that we've had a look, let's test it first, because not all patches are created equal:\n\n```plaintext\ngit apply --check reviewer_commit.patch\n```\n\nNo errors? Awesome! We can apply this patch without worry.\n\nTo apply the patch, you should use `git am` instead of `git apply`. 
The reason: `git am` allows you to sign off an applied patch with the reviewer's stamp.\n\n```plaintext\ngit am --signoff \u003C reviewer_commit.patch\nApplying: Reviewer Commit 1\n```\n\nNow run `git log` and you can see the `Signed-off-by` tag in the commit message. This tag makes it very easy to understand how this commit ended up in the code base.\n\n### Why to use them in code reviews\n\nSo now that you know how to make a shiny patch file, why would you use patch files as part of a code review process? There are a few reasons you might consider offering a patch file for a change you feel strongly about:\n\n1. It communicates you have invested a large amount of effort into understanding the author's solution and reasoning\n1. It demonstrates passion for reaching the best solution through teamwork\n1. It offers a willingness on the reviewer's part to accept responsibility for this merge past the point of just reading the code\n\nSome people might argue patch files are a cheeky way for a reviewer to force a change they would rather see make it into the code base, but I argue that anyone who has taken the time to check out a branch, run the project, implement a change, and then submit that change back for a discussion is embodying the value of collaboration to the fullest.\n\nWant to see a awesome example of a patch file in action? Check out one of our frontend maintainers Denys Mishunov ([@dmishunov](https://gitlab.com/dmishunov)) in action using a [patch file to its maximum potential](https://gitlab.com/gitlab-org/gitlab/-/merge_requests/31686#note_341534370)!\n\nWe believe so much in creating the best code review experience here at GitLab, we're looking into how can we make this system a [seamless part of the merge request and code review flow](https://gitlab.com/gitlab-org/gitlab/-/issues/220044).\n\n## Fairness\n\n> Fairness is a person's ability to rise above their own prejudice.\n\nFairness is a odd word. 
Chris Voss, a former FBI negotiator, said in his book [Never Split The Difference](https://www.goodreads.com/book/show/26156469-never-split-the-difference) that:\n\n> “Fair”—the most powerful word in any negotiation scenario. To become a great negotiator, you must earn the reputation of being a fair one.\n\nCode reviews can be viewed as a negotiation. It's you and another human being having a negotiation, based upon the idea that at the end, the result of this negotiation should be a selection of code that is both of value and of a high standard. While you might think that FBI negotiations and code reviews have little to do with one another, the concept being a fair negotiator often can be the most useful tool in your toolbox as both an author and reviewer.\n\nYou can actually see it mentioned twice in the [permissions to play in points 2 and 7](https://handbook.gitlab.com/handbook/values/#permission-to-play) guidelines here at GitLab:\n\n- \"Be dependable, reliable, fair, and respectful.\"\n- \"Seek out ways to be fair to everyone.\"\n\n### Author Fairness\n\nBeing fair as an author is the easier of the two. When you think of being fair as an author you need to adhere to a few simple Do's and Don'ts:\n\nDo:\n- Write a proper description with screenshots (can't stress this one enough)\n- Understand a reviewers point of view when they make suggestions\n- Pre-address strange parts of your merge (we all have them)\n- Be open to [collaboration](https://handbook.gitlab.com/handbook/values/#collaboration) on your work\n\nDon't:\n- \"plz merge\"\n- Forget to write a description with screenshots\n- Be closed off or take offense to suggestions\n- Forget to include any steps needed to get the build running or in other words(reduce burden where possible!)\n\nHonestly, it's pretty simple to be a fair author of a merge request if you use a small amount of empathy and remember that the person reviewing your code **gets nothing extra** for their time spend reviewing their code. 
They just want to help take your merge to the next level.\n\n### Reviewer Fairness\n\nBeing fair as a reviewers is a tad harder than being fair as an author. But why, I hear you ask? The issue is something called \"bias\" - or [unconscious-bias](https://handbook.gitlab.com/handbook/values/#unconscious-bias), as the handbook defines it.\n\nBias is, for better or for worse, something we all deal with when it comes to how we *want* things to be. We all have our own styles, preferences, and ideas on how software should be written:\n\n> Eslint be damned I want you to use double quotes!\n\nThis creates issues when it comes to code reviews, because it's normal for a lot of your own bias to bleed into a comment. You begin thinking in absolutes and the unresolved discussion count rises.\n\nLet me ask you something. Have you ever reviewed a merge request and found yourself saying things like:\n\n- \"It should be written like this?\"\n- \"Why would they do it like that?\"\n- \"I would have done it *this* way.\"\n- \"That's not how that should be done!\"\n\nWell, my friends, welcome to another common cognitive distortion called \"Should/must statements\". Do you want to be a better code reviewer? The next time you write a comment and it includes the word \"should\" or \"must\", pause right there and really think about why you felt the need to use that word. Sometimes it will be fair and warranted - such as if your company follows a set of coding conventions like we do at GitLab - but stay vigilant for when those statements are a thin veil for a personal preference. Ask yourself if you're being fair with your review. As a reviewer, if you find yourself in need of using a should/must statement, be sure to supply a reference to supporting documentation that is driving your statement.\n\nOne lesson I have learned through my own experience is that there is almost always a reason for something to be done the way it is. 
The fair response to something you don't agree with is to ask *why* it's being done like that, not saying it *must* be another way. That is how you become a fair and great negotiator.\n\n## The Follow Up\n\n> I feel like the follow up issue should become a first class citizen. - Sarah Yasonik ([@syasonik](https://gitlab.com/syasonik))\n\nLong merges suck. They just do. And while the concept of \"big doesn't always mean good\" might have started with food, it bleeds into the world of software development through merge requests that are too big. They also directly conflict one of our [main values](https://handbook.gitlab.com/handbook/values/#make-small-merge-requests) of iteration. In GitLab, we take this so seriously that [Danger Bot](https://docs.gitlab.com/ee/development/dangerbot.html) will ask you to break down merges that are over a certain size, helping developers champion the [value of iteration](https://handbook.gitlab.com/handbook/values/#iteration).\n\nLarge merge requests create huge amounts of complexity, they're hard to test, they're hard to reason about, they hard to maintain or extend.....and that's just for the author!\n\nSo what's worse than a large merge request? Reviewing a large merge request. If you've ever been pinged to review a merge request that was longer than 1000 lines, you understand what I am talking about. If it hasn't happened to you yet, count your lucky stars that your teammates live and breathe some good habits like simple solutions and iteration, and value a lack of complexity.\n\nThis creates a bigger problem than a complex reading exercise for the reviewer: it creates a context block. When a review grows past a certain amount of lines, it simply becomes too difficult to reason about without checking out the branch, booting the project and smoke testing. 
While smoke testing complex reviews are a great idea, it should **not** become the default ideal for reviewing.\n\nIf the merge request is too long, the code review is too complex / too long. The code rots, your merge conflicts grow, you can't iterate, you're constantly addressing breaking conflicts … and you're stuck for days, maybe weeks, maybe forever.\n\nSo how do we fix this? In the Monitor:Health team's iteration retrospective, my teammate Sarah Yasonik ([@syasonik](https://gitlab.com/syasonik)) raised a point where she suggested the follow up issue / merge become a first class citizen. I thought she was onto something amazing. If your merge is too long, or your reviews are taking too long, break your merge down, keep the reviews small, and offer follow-ups.\n\nTreat the follow-up merge as a first-class citizen. Do it right there *while reading the reviewer's feedback* instead of adding more code to a already too big merge! Do **not** make a already bloated merge even worse by adding more scope. Divide and conquer where possible.\n\nI think a lot of developers and reviewers find this process difficult because it's a contract of faith: \n\n- I, the author, promise to deliver a follow-up.\n- I, the reviewer, put myself on the line by taking your word that you will in fact fix this issue later.\n\nIt's scary, and lacks polish. I get it, but you should never let tomorrow's perfect stop today's progress because - spoiler alert - tomorrow isn't here, and we really only have today.\n\n### The Author Follow Up\n\nIf you offer a follow up, deliver it. It's your only rule but you cannot break it. Your credit for wilding the follow up resides solely in your consistent ability to deliver on your promises over time. 
As an author you should also work with your PMs and EMs to help prioritize the follow up as part of a wider team effort.
I never even made it to reading the comment, because all I could comprehend was this animated GIF of a thumbs-up and I remember thinking: *This merge would pass review. It would all be okay.* The sheer childlike giddy nature of seeing this image in action made me smile ear-to-ear. Every other comment could have been a rant about how awful my code was, but I wouldn't have cared.\n\nIf I can give you one piece of advice for your code reviews as a reviewer, use GIFs in a light-hearted way, because they are:\n\n- empathy-laden\n- soften the blow of a hard topic\n- foster positivity\n- make code reviews fun!\n\n![teamwork](https://media.giphy.com/media/vcHTRiZOglHNu/giphy.gif)\n\nWe're currently looking at making [Giphy a integrated feature here at GitLab](https://gitlab.com/gitlab-org/gitlab/-/issues/17379), making your code reviews even easier and more fun!\n\n## Tactical code reviews through the value of iteration\n\n> Can we make this smaller? - Clement Ho([@ClemMakesApps](https://gitlab.com/ClemMakesApps))\n\nOne thing I have noticed that help time and time again for better code reviews is the idea of breaking down a merge request into the smallest piece possible. A few people in my time here at GitLab have really put this across as a valuable way of working, but it was my frontend engineering manager Clement Ho([@ClemMakesApps](https://gitlab.com/ClemMakesApps)) that I really took notice championing this ideal. 
Given that I started paying close attention to this idea and began to notice benefits almost immediately when implementing the idea.\n\nIf we look at the GitLab value handbook's [suggestions iteration competency](https://handbook.gitlab.com/handbook/values/#iteration-competency) you can see that the value in small, digestible merge requests which translates into smaller code reviews:\n\n| Level | Demonstrates Iteration Competency by… |\n|",[724,725,9],"code review","zero trust",{"slug":727,"featured":6,"template":683},"better-code-reviews","content:en-us:blog:better-code-reviews.yml","Better Code Reviews","en-us/blog/better-code-reviews.yml","en-us/blog/better-code-reviews",{"_path":733,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":734,"content":740,"config":747,"_id":749,"_type":14,"title":750,"_source":16,"_file":751,"_stem":752,"_extension":19},"/en-us/blog/gitlab-pages-for-covid",{"title":735,"description":736,"ogTitle":735,"ogDescription":736,"noIndex":6,"ogImage":737,"ogUrl":738,"ogSiteName":671,"ogType":672,"canonicalUrls":738,"schema":739},"Using GitLab Pages to Report Local COVID-19 Rates","How I used GitLab pages to publish up-to-date local infection rates.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749681476/Blog/Hero%20Images/thisisengineering-raeng-0jTZTMyGym8-unsplash.jpg","https://about.gitlab.com/blog/gitlab-pages-for-covid","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Using GitLab Pages to Report Local COVID-19 Rates\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Matt Nohr\"}],\n        \"datePublished\": \"2020-08-06\",\n      }",{"title":735,"description":736,"authors":741,"heroImage":737,"date":743,"body":744,"category":700,"tags":745},[742],"Matt Nohr","2020-08-06","\n\n{::options parse_block_html=\"true\" /}\n\n\n\n## Finding Local COVID Rates\n\nI live in the U.S. state of Minnesota. 
Recently the state government provided recommendations for how and when to open schools in the fall. The guidance was based on the infection rates of the COVID-19 disease. In simple terms, the higher the rates, the less in-person the school should be. The actual calculation I needed was:  \n\n```\ntotal number of cases in your area over the past 2 weeks per 10,000 residents\n````\n\nI have three kids in school, so when I heard this recommendation I went to find out this value for my area. It turned out to be a difficult statistic to find. Along with the announcement my state government released a set of data, but it was about three weeks behind the current rates. I found different sets of data available, but they either reported the daily case rate or a total count of infections, not this very specific calculation.\n\nSee Also:\n- [GitLab's Handbook on COVID-19 benefits](https://about.gitlab.com/handbook/total-rewards/benefits/covid-19/)\n- [How an analytics software startup took aim at COVID-19](https://about.gitlab.com/blog/startup-covid-tracking/)\n\n## GitLab Pages to the Rescue\n\nI started by manually calculating the values with the data that I could find. This worked, but every time there were updated statistics, I had to go back and recalculate the value. I wanted a way to have this information available for me and others with the up-to-date information whenever I looked at it.\n\nMy process and output quickly evolved:\n\n1. I decided I would just write a script to download the data and do the calculation for me\n1. Once I had this working I thought the next step would be to automatically graph the results \n1. Then I thought I could publish the graphs on a website \n1. 
If I was going to publish them, I thought the best thing to do to get this done quickly would be to use [GitLab Pages](https://docs.gitlab.com/ee/user/project/pages/).\n\nThe result is a simple static website: [https://mattnohr.gitlab.io/covid-county/](https://mattnohr.gitlab.io/covid-county/)\n\n![Example Chart from website](https://about.gitlab.com/images/blogimages/gitlab-pages-for-covid/output-chart.png){: .shadow.center}\n\n## How It Works\n\nThe basic flow for my new “system” is:\n\n```plantuml\n(*) --> \"Download data\"\n--> \"Calculate the rates\"\n--> \"Create a new .csv file with daily calculated values\"\n--> \"Publish .csv file to GitLab pages\"\n--> \"Use GitLab pages to serve static website that reads .csv\"\n--> (*)\n```\n\nThe first few steps are done with a simple [Kotlin](https://kotlinlang.org/) script that is run using the [Gradle build tool](https://gradle.org/). I used [GitLab CI/CD](https://docs.gitlab.com/ee/ci/) pipelines to run a job to do that automatically. You can find an [example gradle .gitlab-ci.yml file here](https://gitlab.com/gitlab-org/gitlab/-/blob/master/lib/gitlab/ci/templates/Gradle.gitlab-ci.yml). The basics of this step for me look like:\n\n```yml\nbuild:\n  stage: build\n  script: gradle --build-cache run\n```\n\nThe next step was to get it published to GitLab pages. That also used a GitLab CI/CD job. It simply needed to move the .csv results out of the build directory into the “public” directory that is used to host GitLab pages. 
\n\n```yml\npages:\n  stage: deploy\n  dependencies:\n    - build\n  script:\n    - mv build/data.csv public/\n```\n\nThe actual static webpage uses [d3.js](https://d3js.org/) that is able to read the data from a .csv file and graph it.\n\nMy GitLab project can be found here: [https://gitlab.com/mattnohr/covid-county](https://gitlab.com/mattnohr/covid-county)\n\n## Running on a Schedule\n\nOnce I had the system up and running with GitLab CI, I was able to use [GitLab Pipeline Schedules](https://docs.gitlab.com/ee/ci/pipelines/schedules.html) to run the script a few times a day to get updated data. Now I do not have to worry about when the data is updated, I can just review my GitLab Pages site to see the latest values.\n\nPipeline Scheudles let you easily schedule pipelines daily, weekly, or monthly. Since I wanted this to run multiple times a day, I used a cron schedule to run the pipeline 4 times a day on weekdays:\n\n```\n0 8,12,16,20 * * 1-5\n```\n\n## Result\n\nNow I have a [simple website](https://mattnohr.gitlab.io/covid-county/) that has the most up-to-date calculations for this specific value for my local area. 
Now I just need to wait for our local school board to make a final decision on how schools will look!\n\n\u003C!-- image: image-url -->\nCover image by [@ThisisEngineering RAEng](https://unsplash.com/@thisisengineering) on [Unsplash](https://unsplash.com/photos/0jTZTMyGym8)\n{: .note}",[9,705,746,109],"testing",{"slug":748,"featured":6,"template":683},"gitlab-pages-for-covid","content:en-us:blog:gitlab-pages-for-covid.yml","Gitlab Pages For Covid","en-us/blog/gitlab-pages-for-covid.yml","en-us/blog/gitlab-pages-for-covid",{"_path":754,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":755,"content":761,"config":770,"_id":772,"_type":14,"title":773,"_source":16,"_file":774,"_stem":775,"_extension":19},"/en-us/blog/gitlab-together-with-terratag-open-source-to-help-you-manage-terraform-resources",{"title":756,"description":757,"ogTitle":756,"ogDescription":757,"noIndex":6,"ogImage":758,"ogUrl":759,"ogSiteName":671,"ogType":672,"canonicalUrls":759,"schema":760},"How to use Terratag to manage Terraform tags automatically","This blog addresses how you can do that easily and automatically when using Terraform and Terratag (an open source project by env0) on top of the Gitlab CI/CD platform.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749682137/Blog/Hero%20Images/blog-image.png","https://about.gitlab.com/blog/gitlab-together-with-terratag-open-source-to-help-you-manage-terraform-resources","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How to use Terratag to automatically manage tags and labels for your Terraform Code\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Itzik Gan Baruch\"}],\n        \"datePublished\": \"2021-09-14\",\n      }",{"title":762,"description":757,"authors":763,"heroImage":758,"date":765,"body":766,"category":767,"tags":768},"How to use Terratag to automatically manage tags and labels for your Terraform Code",[764],"Itzik Gan 
Baruch","2021-09-14","\n\nWhen using infrastructure as code (IaC) on a public cloud provider, it's important to use tags and labels to organize your IaC using their complementary services. Terratag, an open source project developed by [env0](http://www.env0.com), can be used with Terraform and placed on top of the GitLab CI/CD platform, making tagging and labeling IaC easier and more efficient.\n\nGitLab and Terraform make it easy to tag and label infrastructure as code.\n\n## Inside your toolbox\n\n[GitLab](https://about.gitlab.com/) the industry's leading DevOps platform. Not long ago, we announced the ability to control Terraform deployments, remote state management, private module registry, and merge request integration for Terraform. This gives users a range of solutions for running CI/CD for Terraform code and managing it on a large scale.\n\n[Terraform](https://www.terraform.io/) is the most widely adopted IaC framework out there. It's an open source project that is maintained by HashiCorp, and was launched in 2014 based on HashiCorp configuration language (HCL). Terraform is a command line (CLI) tool that can help manage and provision external resources such as public cloud infrastructure, private cloud infrastructure, network appliances, and SaaS and PaaS vendors. All major clouds are supported where AWS, Azure, and GCP have an official provider that are maintained internally by the HashiCorp Terraform team.\n\nAll major cloud providers support tagging/labeling for most of their resources using their Terraform provider, to help users manage infrastructure more efficiently. In this blog post, we provide some examples that show how it is easy to tag and label your IaC using Terratag with GitLab CI/CD – a core component of our DevOps platform.\n\n### How to automatically manage tags/labels for your Terraform Code\n\nFirst, we'll take a deep dive into the importance of tagging and labeling your IaC when using a public cloud provider. 
Next, we'll explain how to manage tags and labels for your IaC easily and automatically when using Terraform and [Terratag](https://terratag.io/) on top of the Gitlab CI/CD platform, with simple code examples for an end-to-end solution.\n\n### Why tags/labels are so important\n\nAll major cloud providers allow tagging (or labeling) cloud resources. Moreover, they encourage you to use tags or labels to do things like manage budgets, set up powerful automation algorithms, and unlock insights offered by the cloud providers and independent third parties.\n\nBy harnessing powerful IaC frameworks like Terraform, users can define and tag cloud resources for verticals ranging from the development to ops, as well as business needs.\n\n### The problem with tagging today\n\nTagging is a manual process, which can make it a real hassel, particularly as your infrastructure grows. Repeatedly tagging dozens or even hundreds of cloud resources is inefficient, but that's just the start of the problems. Manually tagging fails in other important ways too:\n\n* **Standards are hard to maintain if they're not enforced**: Your entire team needs to be on the same page – keeping an eye out for newly added cloud resources, making sure they include those tags or you may miss some significant resources when acting on that metadata later.\n\n* **Harder to change**: Applying changes to tag structure across the board quickly becomes unmanageable.\n\n* **Metadata can obscure what's important**: While tagging all this metadata is useful for slicing and dicing later, having it everywhere on your resources pollutes your IaC, making it more cumbersome and harder to maintain.\n\n* **Migration**: What if you already have plenty of Terraform modules with cloud resources, which weren't tagged to begin with? 
Trying to tag them all now can be painstaking work.\n\nAt the end of the day, IaC is, well, just code, and as is the case with any code, repetition makes it harder to fix errors, apply enhancements, make adjustments and maintain readability. As tagging is a cross-cutting concern, the lack of proper layering or aspect control makes it harder to retrofit existing solutions.\n\n### Terratag to the rescue\n\n[Terratag](https://terratag.io/) allows the user to automatically tag or label all the resources in their Terraform code. It also automatically tags all of your Terraform sub-modules, even if they don't expose tags as an input. Terratag is a CLI tool that works with all the major cloud providers including AWS, Google Cloud Platform, and Microsoft Azure, and solves the complicated problem of tagging resources across applications at scale. It eliminates the risk of human error, can retroactively tag IaC resources that were previously deployed, and helps you easily use the tags for various purposes, like cost management, organization, reporting, etc.\n\n### How to run Terraform with GitLab\n\nGitlab offers a wide range of tools for Terraform, starting with a [managed remote state](https://docs.gitlab.com/ee/user/infrastructure/terraform_state.html), running your deployment with [Gitlab CI/CD](https://docs.gitlab.com/ee/ci/), [Terraform private module registry](https://docs.gitlab.com/ee/user/packages/terraform_module_registry/index.html#publish-a-terraform-module-by-using-cicd) and [integration in Merge Requests (MRs)](https://docs.gitlab.com/ee/user/infrastructure/mr_integration.html) and getting Terraform plan output information into an MR.\n\nIn this tutorial, we use Gitlab CI/CD to deploy a Terraform repository into Google Cloud Platform and let Gitlab manage our remote state.\n\n### Combining Terraform wtih GitLab in GCP\n\nWe explain how to implement and combine Terraform and GitLab with ease, starting with building the deployment of our Terraform code using 
GitLab and then see the results in Google Cloud platform.\n\n### Terraform code with GitLab as a backend\n\nWe're using Terraform to deploy a simple VPC and a VM into GCP. We will use GitLab Terraform backend configuration, which is based on the Terraform [HTTP backend](https://www.terraform.io/docs/language/settings/backends/http.html). The beauty of this configuration is that you don't need to add any configuration regarding authentication when running it inside Gitlab CI/CD. GitLab will automatically set up all the relevant configuration for your backend according to the project it's running in.\n\nThe code is available in [the Terratag project created for this blog post](https://gitlab.com/env0/terratag-blog-post/-/tree/main).\n\n### Set up variables\n\nThis Terraform code needs some variables in order to run. We can set these up using Gitlab CI/CD variables. Under your Gitlab Project, go to Settings > CI/CD and expand the variable section. We will need to add three variables:\n\n* `GOOGLE_CREDENTIALS`: This variable value should be the JSON of your Google Cloud service account. [See this documentation](https://cloud.google.com/iam/docs/creating-managing-service-account-keys) on how to create a service account key.\n\n* `TF_VAR_project_id`: Your Google Cloud project ID.\n\n* `TF_VAR_machine_type`: The VM type you would like to create.\n\n![tg_1](https://about.gitlab.com/images/blogimages/2021-terratag-env0/tg_1.png)\n\n### Set up Gitlab CI/CD\n\nSetting up a Gitlab CI/CD for Terraform is really easy – all you need to do is add a simple file in your repository called `.gitlab-ci.yml` and add a configuration for each step of your Terraform deployment. We're going to add the following steps to our pipeline:\n\n* **Plan**: This step will run the `terraform init` and `terraform plan` commands and in the middle will also run Terratag to tag all the relevant resources. 
At the end it will also output the Terraform plan as a `JSON` file and create an artifact.\n\n* **Apply**: This step will run the `terraform apply` command. It depends on the plan to finish successfully. This step is done manually so we can check the plan before applying the changes.\n\n[https://gitlab.com/env0/terratag-blog-post/-/blob/main/.gitlab-ci.yml](https://gitlab.com/env0/terratag-blog-post/-/blob/main/.gitlab-ci.yml)\n\nSince Terratag scans the entire Terraform code, including any Terraform modules you may be using, we need to run the `terraform init` command before we run the Terratag command, since the init command will download all the relevant modules so Terratag can scan them.\n\nWe can see two resources in this code:\n\n* `google_compute_network`: This resource sets up the VPC. Terratag will not apply labels since the [compute network doesn't allow labels](https://registry.terraform.io/providers/hashicorp/google/latest/docs/resources/compute_network).\n\n* `google_compute_instance`: This resource sets up the VM. Terratag applies the label that the user defines.\n\nHere is the output of Terratag on this Terraform code:\n\n![tg_2](https://about.gitlab.com/images/blogimages/2021-terratag-env0/tg_2.png)\n\nThis is what this pipeline will look like in the Gitlab UI. When the Terraform plan step is successfully completed, you can manually apply the changes after reviewing the plan, which is also available as an artifact – meaning it can be downloaded and viewed locally.\n\n![tg_3](https://about.gitlab.com/images/blogimages/2021-terratag-env0/tg_3.png)\n\n### How to apply labels on GCP\n\nAs we mentioned before, labeling your resources has a lot of technical, operations, and business benefits. This blog post focuses on the cost benefit of effectivelabeling.\n\nFirst, let's see that the VM we've created is actually tagged correctly.\n\nStart by heading to the Google Cloud console. 
Next, go to the Compute Engine page and, under VM, search for the VM we've just created. Then, go into the VM Instance details page and see that the label exists with the right value.\n\n![tg_4](https://about.gitlab.com/images/blogimages/2021-terratag-env0/tg_4.png)\n\nNext, go to the Billing section and select \"Reports\". On the right hand side of the page there are filters. Under labels, we can filter the label key and the label value and get the cost of those resources.\n\n![tg_5](https://about.gitlab.com/images/blogimages/2021-terratag-env0/tg_5.png)\n\n### Automate labeling using Terratag\n\nTags and labels play a crucial role in managing a large-scale infrastructure projects and offer significant benefits when using tools such as [Gitlab CI/CD](https://docs.gitlab.com/ee/ci/). [Terratag](https://www.terratag.io/) has the advantage of easing the transition for Terraform users. Adopting Terratag for use with GitLab CI/CD and Terraform will also help establish a standard in your organization when it comes to use of tags and labels, eliminating the need for human intervention on a large-scale project to change your current Terraform code base.\n\nFeel free to check out the [code base](https://gitlab.com/env0/terratag-blog-post) for this blog post and leave us feedback.\n\n_Blog post coauthor [Omry Hay](https://www.linkedin.com/in/omryhay/) is the co-founder and CTO of [env0](http://www.env0.com)_\n","engineering",[769,9],"DevOps",{"slug":771,"featured":6,"template":683},"gitlab-together-with-terratag-open-source-to-help-you-manage-terraform-resources","content:en-us:blog:gitlab-together-with-terratag-open-source-to-help-you-manage-terraform-resources.yml","Gitlab Together With Terratag Open Source To Help You Manage Terraform 
Resources","en-us/blog/gitlab-together-with-terratag-open-source-to-help-you-manage-terraform-resources.yml","en-us/blog/gitlab-together-with-terratag-open-source-to-help-you-manage-terraform-resources",{"_path":777,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":778,"content":784,"config":790,"_id":792,"_type":14,"title":793,"_source":16,"_file":794,"_stem":795,"_extension":19},"/en-us/blog/gitops-as-the-evolution-of-operations",{"title":779,"description":780,"ogTitle":779,"ogDescription":780,"noIndex":6,"ogImage":781,"ogUrl":782,"ogSiteName":671,"ogType":672,"canonicalUrls":782,"schema":783},"GitOps viewed as part of the Ops evolution","Examine the evolution that led to GitOps","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749682062/Blog/Hero%20Images/food-train.jpg","https://about.gitlab.com/blog/gitops-as-the-evolution-of-operations","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"GitOps viewed as part of the Ops evolution\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Viktor Nagy\"}],\n        \"datePublished\": \"2021-07-12\",\n      }",{"title":779,"description":780,"authors":785,"heroImage":781,"date":787,"body":788,"category":767,"tags":789},[786],"Viktor Nagy","2021-07-12","\n\nGitOps is a hot topic in the world of operations, but what does it provide to workflows that we didn’t have already? Looking at the evolution of the operations space, there have been many big changes in the past 20 years, and I argue that [GitOps](/topics/gitops/) is not a change, rather a summary of best practices. So, how do we describe the major phenomenon in Ops over the past 20 years? \n \nBefore 2000, the primary approach to operations was to hire a System Administrator or empower Lead Developers to do Ops work. 
System Administrators knew a lot about networking and server optimisations, and a good sysadmin can do most of their work through code, using Bash, Perl or Python scripts. While every software engineer likely knows at least the basics of shell scripting, even many backend engineers would not be comfortable with the level of bash scripting needed in traditional IT. \n \nBesides bash, there were situations where the infrastructure was managed through graphical user interfaces. Most enterprise IT software shipped with some level of graphical UI. This area was particularly alien to software developers. I first worked as a system administrator at a student house in Hungary. We used Novell tools to manage our network, create backups, and set up workstations. To be successful, I had to learn a lot about the tools and the domain, while my programming skills were pretty minimal.\n \nToday, a cloud-native \"system administrator\" does their job primarily through API calls. The APIs are triggered through some infrastructure as code approaches. Thus, even the sysadmins of today require much more advanced coding skills than they needed 20 years ago. Moreover, codefying your infrastructure enables battle-tested software development best practices, like testing, be introduced in operations, too. \n\nThis is a huge change compared to where we were 20 years ago. What has changed that got us to where we are now and how does it relate to GitOps?\n\n## The story\n \n\u003Ciframe src='https://cdn.knightlab.com/libs/timeline3/latest/embed/index.html?source=1_ZqRL3FjiRWlwW0Nx6imkrDcCbQtiFV4tJvR1JLiy3s&font=Default&lang=en&initial_zoom=2&height=650' width='100%' height='650' webkitallowfullscreen mozallowfullscreen allowfullscreen frameborder='0'>\u003C/iframe>\n \n### The first signals at Google\n\nThe System Administrator era is the initial period where our story starts. As we move forward, the first milestone is in 2003. For our story, two notable events happened during 2003. 
First, [Google presented Borg](https://research.google/pubs/pub43438/), their internal container management system that later became [Kubernetes](/blog/gitlab-kubernetes-agent-on-gitlab-com/). Second, Google hired Benjamin Treynor, and the SRE approach started with his collaboration. Let's stop here for a minute to speak about the core aspects of the SRE approach!\n\n[Site Reliability Engineering (SRE)](https://sre.google/) is a software engineering approach to IT operations. Software engineers write software to reach a goal, there is likely a process around delivering the software that includes code reviews and tests, and there are success metrics attached to the delivered output. These success metrics in the context of SRE are called Service Level Indicators, and there are related Service Level Objectives and Service Level Agreements. By applying software engineering practices to operations, the reliability and scalability of the system can be better understood and improved. Moreover, the automations that emerge from the approach enable the development teams to be more efficient as they can often self-serve their requirements.\n \n### The public cloud\n\nLet’s continue our story. For many companies around the world, an important development was Amazon Web Services (AWS). AWS launched in 2006 with 3 services: S3, SQS and EC2. Together, these services enabled companies to switch to AWS or to start their business on [AWS infrastructure](/blog/deploy-aws/). Amazon's market share has made it the leading cloud provider today, and their name is coupled tight with public clouds. As increasing workloads migrated to the cloud, the way of operations had to adapt. \n \nIn past years, I've run many interviews with IT operations professionals and asked them about their [infrastructure as code (IaC) practices](/topics/gitops/infrastructure-as-code/). From these interviews, a very strong pattern emerged around IaC adoption. 
Companies usually switch to IaC as they move their infrastructure to the cloud. Simply, managing dozens of cloud services through a UI is very problematic, and managing them through a single codebase is much more convenient. Together with the move to the cloud, there is a strong push to improve operations practices, and move towards more automated approaches.\n \n### The appearance of DevOps\n\nWhile the struggles of software delivery were well-known by 2009, the SRE approach pioneered at Google was not as widely adopted. As agile started to be formalized in 2000, it seemed that we found a solution to the problem of delivering the built services in front of the user becoming more and more stringent. As a result of many discussions around this topic, Patric Debois coined the term DevOps in 2009. \n \n> DevOps describes the cultural changes required in order to enable high-quality service delivery. The core idea of DevOps is to create a well-oiled process around service delivery by setting shared goals and clear ownerships. The many approaches to DevOps are highlighted by [the 9 types presented as DevOps team topologies](https://web.devopstopologies.com/).\n \nJust like many agile techniques existed before agile was formalized, the SRE approach existed before the term DevOps came to be, and it can be considered an implementation of DevOps. There are just as many agile techniques as there are ways to implement DevOps. \n \n### Containers to drive the process\n\nIn 2013, several developments were made. O'Reilly published the first book on DevOps, and the operations space got a new tool - docker - which led the way to containerisation and changed our industry tremendously. Containerisation provides a standard way to ship software. Previously, engineers could build a Debian package or a Java jar file. Basically, every technology had its own packaging solution, and there are _many_ technologies. 
Containers provide a single, standard way to package an application, enabling both developers to own what happens inside the container and infrastructure teams to support developers to ship containers reliably and quickly into production.\n \nThe idea of containerisation solves another problem, that of stale resources. For a long time, operations had to start different servers for various workloads, dependencies of workloads had to be taken care of, and that led to stale servers and huge inefficiencies, but we did not have a good model around orchestrating the workloads. Apache Mesos was presented in 2009 and Docker Swarm in 2014, indicating innovation in this space. In 2014, Kubernetes was presented as the open source version of Google's Borg system, and it quickly became the leading solution in this area. When released, it already supported docker containers, provided declarative infrastructure management through the Kube API, and came with a reconciliation loop at its core. Basically, the end user describes the expected state and sends it to the system, and Kubernetes tries to reach and maintain that state. Using an API for cloud operations was not new any more, still describing what we want to see, instead of imperatively commanding the system to take specific actions is a novel approach. Moreover, this enables the system to self-heal, as it can always aim at reaching the desired state. Beside better resource utilisation, these are the core values of container orchestrators.\n\n### The summary is GitOps\n\nOur story slowly gets to its end in 2017 when the GitOps term was coined. GitOps provides a summary of what we had already without adding anything new to the picture. Even though the summary was known, this workflow did not have a name yet. The cultural changes required for modern IT operations are described by DevOps and shown in the SRE approach. 
Automation has been with us since the advent of continuous integration, and new tools like AWS, containers, and Kubernetes enabled it in operations too. Finally, Kubernetes provides a way for the system to take care of itself (more or less), and provides a self-healing aspect of automation. As Gene Kim wrote in the _Phoenix Project_, “The Second Way is about creating the right to left feedback loops”. Coupling this with storing all the code that describes our system in a versioned manner, applying them automatically through a well-defined process, and finally using a self-healing system is what we call GitOps. \n\n## What does it mean to you\n \nAt GitLab, our [vision](https://about.gitlab.com/direction/#vision) is to provide a single application for the whole DevSecOps lifecycle. As part of this, GitLab offers one of the leading CI automation tools, and our dedicated [Infrastructure as Code](https://docs.gitlab.com/ee/user/infrastructure) and [Kubernetes Management](https://docs.gitlab.com/ee/user/project/clusters/) enable best practice operations for modern ops teams. We understand that many services are run in legacy infrastructures, where automation is very problematic, and some companies do not have the resources or need to move to Kubernetes. As shown above, the canonical definition of GitOps is not feasible in these situations. 
Thankfully, the value of GitOps is minor compared to the value of a strong DevOps culture combined with the automation enabled by the target systems.\n \nAs a result, I encourage everyone to approach GitOps by understanding their current level of DevOps practices as GitOps will emerge naturally from following well-known practices in the DevOps area.\n \nCover image by [Sigmund](https://unsplash.com/@sigmund?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText) on [Unsplash](https://unsplash.com/s/photos/evolution?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText)\n\n## Read more on GitOps with GitLab: \n\n- [GitOps with GitLab: Infrastructure provisioning with GitLab and Terraform](/blog/gitops-with-gitlab-infrastructure-provisioning/)\n\n- [Here's how to do GitOps with GitLab](/blog/gitops-with-gitlab/)\n\n- [How to use a push-based approach for GitOps with GitLab scripting and variables](/blog/how-to-agentless-gitops-vars/)\n\n- [GitOps with GitLab: Connect with a Kubernetes cluster](/blog/gitops-with-gitlab-connecting-the-cluster/)\n\n",[769,9],{"slug":791,"featured":6,"template":683},"gitops-as-the-evolution-of-operations","content:en-us:blog:gitops-as-the-evolution-of-operations.yml","Gitops As The Evolution Of Operations","en-us/blog/gitops-as-the-evolution-of-operations.yml","en-us/blog/gitops-as-the-evolution-of-operations",{"_path":797,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":798,"content":804,"config":814,"_id":816,"_type":14,"title":817,"_source":16,"_file":818,"_stem":819,"_extension":19},"/en-us/blog/gitops-done-3-ways",{"title":799,"description":800,"ogTitle":799,"ogDescription":800,"noIndex":6,"ogImage":801,"ogUrl":802,"ogSiteName":671,"ogType":672,"canonicalUrls":802,"schema":803},"3 Ways to approach GitOps","Learn about how GitLab users can employ GitOps to cover both Kubernetes and non-Kubernetes 
environments","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749669635/Blog/Hero%20Images/gitops-cover.jpg","https://about.gitlab.com/blog/gitops-done-3-ways","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"3 Ways to approach GitOps\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Saumya Upadhyaya\"},{\"@type\":\"Person\",\"name\":\"Dov Hershkovitch\"}],\n        \"datePublished\": \"2021-04-27\",\n      }",{"title":799,"description":800,"authors":805,"heroImage":801,"date":808,"body":809,"category":767,"tags":810},[806,807],"Saumya Upadhyaya","Dov Hershkovitch","2021-04-27","\n\nThe term [\"GitOps\"](/topics/gitops/) first emerged in the Kubernetes community as a way for organizations to enable Ops teams move at the pace of application development. With improved automation and less risk, GitOps is quickly becoming the workflow of choice for infrastructure automation.\n\nAt GitLab, the approach to GitOps goes beyond Kubernetes. Before the buzz around GitOps picked up in the DevOps community, GitLab users and customers were applying GitOps principles to all types of infrastructure, including physical servers, virtual machines, containers, and Kubernetes clusters ([multicloud](/topics/multicloud/) and on-premise).\n\n## What is GitOps?\n\nThere are two main [approaches to GitOps](https://www.gitops.tech/), a push-based approach and a pull-based approach.\n\n- *Push-based approach*: A CI/CD tool pushes the changes to the environment. Applying GitOps via push is consistent with the approach used for application deployment. 
In this case, deployment targets for a push-based approach are not limited to Kubernetes.\n![push based deployment](https://about.gitlab.com/images/blogimages/gitops-push.png){: .shadow.medium.center}\nHow the push-based approach works for GitOps.\n{: .note.text-center}\n\n- *Pull-based approach*: An agent installed in a cluster pulls changes whenever there is a deviation from the desired configuration. In the pull-based approach, deployment targets are limited to Kubernetes and an agent must be installed in each Kubernetes cluster.\n![pull based deployment](https://about.gitlab.com/images/blogimages/gitops-pull.png){: .shadow.medium.center}\nHow the pull-based approach works for GitOps.\n{: .note.text-center}\n\n## How to employ GitOps principles using GitLab\n\nGitLab supports both of the approaches mentioned above, which can be used with and without a Kubernetes agent. Along with the [recently introduced Kubernetes agent](/blog/gitlab-kubernetes-agent-on-gitlab-com/), GitLab supports GitOps principles by supporting a three types of deployment targets and environments: The single application for infrastructure code; configurations using CI/CD for automation; and merge requests for collaboration and controls.\n\nBelow we unpack three methods for applying GitOps principles using GitLab technology.\n\n### Push using manually configured CI/CD release targets\n\nThe infrastructure configurations are stored in git. The user sets up the [supported deployment targets](/install/) and uses the standard CI/CD workflow to push infrastructure changes. To ensure the desired state in the repository is consistent with the environment, CI/CD will need to run on a regular schedule to identify drift and reconcile as required. Manual intervention may be required at times to cater to failed pipelines. 
Many GitLab users have been using this approach to push infrastructure changes to their test, staging, and production environments.\n\nThe manual push approach is ideal for both Kubernetes and supported non-Kubernetes environments, such as embedded systems, on-premise servers, mainframes, virtual machines, or FaaS offerings.\n\n### Push using Terraform\n\nIn this approach, an out-of-the box [integration with Terraform](https://docs.gitlab.com/ee/user/infrastructure/) helps Terraform users seamlessly implement GitOps workflows using GitLab. Terraform manifests are stored in the Git repository where users can collaborate on changes within the merge requests. The Terraform plan reports can be displayed within the merge requests and the Terraform state can be stored using the GitLab-managed Terraform state backend. Everything is integrated into GitLab, which spares users from performing these tasks via third-party tools or integrations.\n\nThe push approach is ideal for both Kubernetes and non-Kubernetes deployment targets that are supported by Terraform.\n\n### Pull using a Kubernetes agent\n\nIn fall 2020, GitLab [introduced a Kubernetes agent](/blog/gitlab-kubernetes-agent-on-gitlab-com/) that initiates a secure web-socket connection from a Kubernetes cluster to a GitLab instance. There is a GitLab server component that polls for any repository changes on the server and informs the agent when there is a deviation between the desired state and the cluster environment. This process helps minimize the load on the cluster and network. Whenever a drift is detected the agent pulls the latest configurations from the git repository and updates the environment accordingly. This GitOps approach requires the Kubernetes agent to be installed on every Kubernetes cluster, which can be done with ease as the GitLab Agent for Kubernetes uses GitOps principles to install and update the agent as required. 
This GitOps method is ideal for Kubernetes environments only.\n\n![kubernetes agent](https://about.gitlab.com/images/blogimages/gitops-agent.png){: .shadow.medium.center}\nInside the pull-based approach using a Kubernetes agent.\n{: .note.text-center}\n\n### Up next: Push using a Kubernetes agent\n\nGitLab also aims to support GitOps is by using a push approach with a Kubernetes agent. The push based approach using manually configured Kubernetes target attaches a Kubernetes cluster to GitLab through a certificate exchange. This approach leverages the CI/CD workflow for infrastructure automation and is fairly straightforward, but it also introduces risk by opening up a firewall and using cluster admin rights for cluster integration. To overcome these challenges while leveraging the CI/CD workflow - the [push-based approach using the Kubernetes agent](https://gitlab.com/groups/gitlab-org/-/epics/5528) aims to reuse the web-socket interface to establish a secure connection between GitLab and the Kubernetes cluster and allows GitLab CI/CD to securely push changes using this interface. When available, this approach would also provide a migration path for users who are currently setting up the Kubernetes integration using a certificate exchange.\n\nThe third approach is ideal for Kubernetes environments only. When available, it can be used in conjuction with the pull-based approach to optimize the GitOps workflow.\n\n## Accelerate the SDLC with GitOps principles\n\nWhether you are using physical, virtual, containers, Kubernetes - on-prem or cloud-based infrastructures – GitLab uses GitOps principles a variety of ways to meet your team wherever it's at. 
GitLab supports many different options because we understand the typical organization has a mixed IT landscape, with various heterogeneous technologies in a number of different environments.\n\n***What’s your preferred approach to GitOps?*** Drop us a comment.\n\n## Learn more about GitOps at GitLab\n\nRead on to explore how GitLab works with different technologies to deliver a GitOps solution for every company at every stage.\n\n* ***Blog***: [A new era of Kubernetes integrations on GitLab.com](/blog/gitlab-kubernetes-agent-on-gitlab-com/)\n* ***Webcast***: [GitLab and HashiCorp - A holistic guide to GitOps and the Cloud Operating Model](/webcast/gitlab-hashicorp-gitops/)\n* ***Testimonial***: [Shaping a financial service’s cloud strategy using GitLab and Terraform](https://www.youtube.com/watch?v=2LF3eOoGV_o&list=PLFGfElNsQthb4FD4y1UyEzi2ktSeIzLxj&index=6)\n\nCover image by [Rodolfo Cuadros](https://unsplash.com/@rocua18) on [Unsplash](https://unsplash.com/photos/JKzgp6vhJ8M)\n{: .note}\n",[9,811,812,813],"CD","kubernetes","cloud native",{"slug":815,"featured":6,"template":683},"gitops-done-3-ways","content:en-us:blog:gitops-done-3-ways.yml","Gitops Done 3 Ways","en-us/blog/gitops-done-3-ways.yml","en-us/blog/gitops-done-3-ways",{"_path":821,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":822,"content":828,"config":835,"_id":837,"_type":14,"title":838,"_source":16,"_file":839,"_stem":840,"_extension":19},"/en-us/blog/gitops-with-gitlab-auto-devops",{"title":823,"description":824,"ogTitle":823,"ogDescription":824,"noIndex":6,"ogImage":825,"ogUrl":826,"ogSiteName":671,"ogType":672,"canonicalUrls":826,"schema":827},"Connecting Kubernetes clusters to GitLab with Auto DevOps","This is the 6th article in a series of tutorials on how to do GitOps with GitLab","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749663397/Blog/Hero%20Images/logoforblogpost.jpg","https://about.gitlab.com/blog/gitops-with-gitlab-auto-devops","\n                        {\n       
 \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"GitOps with GitLab: Connecting GitLab with a Kubernetes cluster - Auto DevOps\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Viktor Nagy\"}],\n        \"datePublished\": \"2022-02-08\",\n      }",{"title":829,"description":824,"authors":830,"heroImage":825,"date":831,"body":832,"category":767,"tags":833},"GitOps with GitLab: Connecting GitLab with a Kubernetes cluster - Auto DevOps",[786],"2022-02-08","\n_It is possible to use GitLab as a best-in-class GitOps tool, and this blog post series is going to show you how. These easy-to-follow tutorials will focus on different user problems, including provisioning, managing a base infrastructure, and deploying various third-party or custom applications on top of them. You can find the entire \"Ultimate guide to GitOps with GitLab\" tutorial series [here](/blog/the-ultimate-guide-to-gitops-with-gitlab/)._\n\nIn this article we will look at how one can use Auto DevOps with all its bells and whistles to easily manage deployments.\n\n## Prerequisites\n\nThis article builds upon the previous tutorials in this series. We will assume that you have a Kubernetes cluster connected to GitLab using the GitLab Agent for Kubernetes, and you understand how the CI/CD tunnel works.\n\nIf this is not the case, I recommend to follow the previous articles to have a similar setup from where we will start today.\n\n## What is Auto DevOps\n\nAuto DevOps is GitLab's answer to the complexity of software application delivery. It is a set of opinionated templates that can be used \"as-is\" or can be used to fast-track your own pipeline building. For some setups it works from testing through various security and compliance checks to canary deployments. 
Even if you have a less supported setup, you should be able to reuse some of its components, from security linting to deployment.\n\nYou can read more about the various [features built into Auto DevOps in our documentation](https://docs.gitlab.com/ee/topics/autodevops/).\n\n## The plan for building and deploying a minimul application\n\nThe plan for this article is to build and deploy a minimal application. The focus will be on showing how you can get started quickly, without any modifications on the Auto Deploy pipelines.\n\nThis setup will use the already known CI/CD tunnel. There will be a separate article that shows how to replace the \"Auto Deploy\" part of Auto DevOps with GitOps style deployments.\n\nIn this article, we will deploy a simple hello world application. This is not a tutorial about Auto DevOps, so we will only focus on the setup needed when used together with the GitLab Agent for Kubernetes.\n\nYou can see the final repository under https://gitlab.com/gitlab-examples/ops/gitops-demo/hello-world-service/.\n\n## How to build the application\n\nIn this section we will create our super simple hello world application and put a Dockerfile beside it.\n\n1. Start a new project.\n1. Add `src/main.py` with the following content:\n    ```python\n    # From https://gist.github.com/davidbgk/b10113c3779b8388e96e6d0c44e03a74\n    import http.server\n    import socketserver\n    from http import HTTPStatus\n\n    class Handler(http.server.SimpleHTTPRequestHandler):\n        def do_GET(self):\n            self.send_response(HTTPStatus.OK)\n            self.end_headers()\n            self.wfile.write(b'Hello world')\n\n    httpd = socketserver.TCPServer(('', 5000), Handler)\n    httpd.serve_forever()\n    ```\n1. Create the `Dockerfile` with:\n   ```\n   FROM python:3.9.10-slim-bullseye\n\n   WORKDIR /app\n\n   COPY ./src .\n\n   EXPOSE 5000\n\n   CMD [ \"python\", \"main.py\" ]\n   ```\n1. Commit the change to the repository.\n\n## How to set up Auto DevOps\n\n1. 
[Share the CI/CD tunnel](https://docs.gitlab.com/ee/user/clusters/agent/work_with_agent.html) with the hello-world project. Note, that the Agent configuration project amd the application project should be in the same project hierarchy and the Agent configuration project needs to be higher in this hierarchy.\n    ```yaml\n    ci_access:\n      # This agent is accessible from CI jobs in projects in these groups\n      projects:\n        - id: \u003Cpath>/\u003Cto>/\u003Cyour>/\u003Cproject>\n    ```\n1. Find out the Kubernetes context name. The agent context name is `\u003Cnamespace>/\u003Cgroup>/\u003Cproject>:\u003Cagent-name>`. You can see the available contexts in CI with the following job:\n    ```yaml\n    contexts:\n      stage: .pre\n      image:\n        name: bitnami/kubectl:latest\n        entrypoint: [\"\"]\n      script:\n        - kubectl config get-contexts \n    ```\n1. Create your `.gitlab-ci.yml` file to have Auto DevOps working:\n    ```yaml\n    include:\n        template: Auto-DevOps.gitlab-ci.yml\n\n    variables:\n        # KUBE_INGRESS_BASE_DOMAIN is the application deployment domain and should be set as a variable at the group or project level.\n        KUBE_INGRESS_BASE_DOMAIN: 74.220.23.215.nip.io\n        KUBE_CONTEXT: \"gitlab-examples/ops/gitops-demo/k8s-agents:demo-agent\"\n        KUBE_NAMESPACE: \"demo-agent\"\n\n        # Feel free to enable any of these\n        TEST_DISABLED: \"true\"\n        CODE_QUALITY_DISABLED: \"true\"\n        LICENSE_MANAGEMENT_DISABLED: \"true\"\n        BROWSER_PERFORMANCE_DISABLED: \"true\"\n        LOAD_PERFORMANCE_DISABLED: \"true\"\n        SAST_DISABLED: \"true\"\n        SECRET_DETECTION_DISABLED: \"true\"\n        DEPENDENCY_SCANNING_DISABLED: \"true\"\n        CONTAINER_SCANNING_DISABLED: \"true\"\n        DAST_DISABLED: \"true\"\n        REVIEW_DISABLED: \"true\"\n        CODE_INTELLIGENCE_DISABLED: \"true\"\n        CLUSTER_IMAGE_SCANNING_DISABLED: \"true\"\n        POSTGRES_ENABLED: \"false\"\n 
   ```\n1. Commit the changes.\n\nAs you can see, I disabled many Auto DevOps functionalities in the above CI YAML. I did this for two reasons:\n\n1. Some of these features require a Premium or Ultimate license or tests in the repo. I wanted to keep this tutorial \"stable\" for everyone.\n1. Every use case differs a little bit and Auto DevOps allows a large number of customizations. I wanted to highlight this by showing you the most basic ones. Read more about [customizing Auto DevOps](https://docs.gitlab.com/ee/topics/autodevops/customize.html). If you would like [Review Apps](https://docs.gitlab.com/ee/ci/review_apps/) support, just remove the `REVIEW_DISABLED` line.\n\nThere are actually only three settings to get the Auto DevOps pipeline up and running:\n\n- The `KUBE_CONTEXT` specifies the context used for the connection, it's provided by the GitLab Agent for Kubernetes.\n- The `KUBE_NAMESPACE` specifies the Kubernetes namespace to target with the deployments. This namespace will be used as we apply the Helm charts used behind the hood.\n- The `KUBE_INGRESS_BASE_DOMAIN` sets up an Ingress and enables user friendly access to the deployed service. \n\n## Recap\n\nA very common setup I see with GitLab customers is that the development team is responsible for writing the application code and packaging it into a Docker container. During this process, they take care of basic testing as well, but they are not familiar with all the security and compliance requirements or the deployment pipelines used within the company. The presented setup and the Auto DevOps suite of templates serves these teams. 
As you can see, the teams need minimal GitLab CI setup to run a complex pipeline that can take care of many of their requirements.\n\n## What's next\n\nIn the next article, I will show you how to deploy an application project with a GitOps style workflow.\n\n_[Click here](/blog/the-ultimate-guide-to-gitops-with-gitlab/) for the next tutorial._\n",[9,812,834],"tutorial",{"slug":836,"featured":6,"template":683},"gitops-with-gitlab-auto-devops","content:en-us:blog:gitops-with-gitlab-auto-devops.yml","Gitops With Gitlab Auto Devops","en-us/blog/gitops-with-gitlab-auto-devops.yml","en-us/blog/gitops-with-gitlab-auto-devops",{"_path":842,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":843,"content":848,"config":854,"_id":856,"_type":14,"title":857,"_source":16,"_file":858,"_stem":859,"_extension":19},"/en-us/blog/gitops-with-gitlab-infrastructure-provisioning",{"title":844,"description":845,"ogTitle":844,"ogDescription":845,"noIndex":6,"ogImage":825,"ogUrl":846,"ogSiteName":671,"ogType":672,"canonicalUrls":846,"schema":847},"GitOps with GitLab: Infrastructure provisioning with GitLab and Terraform","In part two of our GitOps series, we set up the infrastructure using GitLab and Terraform. Here's everything you need to know.","https://about.gitlab.com/blog/gitops-with-gitlab-infrastructure-provisioning","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"GitOps with GitLab: Infrastructure provisioning with GitLab and Terraform\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Viktor Nagy\"}],\n        \"datePublished\": \"2021-11-04\",\n      }",{"title":844,"description":845,"authors":849,"heroImage":825,"date":850,"body":851,"category":767,"tags":852},[786],"2021-11-04","\n\n_It is possible to use GitLab as a best-in-class GitOps tool, and this blog post series is going to show you how. 
These easy-to-follow tutorials will focus on different user problems, including provisioning, managing a base infrastructure, and deploying various third-party or custom applications on top of them. You can find the entire \"Ultimate guide to GitOps with GitLab\" tutorial series [here](/blog/the-ultimate-guide-to-gitops-with-gitlab/)._\n\nThis post focuses on setting up the underlying infrastructure using GitLab and Terraform. \n\nThe first step is to have a network and some computing instances that we can use as our Kubernetes cluster. In this project, I’ll use [Civo](https://www.civo.com) to host the infrastructure as it has the most minimal setup, but the same can be achieved using any of the hyperclouds. GitLab documentation provides examples on how to set up a [cluster on AWS](https://docs.gitlab.com/ee/user/infrastructure/clusters/connect/new_eks_cluster.html) or [GCP](https://docs.gitlab.com/ee/user/infrastructure/clusters/connect/new_gke_cluster.html).\n\nWe want to have a project that describes our [infrastructure as code (IaC)](/topics/gitops/infrastructure-as-code/). As Terraform is today the de facto standard in infrastructure provisioning, we’ll use Terraform for the task. Terraform requires a state storage backend; We will use the GitLab managed Terraform state that is very easy to get started. Moreover, we will set up a pipeline to run the infrastructure changes automatically if they are merged to the main branch.\n\n## What infrastructure related steps are we going to codify?\n\n1. Create a VPC\n2. Set up a Kubernetes cluster\n\nActually, we will create separate Terraform projects for these 3 steps under a single GitLab project. We split the infrastructure because in a real world scenario, these projects will likely be a bit bigger, and Terraform slows down quite a lot if it has to deal with big projects. 
In general, it is a good practice to have small Terraform projects, and think about the infrastructure in a layered way, where higher layers can reference the output of lower layers. There are [many ways to access the output of another Terraform project](https://www.terraform.io/docs/language/state/remote-state-data.html#alternative-ways-to-share-data-between-configurations), and we leave it up to the reader to learn more about these. In this case, we will use simple data resources.\n\nAfter this long intro, let’s get started!\n\n## Creating the network\n\nFirst, let’s create a new GitLab project. You can use either an empty project or any of the project templates. If you plan to do all these tutorials, I recommend starting with the [Cluster Management Project template](https://docs.gitlab.com/ee/user/clusters/management_project_template.html). Once the project is ready, let’s create the following files:\n\n- A `terraform/network/main.tf` file:\n\n```hcl\nterraform {\n  required_providers {\n    civo = {\n      source = \"civo/civo\"\n      version = \"0.10.10”\n    }\n  }\n  backend \"http\" {\n  }\n}\n\n# Configure the Civo Provider\nprovider \"civo\" {\n  token = var.civo_token\n  region = local.region\n}\n\nresource \"civo_network\" \"network\" {\n    label = \"development\"\n}\n```\n\nThis file describes almost everything we want this project to do. The first block configures Terraform to use the `civo/civo` provider and a simple `http` backend for state storage. As I mentioned above, we will use [the GitLab managed Terraform state](https://docs.gitlab.com/ee/user/infrastructure/iac/terraform_state.html), that acts like an `http` backend from Terraform’s point of view. The GitLab backend is versioned and encrypted by default, and GitLab CI/CD contains all the environment variables needed to access it. I will demonstrate later how you can access the backend either from the local command line or from GitLab CI/CD.\n\nNext we configure the `Civo` provider. 
You can see that here we use two variables, an input and a local variable. These will be defined in separate files below. Finally, we describe a network and give it the “development” label.\n\n- A `terraform/network/outputs.tf` file:\n\n```hcl\noutput \"network\" {\n  value = civo_network.network.id\n}\n```\n\nThis file just provides the network id as an output variable from Terraform. Other projects could consume it. We won’t use this, but I consider it a good practice as it might help to debug issues.\n\n- A `terraform/network/locals.tf` file:\n\n```hcl\nlocals {\n  region = \"LON1\"\n}\n```\n\nHere we define the `region` local as mentioned under the description of the `main.tf` file. Why aren’t we making it an input variable? Because this is closely related to our infrastructure and for this reason we want to keep it in code. It should be version controlled and changes should be reviewed following the team’s processes. We could write the values into a `.tfvars` file also to achieve versioning and have it as a variable. I prefer to keep it in `hcl` to have it closer to the rest of the code.\n\n- A `terraform/network/variables.tf` file:\n\n```hcl\nvariable \"civo_token\" {\n  type = string\n  sensitive = true\n}\n```\n\nFinally, we define the Civo access token as an input variable.\n\nNow, we are ready with the Terraform code, but we cannot access the GitLab state backend yet. For that we either need to configure our local environment or GitLab CI/CD. Let’s see both setups.\n\n## Running Terraform locally\n\nYou can run Terraform either locally or using GitLab CI/CD. The following two sections present both approaches.\n\n### Accessing the GitLab Terraform state backend locally\n\nThe simplest way to configure the “http” backend is using environment variables. There are many environment variables needed though! For this reason, I prefer to use a collection of [direnv](https://direnv.net/) files. 
We will need all these environment variables configured:\n\n```\nTF_HTTP_PASSWORD\nTF_HTTP_USERNAME\nTF_HTTP_ADDRESS\nTF_HTTP_LOCK_ADDRESS\nTF_HTTP_LOCK_METHOD\nTF_HTTP_UNLOCK_ADDRESS\nTF_HTTP_UNLOCK_METHOD\nTF_HTTP_RETRY_WAIT_MIN\n```\n\nDirenv enables us to add a few files to our repository to describe the above environment variables in a nice and scalable way. Clearly, there are some variables that are sensitive, like `TF_HTTP_PASSWORD`, so this should not be stored in git. Moreover, we could reuse most of these variables in the other two Terraform projects we are going to create. With these considerations in mind, let’s create the following 3 files:\n\n- Create `terraform/network/.envrc`: \n\n```\nexport TF_STATE_NAME=civo-${PWD##*terraform/}\nsource_env ../../.main.env\n```\n\nThis sets the `TF_STATE_NAME` variable to `civo-network` using some bash magic and loads the `.main.env` file from the root of the repository using the `source_env` method provided by `direnv`. This can be added to version control safely.\n\n- Create `.main.env`:\n\n```\nsource_env_if_exists ./.local.env\n\nCI_PROJECT_ID=28431043\nexport TF_HTTP_PASSWORD=\"${CI_JOB_TOKEN:-$GITLAB_ACCESS_TOKEN}\"\nexport TF_HTTP_USERNAME=\"${GITLAB_USER_LOGIN}\"\nexport GITLAB_URL=https://gitlab.com\n\nexport TF_VAR_remote_address_base=\"${GITLAB_URL}/api/v4/projects/${CI_PROJECT_ID}/terraform/state\"\nexport TF_HTTP_ADDRESS=\"${TF_VAR_remote_address_base}/${TF_STATE_NAME}\"\nexport TF_HTTP_LOCK_ADDRESS=\"${TF_HTTP_ADDRESS}/lock\"\nexport TF_HTTP_LOCK_METHOD=\"POST\"\nexport TF_HTTP_UNLOCK_ADDRESS=\"${TF_HTTP_LOCK_ADDRESS}\"\nexport TF_HTTP_UNLOCK_METHOD=\"DELETE\"\nexport TF_HTTP_RETRY_WAIT_MIN=5\n\n# export TF_LOG=\"TRACE\"\n```\n\nThis file contains the bulk of the environment variables we need, and can be added to version control safely as no secrets are stored there. The first line loads the `.local.env` file that will contain the sensitive values, again using a `direnv` method. 
The second line contains the GitLab project ID. This is shown under the project name of your GitLab project. The next three lines configure access to GitLab. The username and password will be populated from the `local.env` file, while the `GITLAB_URL` variable is there to help you if you are on a self-managed GitLab instance.\n\n- Create `.local.env` and add it to `.gitignore`:\n\n```\nGITLAB_ACCESS_TOKEN=\u003Cyour GitLab personal access token>\nGITLAB_USER_LOGIN=\u003Cyour GitLAb username>\nexport TF_VAR_civo_token=\u003Cyour Civo access token>\n```\n\nClearly, I cannot provide the values for this file. Please fill them out with your credentials. You can generate a GitLab personal access token under your settings. To access the GitLab managed Terraform state using a personal access token, the token should have the `api` scope enabled.\n\nWarning: **Don’t forget to add this file to `.gitignore`**. Actually, I have it in my global gitignore file to avoid accidental commits.\n\nAs the environment variables are set up, you should make direnv to start using these variables. When you `cd` into the `terraform/network` directory a warning should appear asking you to run `direnv allow`. Enable the environment variables:\n\n```\ncd terraform/network\ndirenv allow\n```\n\n### Creating the network - finally\n\nLet’s see if we managed to set up everything right!\n\n```\nterraform init\nterraform plan\n```\n\nThe first command just initializes Terraform, downloads the Civo plugin and does some sanity checks. The second command on the other hand connects to the remote state backend, and computes the necessary changes to provide the infrastructure we described in this project.\n\nIf we like the changes, we can apply them with\n\n```\nterraform apply\n```\n\n_Nota bene_, in a real world setup, you would likely output a plan file from `terraform plan` and feed it into `terraform apply`, just like the CI/CD setup will do it later. 
Anyway, this is good enough for us, so let’s create the cluster next.\n\n### Running Terraform using GitLab CI/CD\n\nNote: This section assumes that you have access to GitLab Runners to run the CI/CD jobs.\n\nGiven the flexibility of GitLab CI/CD it can be set up in many different ways. Here we will build a pipeline that incorporates the most important aspects of a Terraform-oriented pipeline, without restricting you to require merge requests or any other processes. The only restriction we'll place on it is that changes should only be applied on the main branch and this should be a manual action.\n\nCopy the following code into `.gitlab-ci.yml` in the root of your project:\n\n```yaml\ninclude:\n  - template: \"Terraform/Base.latest.gitlab-ci.yml\"\n\nstages:\n- init\n- build\n- deploy\n\nnetwork:init:\n  extends: .terraform:init\n  stage: init\n  variables:\n    TF_ROOT: terraform/network\n    TF_STATE_NAME: network\n  only:\n    changes:\n      - \"terraform/network/*\"\n\nnetwork:review:\n  extends: .terraform:build\n  stage: build\n  variables:\n    TF_ROOT: terraform/network\n    TF_STATE_NAME: network\n  resource_group: tf:network\n  only:\n    changes:\n      - \"terraform/network/*\"\n\nnetwork:deploy:\n  extends: .terraform:deploy\n  stage: deploy\n  variables:\n    TF_ROOT: terraform/network\n    TF_STATE_NAME: network\n  resource_group: tf:network\n  environment:\n    name: dns\n  when: manual\n  only:\n    changes:\n      - \"terraform/network/*\"\n    variables:\n      - $CI_COMMIT_BRANCH == $CI_DEFAULT_BRANCH\n```\n\nThis CI pipeline re-uses [the latest base Terraform CI template](https://gitlab.com/gitlab-org/gitlab/-/tree/master/lib/gitlab/ci/templates/Terraform) shipped with GitLab, and runs the jobs by simply parameterizing them as function calls. 
Let's review quickly the keys used:\n\n- the [`stages`](https://docs.gitlab.com/ee/ci/yaml/#stages) keyword provides a list of stages to compose the pipeline\n- the [`extends`](https://docs.gitlab.com/ee/ci/yaml/#extends) keyword refers to the job defined in [the base Terraform template](https://gitlab.com/gitlab-org/gitlab/-/blob/master/lib/gitlab/ci/templates/Terraform/Base.latest.gitlab-ci.yml)\n- the [`variables`](https://docs.gitlab.com/ee/ci/yaml/#variables) keywords parameterizes the job for our requirements\n- the [`resource_group`](https://docs.gitlab.com/ee/ci/yaml/#resource_group) keyword assures that always only one potentially conflicting job is run\n- the [`only`](https://docs.gitlab.com/ee/ci/yaml/#only--except) keyword restricts runs to specific situations\n\nIf you commit this file and push it to GitLab, a new pipeline will be created that as a last step provides you a manual job to create your network. We will extend this file later throughout this tutorial series.\n\n## Create a Kubernetes cluster\n\nThe code required for the cluster will be very similar to the code for the network.\n\n- Add `terraform/cluster/outputs.tf` file:\n\n```hcl\nterraform {\n  required_providers {\n    civo = {\n      source = \"civo/civo\"\n      version = \"0.10.4\"\n    }\n  }\n  backend \"http\" {\n  }\n}\n\n# Configure the Civo Provider\nprovider \"civo\" {\n  token = var.civo_token\n  region = local.region\n}\n\nresource \"civo_kubernetes_cluster\" \"dev-cluster\" {\n    name = \"dev-cluster\"\n    // tags = \"gitlab demo\"  // Do not add tags! There is a bug in the civo-provider :(\n    network_id = data.civo_network.network.id\n    applications = \"\"\n    num_target_nodes = 3\n    target_nodes_size = element(data.civo_instances_size.small.sizes, 0).name\n}\n```\n\nThe only difference compared to `terraform/network/outputs.tf` is the last resource as that describes the cluster. You can see how we reference the network created before. 
Of course, we'll need a `data` resource for this and the instance sizes.\n\n- Add `terraform/cluster/data.tf` file:\n\n```hcl\ndata \"civo_instances_size\" \"small\" {\n    filter {\n        key = \"name\"\n        values = [\"g3.small\"]\n        match_by = \"re\"\n    }\n\n    filter {\n        key = \"type\"\n        values = [\"instance\"]\n    }\n\n}\n\ndata \"civo_network\" \"network\" {\n    label = \"development\"\n}\n```\n\n\n- The `terraform/cluster/locals.tf` file outputs some useful details. We won't use them now, but they often come in handy in the longer term.\n\n```hcl\noutput \"cluster\" {\n  value = {\n    status = civo_kubernetes_cluster.dev-cluster.status\n    master_ip = civo_kubernetes_cluster.dev-cluster.master_ip\n    dns_entry = civo_kubernetes_cluster.dev-cluster.dns_entry\n  }\n}\n```\n\n- The `terraform/cluster/locals.tf` file is the same as for the network project:\n\n```hcl\nlocals {\n  region = \"LON1\"\n}\n```\n\n- The `terraform/cluster/variables.tf` file is the same as for the network project:\n\n```hcl\nvariable \"civo_token\" {\n  type = string\n  sensitive = true\n}\n```\n\n### Provision the cluster\n\nLet's see how can we extend the previous local and CI/CD setups to run this Terraform project!\n\n#### Running locally\n\n- Create `terraform/cluster/.envrc`  as you did for the network project:\n\n```\nexport TF_STATE_NAME=civo-${PWD##*terraform/}\nsource_env ../../.main.env\n```\n\nNow run Terraform:\n\n```bash\nterraform init\nterraform plan\nterraform apply\n```\n\n#### Running from CI/CD\n\nExtend the `.gitlab-ci.yaml` file with the following 3 jobs:\n\n```hcl\ncluster:init:\n  extends: .terraform:init\n  stage: init\n  variables:\n    TF_ROOT: terraform/cluster\n    TF_STATE_NAME: cluster\n  only:\n    changes:\n      - \"terraform/cluster/*\"\n\ncluster:review:\n  extends: .terraform:build\n  stage: build\n  variables:\n    TF_ROOT: terraform/cluster\n    TF_STATE_NAME: cluster\n  resource_group: tf:cluster\n  only:\n    
changes:\n      - \"terraform/cluster/*\"\n\ncluster:deploy:\n  extends: .terraform:deploy\n  stage: deploy\n  variables:\n    TF_ROOT: terraform/cluster\n    TF_STATE_NAME: cluster\n  resource_group: tf:cluster\n  environment:\n    name: dev-cluster\n  when: manual\n  only:\n    changes:\n      - \"terraform/cluster/*\"\n    variables:\n      - $CI_COMMIT_BRANCH == $CI_DEFAULT_BRANCH\n```\n\nAs you can see these are the same jobs that we saw already, they are just parameterized for the `cluster` Terraform project.\n\nOnce you push your code to GitLab, you cluster should be ready in a few minutes!\n\n_[Click here](/blog/the-ultimate-guide-to-gitops-with-gitlab/) for the next tutorial._\n\n\n\n",[9,812,853],"inside GitLab",{"slug":855,"featured":6,"template":683},"gitops-with-gitlab-infrastructure-provisioning","content:en-us:blog:gitops-with-gitlab-infrastructure-provisioning.yml","Gitops With Gitlab Infrastructure Provisioning","en-us/blog/gitops-with-gitlab-infrastructure-provisioning.yml","en-us/blog/gitops-with-gitlab-infrastructure-provisioning",{"_path":861,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":862,"content":868,"config":874,"_id":876,"_type":14,"title":877,"_source":16,"_file":878,"_stem":879,"_extension":19},"/en-us/blog/gitops-with-gitlab-manage-the-agent",{"title":863,"description":864,"ogTitle":863,"ogDescription":864,"noIndex":6,"ogImage":865,"ogUrl":866,"ogSiteName":671,"ogType":672,"canonicalUrls":866,"schema":867},"Self-managing Kubernetes agent installation with GitOps","This is the eighth and last article in a series of tutorials on how to do GitOps with GitLab.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749670178/Blog/Hero%20Images/GitLab-Ops.png","https://about.gitlab.com/blog/gitops-with-gitlab-manage-the-agent","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"GitOps with GitLab: Turn a GitLab agent for Kubernetes installation to 
manage itself\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Viktor Nagy\"}],\n        \"datePublished\": \"2022-03-30\",\n      }",{"title":869,"description":864,"authors":870,"heroImage":865,"date":871,"body":872,"category":767,"tags":873},"GitOps with GitLab: Turn a GitLab agent for Kubernetes installation to manage itself",[786],"2022-03-30","\n\n_It is possible to use GitLab as a best-in-class GitOps tool, and this blog post series is going to show you how. These easy-to-follow tutorials will focus on different user problems, including provisioning, managing a base infrastructure, and deploying various third-party or custom applications on top of them. You can find the entire \"Ultimate guide to GitOps with GitLab\" tutorial series [here](/blog/the-ultimate-guide-to-gitops-with-gitlab/)._\n\nIn this article, we will build upon the first few articles, and will turn a GitLab agent for Kubernetes installation to manage itself. This is highly recommended for production usage as it puts your `agentk` deployment under your GitOps project, and enables flawless and simple upgrades.\n\n## Prerequisites\n\nThis article builds on a few previous articles from this series and makes the following assumptions:\n\n- You have [an agent connection set up using the `kpt` based method](/blog/gitops-with-gitlab-connecting-the-cluster/).\n- You have [set up Bitnami's Sealed secrets](/blog/gitops-with-gitlab-secrets-management/).\n- You understand [how to use `kustomize` with the agent](/blog/gitops-with-gitlab/).\n\n## The goal\n\nThe goal of this tutorial is to manage a GitLab agent for Kubernetes deployment using that given agent. This has several benefits, including: \n\n- By turning the agent to manage itself, the agent configuration and deployment is managed in code. 
As a result, all the code-oriented tools, including Merge Requests, Approvals, and branching are there to support your processes and policies.\n- Managing a fleet of agent installations in code enables simple upgrades of the deployments.\n\n### Upgrading GitLab and the GitLab agent for Kubernetes\n\nA single GitLab instance might have dozens of agent connections. How should you upgrade all these deployments in a coordinated way? Turning everything into code simplifies the upgrade process a lot.\n\nWe have the GitLab - Agent [version compatibility documented](https://docs.gitlab.com/ee/user/clusters/agent/install/index.html#upgrades-and-version-compatibility). The recommended approach is to first upgrade GitLab together with `KAS`, the GitLab-side component of the connection, and then upgrade all the `agentk` deployments. \n\nIf you manage the `agentk` deployments in code, the upgrade requires only bumping the version number in code and the `agentk` instances will take care of upgrading themselves.\n\n## Turning an agent installation to manage itself\n\nLet's do a quick recap and an overview how we wil use the tools.\n\nWe use `kpt` to check out tagged `agentk` deployment manifests. As the manifests are a set of `kustomize` layers, we can extend them with our own overlays if needed, or just customize the setup per our requirements. The agent connection requires a token to authenticate with GitLab. We can use Bitnami's Sealed Secrets to store an encrypted sycret in the repo.\n\nAll the above code can be put under version control safely. Moreover, we can use GitLab CI/CD to dehydrate the `kustomize` package into vanilla Kubernetes manifests that the agent can deal with.\n\nLet's see the above in action!\n\n### Kustomize layer with encrypted secret\n\nBased on the previous articles, we have the `kpt` package checked out under `packages/gitlab-agent`. We would like to store the vanilla Kubernetes manifests in the repository. 
We can run `kustomize build packages/gitlab-agent/cluster > kubernetes/gitlab-agent.yaml` to get the manifests, but this will include the unencrypted authentication token too.\n\nTo never output the unencrypted token, we should turn it into a sealed secret.\n\nNavigate to the `gitlab-agent` Terraform project, and create a Kubernetes secret from the token `terraform output -raw token_secret | kubectl create secret generic gitlab-agent-token -n gitlab-agent --dry-run=client --type=Opaque --from-file=token=/dev/stdin -o yaml > ../../ignored/gitlab-agent-token.yaml`. If you followed the instructions in the previous articles, the files under the `ignored` directory are never committed to `git`.\n\nWe will turn this unencrypted secret into a sealed secret. As the secret will already exist in the cluster, we should instruct the Bitnami Sealed Secret controller to pull it under its management. Moreover, as kustomize applies a random hash to every secret name, we should enable renaming the secret within the namespace. We can achieve these by adding two annotations to the unencrypted secrets object.\n\nAdd the following annotations to `ignored/gitlab-agent-token.yaml`\n\n```\nannotations:\n  sealedsecrets.bitnami.com/managed: \"true\"\n  sealedsecrets.bitnami.com/namespace-wide: \"true\"\n```\n\nNext, we should create an encrypred secret from the ignored, unencrypted one running `bin/seal-secret ignored/gitlab-agent-token.yaml > packages/gitlab-agent/sealed-secret` in the root of our project. This creates the encrypted secret under `packages/gitlab-agent/sealed-secret/SealedSecret.gitlab-agent-token.yaml`. Now, we need a kustomize layer that will use this secret instead of the original one that came with `kpt`. 
Let's create the following files around the encrypted secret:\n\n- Create `packages/gitlab-agent/sealed-secret/kustomization.yaml` as:\n\n```yaml\napiVersion: kustomize.config.k8s.io/v1beta1\nkind: Kustomization\nresources:\n- ../base\n- SealedSecret.gitlab-agent-token.yaml\ncomponents:\n- ../cluster/components/gitops-read-all\n- ../cluster/components/gitops-write-all\n- ../cluster/components/cilium-alert-read\nconfigurations:\n- configuration/sealed-secret-config.yaml\nsecretGenerator:\n- name: gitlab-agent-token\n  behavior: replace\n  type: Opaque\n  namespace: gitlab-agent\n  options:\n    annotations:\n      sealedsecrets.bitnami.com/managed: \"true\"\n      sealedsecrets.bitnami.com/namespace-wide: \"true\"\n```\n\n- Create `packages/gitlab-agent/sealed-secret/configuration/sealed-secret-config.yaml` as:\n\n```yaml\nnameReference:\n- kind: Secret\n  fieldSpecs:\n  - kind: SealedSecret\n    path: metadata/name\n  - kind: SealedSecret\n    path: spec/template/metadata/name\n```\n\nThis configuration enables us to reference the name of the Sealed Secret in the `secretGenerator`.\n\nWe created a new `kustomize` overlay that builds on the `base` and `cluster` layers, but will use the sealed secret. We can hydrate this into vanilla manifests using `kustomize build packages/gitlab-agent/sealed-secret > kubernetes/gitlab-agent.yaml`. This configuration does not include any unencrypted, sensitive data. As a result, we can commit it freely using `git commit`.\n\n### Adopt the agent by the agent\n\nRight now the agent configuration file looks similar to: \n\n```yaml\ngitops:\n  # Manifest projects are watched by the agent. 
Whenever a project changes,\n  # GitLab deploys the changes using the agent.\n  manifest_projects:\n  - id: path/to/your/project\n    default_namespace: gitlab-agent\n    # Paths inside of the repository to scan for manifest files.\n    # Directories with names starting with a dot are ignored.\n    paths:\n    - glob: 'kubernetes/test_config.yaml'\n    - glob: 'kubernetes/**/*.yaml'\n```\n\nIf we would push the previously hydrated manifests, `agentk` would fail applying them complaining about missing inventories. We can easily fix this by temporarily setting a looser inventory policy:\n\n```yaml\ngitops:\n  # Manifest projects are watched by the agent. Whenever a project changes,\n  # GitLab deploys the changes using the agent.\n  manifest_projects:\n  - id: path/to/your/project\n    default_namespace: gitlab-agent\n    inventory_policy: adopt_all\n    # Paths inside of the repository to scan for manifest files.\n    # Directories with names starting with a dot are ignored.\n    paths:\n    - glob: 'kubernetes/test_config.yaml'\n    - glob: 'kubernetes/**/*.yaml'\n```\n\nWith the inventory policy configured, we can commit and push our changes to GitLab. The agent will see the new configuration and resources, and will apply them into the cluster. From now on, you can change the code in the repository, push it to git, and the changes will be automatically applied into your cluster.\n\n#### What are inventory policies?\n\nThe GitLab agent for Kubernetes knows about the managed resources using so-called inventory objects. In technical terms, an inventory object is just a `ConfigMap` with a unique label. Whenever the agent sees an object that it should manage, it applies the same label. 
This way, every agent can easily find the resources that it manages.\n\nYou can read more about the possible [inventory policy configurations in the documentation](https://docs.gitlab.com/ee/user/infrastructure/clusters/deploy/inventory_object.html).\n\n\n#### A word about RBAC\n\nDepending on the authorization rights given to the `agentk` deployment, not every change might be possible. For example, if you would like to create new `ClusterRole` and `ClusterRoleBinding` in a new `kustomize` overlay, and apply that with the Agent, that might fail. It will fail, if your current role-based access control (RBAC) does not allow your `agentk` deployment to create these resources. In this case, you should either provide higher rights to your `agentk` service account first or you should apply the changes manually from your command line.\n\n### Automatic hydration\n\nNow, if you want to change something in your agent deployment, you need to take two actions:\n\n- change the code in the `kpt` package\n- run `kustomize build` to hydrate the results\n\nLet's automate the second step so you can focus on your main job only. Following the setup of [a GitOps-style Auto DevOps pipeline](/blog/gitops-with-gitlab/#hydrating-the-manifests), we need to extend the `hydrate-packages` job:\n\n\n```yaml\nhydrate-packages:\n      ...\n      script:\n      - mkdir -p new_manifests\n      ...\n      - kustomize build packages/gitlab-agent/sealed-secret > new_manifests/gitlab-agent.yaml\n```\n\nWe can re-use all the other automation as presented in the previous articles.\n\n## How to upgrade `agentk`?\n\nJust to provide a practical example, let's see how we can use the above setup to easily upgrade an `agentk` deployment to a newer version.\n\nBy running `kustomize cfg set packages/gitlab-agent agent-version v14.9.1` we set the intended `agentk` version to be version `14.9.1`. 
You can commit and push this change to git, and lay back in your chair to see how the changes are being rolled out across your clusters. You can point several agent configurations at the same `kubernetes/gitlab-agent.yaml` manifest, and upgrade all of them at once.\n\n## Recap\n\nIn this article we have seen:\n\n- how to turn an Agent deployment to manage itself\n- how to extend the default `kpt` project with a custom `kustomize` overlay to customize the `agentk` deployment\n- how to easily upgrade a set of `agentk` deployments\n- how to pull already existing objects to be managed by the Agent using inventory policies\n\n_Note: This is the final installment in this series of [how to do GitOps with GitLab](/blog/the-ultimate-guide-to-gitops-with-gitlab)._\n\n\n",[9,812,834],{"slug":875,"featured":6,"template":683},"gitops-with-gitlab-manage-the-agent","content:en-us:blog:gitops-with-gitlab-manage-the-agent.yml","Gitops With Gitlab Manage The Agent","en-us/blog/gitops-with-gitlab-manage-the-agent.yml","en-us/blog/gitops-with-gitlab-manage-the-agent",{"_path":881,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":882,"content":887,"config":892,"_id":894,"_type":14,"title":895,"_source":16,"_file":896,"_stem":897,"_extension":19},"/en-us/blog/gitops-with-gitlab-secrets-management",{"title":883,"description":884,"ogTitle":883,"ogDescription":884,"noIndex":6,"ogImage":825,"ogUrl":885,"ogSiteName":671,"ogType":672,"canonicalUrls":885,"schema":886},"GitOps with GitLab: How to tackle secrets management","In part four of our GitOps series, we learn how to manage secrets with the GitLab Agent for Kubernetes.","https://about.gitlab.com/blog/gitops-with-gitlab-secrets-management","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"GitOps with GitLab: How to tackle secrets management\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Viktor Nagy\"}],\n        \"datePublished\": 
\"2021-12-02\",\n      }",{"title":883,"description":884,"authors":888,"heroImage":825,"date":889,"body":890,"category":767,"tags":891},[786],"2021-12-02","\n\n_It is possible to use GitLab as a best-in-class GitOps tool, and this blog post series is going to show you how. These easy-to-follow tutorials will focus on different user problems, including provisioning, managing a base infrastructure, and deploying various third-party or custom applications on top of them. You can also view our entire [\"Ultimate guide to GitOps with GitLab\"](/blog/the-ultimate-guide-to-gitops-with-gitlab/) tutorial series._\n\nIn this article we will use our cluster connection to manage secrets within our cluster.\n\n## Prerequisites\n\nThis article assumes that you have a Kubernetes cluster connected to GitLab using the GitLab Agent for Kubernetes. If you don't have such a cluster, I recommend looking at the linked articles above so you have a similar setup from where we will start today.\n\n## A few words about secrets management\n\nThe Kubernetes `Secret` resource is a rather tricky one! By design, secrets should have limited access and should be encrypted at rest and in transit. Still, by default, Kubernetes does not encrypt secrets at rest and accessing them might not be restricted in your cluster. We will not go into detail about how to secure your cluster with respect to secrets in this article. Instead, we want to focus on getting some secrets configured in your cluster with a GitOps approach.\n\nManaging secrets with GitOps means you store those secrets within your Git repository. Of course, you should never store unencrypted secrets in a repo, and some security people are even reluctant to store encrypted secrets in Git. We will not be that worried, but you should consider if this is an acceptable risk for you. 
There is an alternative we'll talk about, below, if you prefer to not manage your secrets in Git.\n\nThere are a few benefits of Git-based secrets management:\n\n- you get versioning by default\n- collaboration is supported using merge requests\n- as secrets are in code, you push responsibilities towards the development team\n- the tools used are well-known to developers\n\n## Secrets management with GitLab\n\nWhen it comes to secrets, Kubernetes, and GitLab, there are at least 3 options to choose from:\n\n- create secrets automatically from environment variables in GitLab CI\n- manage secrets through HashiCorp Vault and GitLab CI\n- manage secrets in git with a GitOps approach\n\n### Create secrets automatically from environment variables in GitLab CI\n\nThe Auto Deploy template applies every [`K8S_SECRET_` prefixed environment variable](https://docs.gitlab.com/ee/topics/autodevops/customize.html#application-secret-variables) into your cluster as a Kubernetes Secret. Later, your applications can reference these secrets. This approach is the simplest to use, especially if you would like to use [Auto DevOps](/topics/devops/). We will look into it in a future article.\n\nWhile simple to use, with this approach your secrets are stored in the GitLab database, instead of `Git`. That means you lose versioning of the secrets, you need `Maintainer` rights to modify these secrets, and you lose the ability to approve a change of secret in a merge request.\n\n### Manage secrets through HashiCorp Vault and GitLab CI\n\n[GitLab CI/CD integrates with HashiCorp Vault](https://docs.gitlab.com/ee/ci/examples/authenticating-with-hashicorp-vault/#authenticating-and-reading-secrets-with-hashicorp-vault) to support advanced secrets management use cases. You can combine the `K8S_SECRET_` prefixed use case even with Vault-based secrets, and have the secrets applied automatically. 
\n\nWith this approach, you get the all the benefits of HashiCorp Vault, but there is a question: why do you move secrets from Vault to GitLab just to move them to your cluster instead of retrieving the secrets directly from within your cluster? We recommend leaving GitLab out of this flow if you don't have a really good reason to provide secret access to GitLab too! Vault has really great Kubernetes support, thus retrieving secrets directly should be feasible.\n\n### Manage secrets in Git with a GitOps approach\n\nTo manage secrets in Git, we will need some kind of tooling to take care of the encryption/decryption of the secrets. In this article, I will show you how to set up and use [Bitnami's Sealed Secrets](https://github.com/bitnami-labs/sealed-secrets), but you can try other tools, like [SOPS](https://github.com/mozilla/sops) too. We will look into Bitnami's approach as it targets Kubernetes exclusively, unlike SOPS that supports other use cases too, and might need a bit more setup for Kubernetes.\n\nBitnami's Sealed Secrets is composed of an in-cluster controller and a CLI tool. The cluster component defines a `SealedSecret` custom resource that stores the encrypted secret and related metadata. Once a `SealedSecret` is deployed into the cluster, the controller decrypts it and creates a native Kubernetes `Secret` resource from it. To create a `SealedSecret` resource, the `kubeseal` utility can be used. `kubeseal` can take a public key and transform and encrypt a native Kubernetes `Secret` into a `SealedSecret`, and `kubeseal` can help with retrieving the public key from the cluster-side controller too.\n\n## Setting up Bitnami's Sealed Secrets\n\nAs the GitLab Agent supports pure Kubernetes manifests to do GitOps, we will need the manifests for Sealed Secrets. Open the [Sealed Secrets releases page](https://github.com/bitnami-labs/sealed-secrets/releases/) and find the most recent release (Don't be fooled by the `helm` releases!). 
At the time of writing this article, the most recent [release is v0.16.0](https://github.com/bitnami-labs/sealed-secrets/releases/tag/v0.16.0). From there you can download the release `yaml`, if your cluster supports RBAC, I recommend the basic `controller.yaml` file.\n\n- Save and commit the `controller.yaml` under `kubernetes/sealed-secrets.yaml`\n\nPush the changes and wait a few seconds for them to get applied. Check that they got applied successfully using: `kubectl get pods -n kube-system -l name=sealed-secrets-controller`\n\n## Retrieving the public key\n\nWhile the user can encrypt a secret directly with `kubeseal`, this approach requires them to have access to the Kube API. Instead of providing access, we can fetch the public key from the Sealed Secrets controller and store it in the Git repo. The public key can be used to encrypt secrets, but is useless for decrypting them.\n\n```bash\nkubeseal --fetch-cert > sealed-secrets.pub.pem\n```\n\n### How to avoid storing unencrypted secrets\n\nI prefer to have an `ignored` directory within my Git repo. The content of this directory is never committed to Git, and I put every sensitive data under this directory.\n\n```bash\nmkdir ignored\ncat \u003C\u003CEOF > ignored/.gitignore\n*\n!.gitignore\nEOF\n```\n\n## Continue with setup - not needed if we use a box\n\nNow, you can create sealed secrets with the following two commands:\n\n```bash\necho \"Very secret\" | kubectl create secret generic my-secret -n gitlab-agent --dry-run=client --type=Opaque --from-file=token=/dev/stdin -o yaml > ignored/my-secret.yaml\nkubeseal --format=yaml --cert=sealed-secrets.pub.pem \u003C ignored/my-secret.yaml > kubernetes/\n```\n\nThe first command creates a regular Kubernetes `Secret` resource in the `gitlab-agent` namespace. Setting the namespace is important if you use Sealed Secrets and every SealedSecret is scoped for a specific namespace. 
You can read more about this in the Sealed Secrets documentation.\n\nThe second command takes a `Secret` resource object and turns it into an encrypted `SealedSecret` resource. In my case, the secret file:\n\n```yaml\napiVersion: v1\ndata:\n  token: VmVyeSBzZWNyZXQK\nkind: Secret\nmetadata:\n  creationTimestamp: null\n  name: my-secret\n  namespace: gitlab-agent\ntype: Opaque\n```\n\ngot turned into:\n\n```yaml\napiVersion: bitnami.com/v1alpha1\nkind: SealedSecret\nmetadata:\n  creationTimestamp: null\n  name: my-secret\n  namespace: gitlab-agent\nspec:\n  encryptedData:\n    token: AgC1m/D1UwliKD3C2QSv/g+zBi1qGz1YTLZfqnl5JJ4NydCatKzsp8LZr2stIlkwcS3f2YAo/ZIq1OUhOgSgkuNMwVdqsBx1zq7Z3xpGLMIMe7B3XhQ+ExWwqgrm1dTiTDHaH9eXsZWaNsruKQU0F8oGxgLfO/axEZeGWd4WngZRaed9B43dy2k05B6fZnxmwtUVSpr86MO52fX06/QdbvB8MZTrYb7qFuL14U0IDvdFl4l8sPl2rrXsriKg0fJHIV6XtlCwPpQGozTZTUX8nbvU0yXothBzPbaIUfXseFqaW8i/i0Ai+aKhWQAjPGooVAXGwKsuve16DxZ6GJPp1ymR1cEsBkEPlYKbVCKtH5VuptCYZuTXMM6OEPzjFabaIMIUVkkciHlUMcpKFfPnpf7XbBNqZCAKjt//9L99gc48dJRyO4pCrcpFnv6287d65UGnWjmcUJNQNBhEuh9k4esfEZuBNiYIz3Ouz7Wg5HQoT6v3i3J1X5LluWEcTK1G10T7UN+QrnklH4yUtx35yLp83B5/TGICo0Yq1QnARNbKhL5EXuwAO427XO65zzJ3Lh2ymUfrBY3bHO8NW4ykO7ZNDRdj/fsge1J8k4yaxeimQapDKs4XMhoNnKqUNPQYaiQzNPRoj9JwMvtvOH+WLJqEXHIc8RooWGkdo/SB7zp3q7OuHk6HRJM+AQVP3t0r3A1bVhHonUGlv1ApduM=\n  template:\n    metadata:\n      creationTimestamp: null\n      name: my-secret\n      namespace: gitlab-agent\n    type: Opaque\n```\n\nJust commit the `SealedSecret` and quickly start to watch for the event stream using `kubectl get events --all-namespaces --watch` to see when the sealed secret is unsealed and applied as a regular `Secret`.\n\n## Utility scripts\n\nIf you found the `kubeseal` command above to be quite complex, you can wrap it in a script.\n\n- Create `bin/seal-secret.sh` with the following content:\n\n```bash\n#!/bin/sh\n\nif [ $# -ne 2 ]\n  then\n    echo \"Usage: $0 ignored/my-secret.yaml output-dir/\"\n    echo \"This script requires two arguments\"\n  
  echo \"The first argument should be the unsealed secret\"\n    echo \"The second argument should be the directory to output the sealed secret\"\n  exit 1\nfi\n\n\nSECRET_FILE=$(basename $1)\n\nkubeseal --format=yaml --cert=sealed-secrets.pub.pem \u003C $1 > \"$2/SealedSecret.${SECRET_FILE}\"\n\necho \"Created file $2/SealedSecret.${SECRET_FILE}\"\n```\n\nThis script takes a path to a vanilla Kubernetes secret and an output directory, and tranforms your `Secret` into a `SealedSecret`.\n\n## Winding it up\n\nIn this article, we have seen how you can install Bitnami's Sealed Secret into your cluster and set it up for static secrets management. Please note the installation method provided here works for all the other 3rd party, off-the-shelf applications that can be deployed using Kubernetes manifests only.\n\n## What is next?\n\nIn the next article, we will see how you can access a Kubernetes cluster using GitLab CI/CD and why you might want to do it even if you aim for GitOps.\n\n_[Click here](/blog/the-ultimate-guide-to-gitops-with-gitlab/) for the next tutorial._\n\n\n",[9,812,853],{"slug":893,"featured":6,"template":683},"gitops-with-gitlab-secrets-management","content:en-us:blog:gitops-with-gitlab-secrets-management.yml","Gitops With Gitlab Secrets Management","en-us/blog/gitops-with-gitlab-secrets-management.yml","en-us/blog/gitops-with-gitlab-secrets-management",{"_path":899,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":900,"content":905,"config":911,"_id":913,"_type":14,"title":914,"_source":16,"_file":915,"_stem":916,"_extension":19},"/en-us/blog/gitops-with-gitlab",{"title":901,"description":902,"ogTitle":901,"ogDescription":902,"noIndex":6,"ogImage":825,"ogUrl":903,"ogSiteName":671,"ogType":672,"canonicalUrls":903,"schema":904},"GitOps delivery by connecting Kubernetes clusters to GitLab","This is the first in a seven-part series on GitOps using GitLab's DevOps Platform.","https://about.gitlab.com/blog/gitops-with-gitlab","\n                        
{\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Here's how to do GitOps with GitLab\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Viktor Nagy\"}],\n        \"datePublished\": \"2021-10-21\",\n      }",{"title":906,"description":902,"authors":907,"heroImage":825,"date":908,"body":909,"category":767,"tags":910},"Here's how to do GitOps with GitLab",[786],"2021-10-21","\n\n_It is possible to use GitLab as a best-in-class GitOps tool, and this blog post series is going to show you how. These easy-to-follow tutorials will focus on different user problems, including provisioning, managing a base infrastructure, and deploying various third-party or custom applications on top of them. You can find the entire \"Ultimate guide to GitOps with GitLab\" tutorial series [here](/blog/the-ultimate-guide-to-gitops-with-gitlab/)._\n\nThis post provides an overview of the series, and will provide a bit of context around GitOps, [Infrastructure as Code](/topics/gitops/infrastructure-as-code/), and related notions.\n\n## Start with the buzzwords\n\nThe DevOps industry is changing at a very fast pace, and there are plenty of new ideas popping up around this transformation. What are these? Let’s look into the following concepts and why they matter: DevOps, site reliability engineers (SRE), GitOps, Infrastructure as Code, and containers.\n\nThe term DevOps was coined by Patrick Debois in 2009. DevOps is a cultural approach, not a technology or a set of processes. At its core there are a few principles such as continuous learning, fast feedback loops and a clear flow of work. There is a strong connection between DevOps and SRE, as one can think of the SRE approach as a well-defined implementation of DevOps. Two important aspects of the SRE approach are codified infrastructure management and metrics. 
These enable the level of automation needed for feedback, and their central metrics (SLIs) are being moved to the left down to development teams too.\n\nWith the emergence of cloud computing, infrastructure can be managed fully through APIs. This gave rise to Infrastructure as Code or IaC. IaC means infrastructure engineers almost never have to click through a provider’s UI to configure a new user or a resource. IaC approaches can be used to configure GitLab itself or to allow GitLab to configure a 3rd party system (such as creating a cluster or managing databases).\n\n[GitOps](/topics/gitops/) is the new kid on the block here, and it basically summarizes the current state of our industry. IaC projects likely store their code in version-controlled ways, probably in git. They might even be automated through pipelines, and the resulting infrastructure might have good observability built into the whole stack. So, what does GitOps bring to the table? It brings us two things. First, GitOps wants to avoid drift using a reconciliation loop that automatically “fixes” the infrastructure if it deviates from the codified state found in the IaC repository. Whether this is feasible and how this is done is still a debated question. At the same time, the rise of declarative infrastructure popularized by Kubernetes makes this a compelling approach to many. The second benefit of GitOps is the \"declarative\" ability. By being declarative, the desired state of the infrastructure is described in the git repo. This simplifies complexity in provisioning as the end-system is tasked by setting up the described infrastructure. Contrast this with an imperative setup where the administrators have to codify the exact steps of setting up the infrastructure.\n\nContainers are mentioned here for a single reason: Once we get to deployments, I am going to focus on containerized applications only. 
Containers have already proved to be a great layer of abstraction for application delivery.\n\nYou can [read more about the evolution of DevOps](/blog/gitops-as-the-evolution-of-operations/) and how we got to GitOps as part of this evolution.\n\n## The series overview\n\n**Infrastructure provisioning with GitLab and Terraform**: My next post in the series will outline how to use GitLab to provision infrastructure. In this post I will use a GitLab project to create an EKS cluster following IaC best practices. To do this I will use Terraform, as Terraform is considered to be the de facto standard in infrastructure provisioning, and GitLab has strong built-in support for it.\n\n**Connecting GitLab with a Kubernetes cluster - Quickstart**: This post will show how one can quickly connect a cluster with GitLab using our recommended way, the GitLab Agent for Kubernetes. As this is a quickstart, this approach does not use all the GitLab IaC recommendations. Nevertheless it is a great start that we can build upon later. This post will outline the different approaches for connecting a cluster to GitLab, including our recommended approach.\n\n**Secrets management with GitLab**: In the third post, I will deploy a simple “secrets as code” solution into our cluster and set it up for future use. This will demonstrate how third-party services can easily be deployed and managed with GitLab. Moreover, this specific tool will be used in the subsequent post where we migrate from the quickStart cluster connection to a self-managing, IaC connection.\n\n**Managing the cluster connection from code**: In the second post, we created a GitLab-connected cluster, but there we either need to manage the cluster from our local CLI or need to do some CI magic. Now I will demonstrate how to build out a more robust management for the cluster connection. 
We set up the cluster connection to manage itself using a pull-based approach.\n\n**Integrate the cluster into GitLab**: As GitLab is not just an SCM and CI tool, but the complete DevOps Platform, it has robust monitoring and security integrations with Kubernetes. In this post I am going to show how one can use the GitLab-provided cluster management application on top of our cluster connection, and install NGINX, Cilium, and custom runners with minimal effort, in an IaC style.\n\n**Application deployment with Auto DevOps**: The final post in the series will illustrate how business applications can be easily deployed into the cluster. I will focus on push-based deployments as many development teams might be familiar with pipelines, unlike the most recent pull-based approaches. At the same time, given the content from the previous posts, it should be possible to put together a pull-based deployment as top of Auto DevOps as well.\n\n_[Click here](/blog/the-ultimate-guide-to-gitops-with-gitlab/) for the next tutorial._\n\n\n",[9,769,812],{"slug":912,"featured":6,"template":683},"gitops-with-gitlab","content:en-us:blog:gitops-with-gitlab.yml","Gitops With Gitlab","en-us/blog/gitops-with-gitlab.yml","en-us/blog/gitops-with-gitlab",{"_path":918,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":919,"content":924,"config":932,"_id":934,"_type":14,"title":935,"_source":16,"_file":936,"_stem":937,"_extension":19},"/en-us/blog/how-to-agentless-gitops-aws",{"title":920,"description":921,"ogTitle":920,"ogDescription":921,"noIndex":6,"ogImage":825,"ogUrl":922,"ogSiteName":671,"ogType":672,"canonicalUrls":922,"schema":923},"How to Use Push-Based GitOps with Terraform & AWS ECS/EC2","Learn how GitLab supports agentless approach for GitOps on AWS.","https://about.gitlab.com/blog/how-to-agentless-gitops-aws","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How to use a push-based approach for 
GitOps with Terraform and AWS ECS and EC2\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Cesar Saavedra\"}],\n        \"datePublished\": \"2021-08-10\",\n      }",{"title":925,"description":921,"authors":926,"heroImage":825,"date":928,"body":929,"category":767,"tags":930},"How to use a push-based approach for GitOps with Terraform and AWS ECS and EC2",[927],"Cesar Saavedra","2021-08-10","\n\nIn [part two of our GitOps series](/blog/how-to-agentless-gitops-vars/), we described how to use a push-based (or agentless) approach for [GitOps](/topics/gitops/) by using GitLab scripting capabilities as well as integrating infrastructure-as-code tools into GitOps pipelines. In this third blog post, we’ll also dig deep into how to use a push-based approach, but this time our focus will be on the integrations of Terraform, AWS ECS, and AWS EC2 in GitOps flows. This approach may be preferable when using infrastructure components that aren't Kubernetes, such as VMs, physical devices, and cloud-provider services.\n\nSimilar to Ansible – an agentless IT automation solution – Terraform can be leveraged by the scripting capabilities of GitLab to shape your infrastructure. GitLab also provides out-of-the-box integrations with Terraform, such as GitLab-managed Terraform state and Terraform plan reports in merge requests.\n\n## GitOps flows with GitLab and Terraform\n\nIn this section, we explain how to use GitLab and Terraform for a non-Kubernetes GitOps flow and Kubernetes GitOps.\n\n### GitLab and Terraform for non-K8s infrastructure\n\nGitLab leverages Terraform to provision a non-Kubernetes infrastructure component, namely a MySQL database running on AWS.\n\nNote: Ideally, the provisioning of a database should be an on-demand, self-service process that developers can just use. 
We use this scenario to illustrate a GitOps flow using a non-Kubernetes infrastructure component.\n\n#### How collaboration works in GitLab\n\nSasha, a developer, creates an issue and assigns the issue to Sidney, the database administrator, who then creates a Merge Request (MR) to start her work and invite collaboration with other stakeholders across the organization. Opening the MR automatically creates a feature branch for the GitLab project. Sidney uses Terraform to create an infrastructure-as-code configuration for the database, named `mysqlmain.tf`. The database happens to be an AWS RDS MySQL instance. The database Terraform configuration file should look like this:\n\n![Terraform configuration file for MySQL database](https://about.gitlab.com/images/blogimages/how-to-agentless-gitops-aws/0-tf-mysqlmain-created.png){: .shadow.small.center.wrap-text}\nTerraform configuration file for MySQL database.\n{: .note.text-center}\n\nTake note of the version of the database (`engine_version`), the database storage (`allocated_storage`), and the embedded database admin user (`username`) and password, in the image above.\n\nAs soon as Sidney adds the file `mysqlmain.tf` file to the feature branch, a pipeline is automatically executed by GitLab in the MR. As part of the review process, a \"Terraform plan\" is executed against the Terraform files and the output is attached to the MR as an artifact:\n\n![Terraform plan output attached to Merge Request](https://about.gitlab.com/images/blogimages/how-to-agentless-gitops-aws/1-tf-report-in-MR.png){: .shadow.small.center.wrap-text}\nTerraform plan output attached to MR.\n{: .note.text-center}\n\nIn the picture above, you can see the note \"1 Terraform report was generated in your pipelines\". 
You can click on the `View full log` button to see the output file of the \"Terraform plan\" command that was run against the new configuration file, as seen below:\n\n![Terraform plan output detailed log view](https://about.gitlab.com/images/blogimages/how-to-agentless-gitops-aws/2-tf-plan-output.png){: .shadow.small.center.wrap-text}\nTerraform plan output detailed log view.\n{: .note.text-center}\n\nThe Terraform output shows that a database will be created once this configuration file is applied to the infrastructure. The artifacts attached to an MR provide information that can help stakeholders review the proposed changes. The Terraform output in the MR fosters collaboration between stakeholders, and leads to infrastructure that is more consistent, resilient, reliable, and stable, and helps prevent unscheduled outages.\n\nIn the image below, we see how reviewers can collaborate in GitLab. The screenshow shows that the original requester, Sasha, notices that a database storage of 5 GB is too small, so she makes an inline suggestion to increase the database storage capacity to 10 GB.\n\n![Inline suggestion to increase database storage to 10GB](https://about.gitlab.com/images/blogimages/how-to-agentless-gitops-aws/3-tf-inline-suggestion-by-Sasha.png){: .shadow.small.center.wrap-text}\nInline suggestion to increase database storage to 10GB.\n{: .note.text-center}\n\nInline suggestions foster collaboration and help increase developer productivity suggested changes can be added with the click of a button.\n\nNext, Sidney invites DevOps engineer Devon to collaborate on the MR. Devon notices that the database version in the configuration file is not the latest one. 
He proceeds to make an inline suggestion proposing a more up-to-date version for Sidney to review:\n\n![Inline suggestion to update database version](https://about.gitlab.com/images/blogimages/how-to-agentless-gitops-aws/4-tf-inline-suggestion-by-Devon.png){: .shadow.small.center.wrap-text}\nInline suggestion to update database version.\n{: .note.text-center}\n\nSidney can monitor the discussion between code reviewers on the MR by tracking the number of unresolved threads. So far, there are four unresolved threads:\n\n![Number of unresolved threads displayed at the top of the MR](https://about.gitlab.com/images/blogimages/how-to-agentless-gitops-aws/5-tf-unresolved-threads-for-Sidney.png){: .shadow.small.center.wrap-text}\nNumber of unresolved threads displayed at the top of the MR.\n{: .note.text-center}\n\nSidney starts resolving the threads by following the convenient thread navigation provided by GitLab, which makes it easy for her to process each of the proposed review items. Sidney just needs to click \"Apply suggestion\" to accept an input from a reviewer:\n\n![Applying a suggestion with a single button click](https://about.gitlab.com/images/blogimages/how-to-agentless-gitops-aws/6-tf-apply-inline-suggestion-by-Sidney.png){: .shadow.small.center.wrap-text}\nApplying a suggestion with one click.\n{: .note.text-center}\n\nDevon suggested replacing the embedded database admin username and password with a parameter in the inline review, so Sidney replaces the embedded values with variables. 
The variable values will be managed by masked variables within GitLab:\n\n![Parameterizing variables in Terraform configuration file](https://about.gitlab.com/images/blogimages/how-to-agentless-gitops-aws/7-tf-parameterizing-vars-by-Sidney.png){: .shadow.small.center.wrap-text}\nParameterizing variables in Terraform configuration file.\n{: .note.text-center}\n\nOnce the threads are resolved and the stakeholders involved in thh MR finish collaborating, it's time to merge.\n\nLearn more about how GitLab fosters collaboration using the principles of GitOps in the video below:\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube-nocookie.com/embed/onFpj_wvbLM\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\n\nIn this next example, Sasha is the one merging the MR:\n\n![Merge Request with infrastructure updates being merged](https://about.gitlab.com/images/blogimages/how-to-agentless-gitops-aws/8-tf-MR-merged.png){: .shadow.small.center.wrap-text}\nMR with infrastructure updates being merged.\n{: .note.text-center}\n\nMerging automatically launches a pipeline that will apply the changes to the infrastructure:\n\n![GitOps pipeline completed execution](https://about.gitlab.com/images/blogimages/how-to-agentless-gitops-aws/9-tf-pipeline-complete.png){: .shadow.small.center.wrap-text}\nGitOps pipeline completed execution.\n{: .note.text-center}\n\n#### CI/CD with non-K8s infrastructure\n\nThe CI/CD pipeline in the previous example works by validating the infrastructure configuration files. Then the pipeline validates the proposed updates against the current state of the infrastructure. 
Finally, it applies the updates to the production infrastructure.\n\nRunning this GitOps flow results in a brand new MySQL database on AWS RDS:\n\n![A new MySQL database has been created via a GitOps flow](https://about.gitlab.com/images/blogimages/how-to-agentless-gitops-aws/10-db-ready.png){: .shadow.small.center.wrap-text}\nA new MySQL database has been created via a GitOps flow.\n{: .note.text-center}\n\nBy checking the details of the new MySQL database you can corroborate that the database storage is 10 GB and that the database version is the most current\"\n\n![Resulting MySQL database configuration from the collaboration of stakeholders](https://about.gitlab.com/images/blogimages/how-to-agentless-gitops-aws/11-db-version-and-10g-storage.png){: .shadow.small.center.wrap-text}\nThe MySQL database configuration built by team member collaboration.\n{: .note.text-center}\n\nIn the next section, we look at how a similar GitOps flow can be applied to a Kubernetes cluster.\n\n### GitLab and Terraform for K8s infrastructure\n\nWe skip past all the collaboration steps to focus on a change to the EKS cluster Terraform configuration file. In the picture below, a user is changing the minimum size of the autoscaling group of the EKS cluster from one to two:\n\n![Raising autoscaling group minimum to 2](https://about.gitlab.com/images/blogimages/how-to-agentless-gitops-aws/12-worker-nodes-to-two.png){: .shadow.small.center.wrap-text}\nIncreasing autoscaling group minimum to two.\n{: .note.text-center}\n\nWhen the stakeholder commits the change in the MR, a CI/CD pipeline validates the configuration, performs a plan against production, and applys the updates to the production infrastructure. 
After the pipeline finishes, the user can log into the Amazon EC2 console to verify that the EKS cluster now has a minimum of two nodes in its autoscaling group:\n\n![GitOps flow modified the number of worker nodes in K8s cluster](https://about.gitlab.com/images/blogimages/how-to-agentless-gitops-aws/13-two-worker-nodes-on-AWS.png){: .shadow.small.center.wrap-text}\nGitOps flow modified the number of worker nodes in K8s cluster.\n{: .note.text-center}\n\nSee this scenario in action by watching the [GitOps presentation](/topics/gitops/gitops-multicloud-deployments-gitlab/) on our GitOps topics page.\n\n## GitOps flows for non-K8s (like ECS, EC2)\n\nGitLab also provides Auto Deploy capabilities to streamline application deployment to ECS and EC2, so you can shape infrastructure as desired.\n\n### Deploying to Amazon ECS\n\nAfter creating your ECS cluster, GitLab can deliver your application and its infrastructure to the cluster by including the ECS Deployment template in your `gitlab-ci.yml`, using CI/CD.\n\n```\ninclude:\nTemplate: AWS/Deploy-ECS.gitlab-ci.yml\n```\n\nNext, create the `ECS Task Definition` file in your project that specifies your app's infrastructure requirements, along with other details.\n\n![ECS Task Definition file snippet](https://about.gitlab.com/images/blogimages/how-to-agentless-gitops-aws/14-ECS-taskdef-file.png){: .shadow.small.center.wrap-text}\nECS Task Definition file snippet.\n{: .note.text-center}\n\nFinally, define the project variable that will drive the template:\n\n![Project variables required to auto-deploy to ECS](https://about.gitlab.com/images/blogimages/how-to-agentless-gitops-aws/15-ECS-related-vars.png){: .shadow.small.center.wrap-text}\nProject variables required to auto-deploy to ECS.\n{: .note.text-center}\n\nThe ECS deployment template does the rest, including support review pipelines.\n\n![Review pipeline in GitOps 
flow](https://about.gitlab.com/images/blogimages/how-to-agentless-gitops-aws/16-ECS-review-pipeline.png){: .shadow.small.center.wrap-text}\nReview pipeline in GitOps flow.\n{: .note.text-center}\n\nIn the review pipeline above, stakeholders can review the proposed changes before sending to production. The two screenshots below show different aspects of the proposed changes in the log output of the `review_fargate` job:\n\n![Configuring load balancers in ECS](https://about.gitlab.com/images/blogimages/how-to-agentless-gitops-aws/17-review-fargate-log-begin.png){: .shadow.small.center.wrap-text}\nConfigure load balancers in ECS.\n{: .note.text-center}\n\nSee the configuration for infrastructure components like load balancers in the image above. The image below shows infrastructure components like subnets, security groups, and the assignment of a public IP address:\n\n![Configuring subnets, security groups in ECS](https://about.gitlab.com/images/blogimages/how-to-agentless-gitops-aws/18-review-fargate-log-middle.png){: .shadow.small.center.wrap-text}\nConfiguring subnets and security groups in ECS.\n{: .note.text-center}\n\nOnce all stakeholders are done collaborating on a proposed change to the production infrastructure, the updates are applied using a CI/CD pipeline. Below is an example of this type of pipeline:\n\n![Applying infrastructure updates to production](https://about.gitlab.com/images/blogimages/how-to-agentless-gitops-aws/19-ECS-prod-pipeline.png){: .shadow.small.center.wrap-text}\nApplying infrastructure updates to production.\n{: .note.text-center}\n\nRead our documentation to learn more about [how GitLab users can Auto Deploy to ECS](https://docs.gitlab.com/ee/ci/cloud_deployment/#deploy-your-application-to-the-aws-elastic-container-service-ecs).\n\n### Deploying to Amazon EC2\n\nGitLab also provides a built-in template to provision infrastructure and deploy your applications to EC2 as part of Auto DevOps. 
The template:\n\n- Provisions infrastructure using AWS CloudFormation\n- Pushes application to S3\n- Deploys your application from S3 to EC2\n\nEach of these steps requires a JSON configuration file. Below is an example of a portion of a CloudFormation Stack JSON file used to create your infrastructure:\n\n![CloudFormation stack JSON snippet](https://about.gitlab.com/images/blogimages/how-to-agentless-gitops-aws/20-EC2-portion-stack-file.png){: .shadow.small.center.wrap-text}\nCloudFormation stack JSON snippet.\n{: .note.text-center}\n\nThe JSON used by the Auto Deploy template to push your application to S3 would look similar to this:\n\n![JSON to push application to S3](https://about.gitlab.com/images/blogimages/how-to-agentless-gitops-aws/21-EC2-push-file.png){: .shadow.small.center.wrap-text}\nJSON to push application to S3.\n{: .note.text-center}\n\nAnd the file used for the actual deployment of your application from S3 to EC2 would be like the following:\n\n![JSON to deploy application to EC2](https://about.gitlab.com/images/blogimages/how-to-agentless-gitops-aws/22-EC2-deploy-file.png){: .shadow.small.center.wrap-text}\nJSON to deploy application to EC2.\n{: .note.text-center}\n\nAfter creating these files, you need to create the following variables in your project - displayed here with some sample values:\n\n```\nvariables:\n  CI_AWS_CF_CREATE_STACK_FILE: 'aws/cf_create_stack.json'\n  CI_AWS_S3_PUSH_FILE: 'aws/s3_push.json'\n  CI_AWS_EC2_DEPLOYMENT_FILE: 'aws/create_deployment.json'\n  CI_AWS_CF_STACK_NAME: 'YourStackName'\n```\n\nThe last step is to include the template in your `.gitlab-ci.yml` file:\n\n```\ninclude:\n  - template: AWS/CF-Provision-and-Deploy-EC2.gitlab-ci.yml\n```\n\nMore details on [how GitLab uses Auto Deploy to EC2 are available in the documentation](https://docs.gitlab.com/ee/ci/cloud_deployment/#provision-and-deploy-to-your-aws-elastic-compute-cloud-ec2).\n\n## Agent or agentless: GitLab has your GitOps flows covered\n\nWhether your 
situation calls for an agent-based/pull-approach to doing GitOps, or for an agentless/push-approach, GitLab has your back. GitLab offers the flexibility to choose the approach to GitOps that best fits your specific projects or applications. GitLab also supports many types of infrastructures – from physical components and virtual machines, Kubernetes and containers, as well as infrastructure-as-code tools like Terraform, Ansible, and AWS Cloud Formation.\n",[9,769,931],"demo",{"slug":933,"featured":6,"template":683},"how-to-agentless-gitops-aws","content:en-us:blog:how-to-agentless-gitops-aws.yml","How To Agentless Gitops Aws","en-us/blog/how-to-agentless-gitops-aws.yml","en-us/blog/how-to-agentless-gitops-aws",{"_path":939,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":940,"content":946,"config":952,"_id":954,"_type":14,"title":955,"_source":16,"_file":956,"_stem":957,"_extension":19},"/en-us/blog/how-to-agentless-gitops-vars",{"title":941,"description":942,"ogTitle":941,"ogDescription":942,"noIndex":6,"ogImage":943,"ogUrl":944,"ogSiteName":671,"ogType":672,"canonicalUrls":944,"schema":945},"Using push-based GitOps with GitLab scripts and variables","Learn how GitLab supports agentless approach for GitOps with scripting and variables.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749682051/Blog/Hero%20Images/agentless-gitops-vars-cover-880x587.jpg","https://about.gitlab.com/blog/how-to-agentless-gitops-vars","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How to use a push-based approach for GitOps with GitLab scripting and variables\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Cesar Saavedra\"}],\n        \"datePublished\": \"2021-07-23\",\n      }",{"title":947,"description":942,"authors":948,"heroImage":943,"date":949,"body":950,"category":767,"tags":951},"How to use a push-based approach for GitOps with GitLab scripting and 
variables",[927],"2021-07-23","\n\nIn [part one](/blog/how-to-use-agent-based-gitops/) of our GitOps series, we described how to use a pull-based (or agent-based) approach. In this second blog post, we'll dig deep into how to use a push-based approach. The agentless approach may be preferable for situations with non-Kubernetes infrastructure components or when you don't want to install, run, and maintain agents in each infrastructure component for [GitOps](/topics/gitops/). In this post, we will discuss how the scripting capabilities of GitLab can be used in GitOps workflows, and how to use predefined GitLab variables to shape infrastructure components.\n\n## About a push-based or agentless approach\n\nWith the agentless approach, infrastructure expressed and managed as code on GitLab, and updates and drift detection are automated and handled by GitLab without having to install any agents on infrastructure components.\n\n## How to use scripting in your pipelines to shape infrastructure\n\nGitLab allows automation using scripting. 
Whether you're using Docker, Helm, Ansible, or even direct SSH commands, you can use the scripting capabilities of GitLab to create, shape, and modify infrastructure.\n\nIn the example below, the pipeline determines the shape of the infrastructure the application runs on by specifying a Docker image as well as running Docker commands to build and push an application to the GitLab built-in container registry.\n\n![Using Docker in your pipeline to shape infrastructure](https://about.gitlab.com/images/blogimages/how-to-agentless-gitops-vars/0-docker-use-in-pipeline.png){: .shadow.small.center.wrap-text}\nHow to use Docker in your pipeline to shape infrastructure.\n{: .note.text-center}\n\nThe infrastructure is shaped again at a later stage in the pipeline, but this time by using kubectl and Helm commands:\n\n![Using kubectl in your pipeline to shape infrastructure](https://about.gitlab.com/images/blogimages/how-to-agentless-gitops-vars/1-helm-use-in-pipeline.png){: .shadow.medium.center.wrap-text}\nHow to use kubectl in your pipeline to shape infrastructure.\n{: .note.text-center}\n\nDepending on the type of infrastructure, other technologies can be used to shape the infrastructure. 
In the next example, Ansible is used to run a playbook that sets up the infrastructure for an entire lab environment:\n\n![Using Ansible in your pipeline to shape infrastructure](https://about.gitlab.com/images/blogimages/how-to-agentless-gitops-vars/2-ansible-use-in-pipeline.png){: .shadow.medium.center.wrap-text}\nHow to use Ansible in your pipeline to shape infrastructure.\n{: .note.text-center}\n\nThe scripting capabilities of GitLab pipelines combined with GitLab's CI/CD capabilities allow users to create GitOps flows to manage Infrastructure as Code (IaC), which delivers more resilient infrastructure and less risk of unscheduled downtime.\n\n## How to use Auto DevOps to modify infrastructure using variables\n\nGitLab also allows users to shape infrastructure by using project or group variables. The number of production pods in a Kubernetes cluster is updated to four in the example below:\n\n![Using variables to shape infrastructure](https://about.gitlab.com/images/blogimages/how-to-agentless-gitops-vars/3-ado-modify-infra-via-vars.png){: .shadow.medium.center.wrap-text}\nHow to use variables to shape infrastructure.\n{: .note.text-center}\n\nThe number of the production pods are changed to four on the next execution of the pipeline:\n\n![Production pods increased via a variable update](https://about.gitlab.com/images/blogimages/how-to-agentless-gitops-vars/4-ado-modified-infra-via-vars.png){: .shadow.medium.center.wrap-text}\nProduction pods changed using a variable update.\n{: .note.text-center}\n\nThere are many GitLab [build and deployment variables](https://docs.gitlab.com/ee/topics/autodevops/customize.html#build-and-deployment) that can modify infrastructure. 
[PostgreSQL](https://www.postgresql.org/) is provisioned as a component in infrastructure by default in GitLab to support applications that require a database and also provides [these variables](https://docs.gitlab.com/ee/topics/autodevops/customize.html#database) to customize it.\n\n## How GitLab capabilities help agentless infrastructure\n\nThe scripting capabilities of GitLab are a convenient way to shape infrastructure components in GitOps workflows using a push-based approach. This method allows for the easy integration of IaC tools in your GitOps pipelines. If you are doing IaC and GitOps for non-Kubernetes infrastructure components, this is the best approach. GitLab also provides out-of-the-box variables, so users can impact selected infrastructure components. In the final part of this GitOps series, we will discuss an agentless approach using our integration to Terraform as well as examples of GitOps flows for AWS ECS and EC2.\n\nCover image by [Rod Long](https://unsplash.com/@rodlong?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText) on [Unsplash](https://unsplash.com/s/photos/machu-picchu?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText)\n{: .note}\n\n\n## Read more on GitOps with GitLab: \n\n- [GitOps with GitLab: Infrastructure provisioning with GitLab and Terraform](/blog/gitops-with-gitlab-infrastructure-provisioning/)\n\n- [Here's how to do GitOps with GitLab](/blog/gitops-with-gitlab/)\n\n- [GitOps viewed as part of the Ops evolution](/blog/gitops-as-the-evolution-of-operations/)\n\n- [GitOps with GitLab: Connect with a Kubernetes cluster](/blog/gitops-with-gitlab-connecting-the-cluster/)\n\n\n\n",[9,769,931],{"slug":953,"featured":6,"template":683},"how-to-agentless-gitops-vars","content:en-us:blog:how-to-agentless-gitops-vars.yml","How To Agentless Gitops 
Vars","en-us/blog/how-to-agentless-gitops-vars.yml","en-us/blog/how-to-agentless-gitops-vars",{"_path":959,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":960,"content":966,"config":972,"_id":974,"_type":14,"title":975,"_source":16,"_file":976,"_stem":977,"_extension":19},"/en-us/blog/how-to-get-gitops-right-with-iac-security",{"title":961,"description":962,"ogTitle":961,"ogDescription":962,"noIndex":6,"ogImage":963,"ogUrl":964,"ogSiteName":671,"ogType":672,"canonicalUrls":964,"schema":965},"How to get GitOps right with infrastructure as code security","Learn how the GitLab and Indeni integration makes security a core component of your GitOps workflow.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749663403/Blog/Hero%20Images/gitops-partner-cover-image.jpg","https://about.gitlab.com/blog/how-to-get-gitops-right-with-iac-security","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How to get GitOps right with infrastructure as code security\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Ulrica de Fort-Menares\"}],\n        \"datePublished\": \"2021-06-10\",\n      }",{"title":961,"description":962,"authors":967,"heroImage":963,"date":969,"body":970,"category":767,"tags":971},[968],"Ulrica de Fort-Menares","2021-06-10","\nIn today's competitive digital era, it is imperative for organizations to undergo a digital transformation to effectively compete. For many, achieving a digital transformation means transitioning toward a DevOps model.\n\nDevOps has been around for many years, and the development side of the house has benefitted from the core practices of DevOps. However, the infrastructure side of the house has been lagging behind, particularly when it comes to speed. 
With [infrastructure as code (IaC)](/topics/gitops/infrastructure-as-code/) and [GitOps](/topics/gitops/), infrastructure teams have been able to apply the same disciplines and quality gates that are used to manage application code to the infrastructure - to deliver products faster, with more predictability and at scale.\n\n## Security slowing down delivery\n\nWhile the GitOps concept promises faster and more frequent deployment, the last thing you want is to be slowed down by your legacy security programs. How often has your release stopped near the end of process because it failed the security gate? All too often security testing is tacked on at the end of delivery. Developers inevitably spend significant time and energy investigating these security issues, which delays the release. Uncovering issues late in the cycle is expensive and painful to fix, not to mention creating unnecessary stress.\n\nThe software development process has been shifting left to deliver better-quality software faster. By using IaC, you can adopt the same DevOps principle for the infrastructure. Learning from the development world, you should integrate security controls into the development lifecycle early and everywhere.\n\n## How to shift your IaC security checks left\n\nThe core of the partnership between Indeni and GitLab is about making security a key part of the GitOps practice. The [Indeni Cloudrail](https://indeni.com/cloudrail/) and GitLab CI/CD integration brings IaC security into the tools that developers are familiar with and want to use.\n\n![GitOps workflow](https://about.gitlab.com/images/blogimages/secure-gitops-workflow.png){: .shadow}\nHow GitLab CI/CD fits into the Indeni Cloudrail DevOps workflow.\n{: .note.text-center}\n\nThe joint solution modernizes security programs with the shift-left approach and automates infrastructure compliance. Developers no longer need to get in line for security reviews. Instead, IaC will be automatically evaluated for security impacts. 
Security controls are integrated into the development lifecycle before deployment.\n\n![GitOps workflow](https://about.gitlab.com/images/blogimages/secure-gitops1.jpg){: .shadow}\nCatching IaC security violations in GitLab CI/CD.\n{: .note.text-center}\n\nAs shown in the example above, Indeni Cloudrail provides feedback in GitLab CI. This way, security risks relating to the infrastructure can be instantly remediated when they are made so developers can move fast. You can think of the shift security left approach as testing IaC continuously and preventing insecure infrastructure from being deployed.\n\n## Don't let those noisy security tools impede your GitOps practice\n\nSecurity tools are notorious for being noisy with their many false positives. According to the Advanced Technology Academic Research Center [(ATARC) Federal DevSecOps Landscape survey](https://atarc.org/project/devsecops-survey/), too many false positives is the number one frustration with security testing. A noisy security tool can be counterproductive by inadvertently stopping the pipeline frustrating your developers.\n\nWhat makes Indeni Cloudrail unique is its context-based analysis, which refers to its ability to understand the relationships among cloud resources, making in-depth security analyses possible. Cloudrail also factors in already existing resources in the cloud environment to gain a holistic view as part of its analysis. The end result is three times less noise than any comparable IaC security tools in the market. In essence, Cloudrail will only bother developers with problems that truly matter to the organization. Learn more about [what makes Cloudrail unique in this blog post](https://indeni.com/blog/comparing-cloudrail-checkov-tfsec-and-kics-with-testing/).\n\n## Why GitLab and Indeni are better together\n\nBy delivering a developer-centric security tool for IaC, security has a better chance of gaining acceptance in the developer community. 
Together, Indeni and GitLab equip developers with the right tools to support a GitOps model and help organizations with their digital transformation.\n\n## Watch the demo\n\nWatch the Cloudrail demo to see the GitOps workflow for IaC security.\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube-nocookie.com/embed/9WSd0D87Vxc\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\n### About Indeni\n\n_[Indeni](https://indeni.com/) automates best practices for network security and cloud security. Its security infrastructure platform automates health and compliance checks for leading firewalls to maximize uptime and efficiency. Its Infrastructure-as-Code security analysis tool, Cloudrail, automates infrastructure compliance to prevent insecure cloud environments from being deployed._\n\nCover image by [Dimitry Anikin](https://unsplash.com/@anikinearthwalker) on [Unsplash](https://unsplash.com/photos/DsmjpJzm2i0)\n",[9,702,769,231],{"slug":973,"featured":6,"template":683},"how-to-get-gitops-right-with-iac-security","content:en-us:blog:how-to-get-gitops-right-with-iac-security.yml","How To Get Gitops Right With Iac Security","en-us/blog/how-to-get-gitops-right-with-iac-security.yml","en-us/blog/how-to-get-gitops-right-with-iac-security",{"_path":979,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":980,"content":986,"config":994,"_id":996,"_type":14,"title":997,"_source":16,"_file":998,"_stem":999,"_extension":19},"/en-us/blog/how-to-provision-reviewops",{"title":981,"description":982,"ogTitle":981,"ogDescription":982,"noIndex":6,"ogImage":983,"ogUrl":984,"ogSiteName":671,"ogType":672,"canonicalUrls":984,"schema":985},"Deploying dynamic review environments with MRs and Argo CD","Here's how to use the Argo CD ApplicationSet to provision a ‘ReviewOps’ environment based on merge request 
changes.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749681920/Blog/Hero%20Images/kubernetes.png","https://about.gitlab.com/blog/how-to-provision-reviewops","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How to provision dynamic review environments using merge requests and Argo CD\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Joe Randazzo\"},{\"@type\":\"Person\",\"name\":\"Madou Coulibaly\"}],\n        \"datePublished\": \"2022-08-02\",\n      }",{"title":987,"description":982,"authors":988,"heroImage":983,"date":991,"body":992,"category":767,"tags":993},"How to provision dynamic review environments using merge requests and Argo CD",[989,990],"Joe Randazzo","Madou Coulibaly","2022-08-02","\nWe recently learned of a new contribution to the ApplicationSet in the Argo CD project, specifically the [Pull Request generator for GitLab](https://github.com/argoproj/argo-cd/blob/master/docs/operator-manual/applicationset/Generators-Pull-Request.md#gitlab) and decided to take it for a spin. What makes this interesting is now dynamic [review environments](https://docs.gitlab.com/ee/ci/review_apps/index.html) can be provisioned intuitively from the merge request (MR) using a [GitOps](/topics/gitops/) workflow. The benefit is code reviewers or designers can quickly review any app changes to your Kubernetes cluster all from within the merge request.\n\nIn traditional testing workflows, you may have pushed your changes into a development environment, waiting for the QA and UX team to pull those changes into their environment for further review, and then received feedback based on your small change. At this point, time was wasted between various teams with environment coordination or adding bugs to the backlog of the new changes. 
\n\nWith the combination of a merge request and review environments, you can quickly spin up a test environment based on the changes of your feature branch. This means the QA or UX team can suggest improvements or changes during the code review process without wasting cycles.\n\nThe introduction of the ApplicationSet has given greater flexibility to Argo CD workflows such as:\n\n- Allowing unprivileged cluster users to deploy applications (without namespace access)\n- Deploying applications to multiple clusters at once\n- Deploying many applications from a single monorepo\n- **And triggering review environments based on a pull request**\n\n### Let's review the ApplicationSet and the GitLab Pull Request Generator\n\nThe [Pull Request Generator](https://argo-cd.readthedocs.io/en/latest/operator-manual/applicationset/Generators-Pull-Request) will use the GitLab API to automatically discover new merge requests within a repository. Depending on the filter match of the MR, a review environment will then be generated.\n\n```yaml\napiVersion: argoproj.io/v1alpha1\nkind: ApplicationSet\nmetadata:\n  name: review-the-application\n  namespace: argocd\nspec:\n  generators:\n  - pullRequest:\n      gitlab:\n        project: \u003Cproject-id>\n        api: https://gitlab.com/\n        tokenRef:\n          secretName: \u003Cgitlab-token>\n          key: token\n        pullRequestState: opened\n      requeueAfterSeconds: 60\n  template:\n    metadata:\n      name: 'review-the-application-{{number}}'\n    spec:\n      source:\n        repoURL: \u003Crepository-with-manifest-files>\n        path: chart/\n        targetRevision: 'HEAD'\n        helm:\n          parameters:\n          - name: \"image.repository\"\n            value: \"registry.gitlab.com/\u003Cgroup-and-project-path>/{{branch}}\"\n          - name: \"image.tag\"\n            value: \"{{head_sha}}\"\n          - name: \"service.url\"\n            value: \"the-application-{{number}}.\u003Cip>.nip.io\"\n      project: 
default\n      destination:\n        server: https://kubernetes.default.svc\n        namespace: dynamic-environments-with-argo-cd\n```\n#### Fields\n\n* `project`: The GitLab Project ID\n* `api`: URL of GitLab instance\n* `tokenRef`: The secret to monitor merge request changes\n* `labels`: Provision review environments based on a GitLab label\n* `pullRequestState`: Provision review environments based on [MR states](https://docs.gitlab.com/ee/api/merge_requests.html)\n\nFilter options include GitLab labels, merge request state (open, closed, merged), and branch match. Templating options include merge request ID, branch name, branch slug, head sha, and head short sha.\n\nSee the latest [ApplicationSet documentation](https://argo-cd.readthedocs.io/en/latest/operator-manual/applicationset/Generators-Pull-Request/#gitlab) for additional details.\n\nFor this blog post, we explore using the Argo CD ApplicationSet to provision a “ReviewOps” environment based on merge request changes.\n\n### Prerequisites\n\nThe following tools are required for running this tutorial. Please install and/or configure them before getting started.\n\n- **Tools**\n  - GitLab v15.0+ \n  - Kubernetes cluster v1.21+\n  - Argo CD 2.5.0+\n- **CLI**\n  - kubectl v1.21+\n\n### Explore the Source Code\n\nFirst, let’s explore the [source code](https://gitlab.com/madou-stories/dynamic-environments-with-argo-cd) for the tutorial.\n\nThis GitLab group is composed of the 2 following projects:\n\n- `The Application`: contains the source code of a containerized application and its CI/CD pipeline\n- `The Application Configuration`: contains the application configuration (Kubernetes Manifests) managed by Helm\n\n![git-repository](https://about.gitlab.com/images/blogimages/2022-08-01-how-to-provision-reviewops/git-repository.png)\n\n### Setting up GitLab\n\n1. 
Create your GitLab Group and fork the [The Application](https://gitlab.com/madou-stories/dynamic-environments-with-argo-cd/the-application) and [The Application Configuration](https://gitlab.com/madou-stories/dynamic-environments-with-argo-cd/the-application-configuration) projects into it.\n\n2. In `The Application Configuration` project, edit the `**manifests/applicationset.yml**` as follows:\n\n  * `.spec.generators.pullRequest.gitlab.project`: The Project ID of `The Application`\n  * `.spec.template.spec.source.repoURL`: Git URL of `The Application Configuration`\n  * `.spec.template.spec.source.helm.parameters.\"image.repository\"`: Point to image repository, for example `registry.gitlab.com/\u003CYour_GitLab_Group>/the-application/{{branch}}`\n\n  Note: keep the {{branch}} string as is and replace \u003CYour_GitLab_Group> with the name of the group you created in step 1.\n\n  * `.spec.template.spec.source.helm.parameters.\"service.url\"`: Templated with `the-application-{{number}}.\u003CYour_Kube_Ingress_Base_Domain>`\n\n  Note: keep the {{number}} string as is and replace \u003CYour_Kube_Ingress_Base_Domain> with the base domain of your Kubernetes Cluster.\n\n3. Define the following CI/CD variables at the group level:\n\n   - `ARGOCD_SERVER_URL`, the Argo CD server address\n   - `ARGOCD_USERNAME`, the username of your Argo CD account\n   - `ARGOCD_PASSWORD`, the password of your Argo CD account\n   - `KUBE_INGRESS_BASE_DOMAIN`, the base domain of your Kubernetes Cluster\n\n   ![cicd-variables](https://about.gitlab.com/images/blogimages/2022-08-01-how-to-provision-reviewops/cicd-variables.png)\n\n4. Generate a Group access token to grant `read_api` and `read_registry` access to this group and its sub-projects.\n\n   ![group-access-token](https://about.gitlab.com/images/blogimages/2022-08-01-how-to-provision-reviewops/group-access-token.png)\n\n   Save the group access token somewhere safe. We will use it later.\n\n### Setting up Kubernetes\n\n1. 
Create a namespace called `dynamic-environments-with-argo-cd`.\n   ```shell\n   kubectl create namespace dynamic-environments-with-argo-cd\n   ```\n2. Create a Kubernetes secret called `gitlab-token-dewac` to allow Argo CD to use the GitLab API.\n   ```shell\n   kubectl create secret generic gitlab-token-dewac -n argocd --from-literal=token=\u003CYour_Access_Token>\n   ```\n3. Create another Kubernetes secret called `gitlab-token-dewac` to allow Kubernetes to pull images from the GitLab Container Registry.\n   ```shell\n   kubectl create secret generic gitlab-token-dewac -n dynamic-environments-with-argo-cd --from-literal=token=\u003CYour_Access_Token>\n   ```\n\n### Setting up Argo CD\n\n1. Create the Argo CD ApplicationSet to generate an Argo CD Application associated with a merge request.\n   ```shell\n   kubectl apply -f https://gitlab.com/\u003CYour_GitLab_Group>/the-application-configuration/-/raw/main/manifests/applicationset.yaml\n   ```\n\n### Update the source code\n\n1. In `The Application` project, create a GitLab issue, then an associated branch and merge request. \n2. In Argo CD, a new application is provisioned called `review-the-application` based on the new merge request event.\n\n   ![review-the-application-argocd](https://about.gitlab.com/images/blogimages/2022-08-01-how-to-provision-reviewops/review-the-application-argocd.png)\n\n3. In `The Application` project, edit the `index.pug` and replace `p Welcome to #{title}`  with `p Bienvenue à #{title}`.\n4. Commit into your recent branch which is going to trigger a pipeline run.\n5. 
In the CI/CD > Pipelines, you will find the following pipeline running on your merge request:\n\n   ![feature-branch-pipeline](https://about.gitlab.com/images/blogimages/2022-08-01-how-to-provision-reviewops/feature-branch-pipeline.png)\n\n   where,\n\n   - `docker-build`: builds the container image\n   - `reviewops`: configures and deploys the container into the review environment using Argo CD\n   - `stop-reviewops`: deletes the review environment\n\n6. Once completed, the `review-the-application` application in Argo CD is now synced.\n\n   ![review-the-application-synced](https://about.gitlab.com/images/blogimages/2022-08-01-how-to-provision-reviewops/review-the-application-synced.png)\n\n7. From the merge request, click on the `View app` button to access to your application.\n\n   ![view-app-button](https://about.gitlab.com/images/blogimages/2022-08-01-how-to-provision-reviewops/view-app-button.png)\n\n   The outcome should be as follows:\n\n   ![express-app](https://about.gitlab.com/images/blogimages/2022-08-01-how-to-provision-reviewops/express-app.png)\n\n8. You have succesfully provisioned a dynamic review environment based on your merge request! 
Once the merge request is closed, the environment will be automatically cleaned up.\n\n## To sum up\n\nHopefully this tutorial has been helpful and has inspired your GitLab + Argo CD workflows with review environments.\n\nWe'd love to hear in the comments on how this is working for you, as well as your ideas on how we can make GitLab a better place for GitOps workflows.\n",[724,9,769],{"slug":995,"featured":6,"template":683},"how-to-provision-reviewops","content:en-us:blog:how-to-provision-reviewops.yml","How To Provision Reviewops","en-us/blog/how-to-provision-reviewops.yml","en-us/blog/how-to-provision-reviewops",{"_path":1001,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":1002,"content":1008,"config":1015,"_id":1017,"_type":14,"title":1018,"_source":16,"_file":1019,"_stem":1020,"_extension":19},"/en-us/blog/how-to-stream-logs-through-the-gitlab-dashboard-for-kubernetes",{"title":1003,"description":1004,"ogTitle":1003,"ogDescription":1004,"noIndex":6,"ogImage":1005,"ogUrl":1006,"ogSiteName":671,"ogType":672,"canonicalUrls":1006,"schema":1007},"How to stream logs through the GitLab Dashboard for Kubernetes","In GitLab 17.2, users can now view Kubernetes pod and container logs directly via the GitLab UI. 
This tutorial shows how to use this new feature to simplify monitoring Kubernetes infrastructure.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749662245/Blog/Hero%20Images/blog-image-template-1800x945__16_.png","https://about.gitlab.com/blog/how-to-stream-logs-through-the-gitlab-dashboard-for-kubernetes","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How to stream logs through the GitLab Dashboard for Kubernetes\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Daniel Helfand\"}],\n        \"datePublished\": \"2024-08-19\",\n      }",{"title":1003,"description":1004,"authors":1009,"heroImage":1005,"date":1011,"body":1012,"category":767,"tags":1013},[1010],"Daniel Helfand","2024-08-19","Developers are context-switching more frequently, needing to understand and use multiple tools to accomplish complex tasks. These tools all have different user experiences and often do not present all the information needed to successfully develop, troubleshoot, and ship critical features. It is challenging enough to release and monitor software changes without also needing to understand so many tools.\n\nWith the addition of [pod log streaming through the GitLab Dashboard for Kubernetes in v17.2](https://about.gitlab.com/releases/2024/07/18/gitlab-17-2-released/#log-streaming-for-kubernetes-pods-and-containers), developers can go straight from a merge request review to watching a deployment rolled out to Kubernetes. This new feature will:\n- allow developers to avoid switching tooling\n- ease the process of troubleshooting and monitoring deployments and post-deployment application health\n- strengthen [GitOps workflows](https://docs.gitlab.com/ee/user/clusters/agent/gitops.html) to easily manage application and infrastructure changes\n\nThe new feature allows GitLab users to view the logs of pods and containers directly via the GitLab UI. 
In previous versions of GitLab, users could configure a GitLab project to view pods deployed to certain namespaces on an associated cluster. This new feature allows users to further monitor workloads running on Kubernetes without needing to switch to another tool.\n\nIn the sections below, you will learn how to use this new feature by adding a Kubernetes cluster to a GitLab project, deploying a sample workload to a cluster, and viewing the logs of this workload running on a cluster. \n\n> Need to know the basics of Kubernetes? [Read this quick introductory blog](https://about.gitlab.com/blog/kubernetes-the-container-orchestration-solution/).\n\n## Configure a GitLab project to view Kubernetes resources\n\nBefore proceeding with this section, the following prerequisites are required:\n* a remote Kubernetes cluster (i.e., not running locally on your machine)\n* access to a GitLab v17.2 account\n* [this repository](https://gitlab.com/gitlab-da/tutorials/cloud-native/gitlab-k8s-log-streaming-example) forked to a GitLab group to which you have access\n* Helm CLI\n* kubectl CLI\n\nOnce you have satisfied the prerequisites involved, add an agent configuration file to the GitLab project you forked. The configuration file allows users to control permissions around how GitLab users may interact with the associated Kubernetes cluster.\n\nYou can use the configuration file included in this GitLab project by changing the following file: `.gitlab/agents/k8s-agent/config.yaml`. Replace the `\u003CGitLab group>` in the id property shown below with the group where you have forked the example project. 
This config file will allow [GitLab to access your cluster via an agent](https://docs.gitlab.com/ee/user/clusters/agent/user_access.html) that can be installed on your cluster.\n\n```yaml\nuser_access:\n  access_as:\n    agent: {}\n  projects:\n    - id: \u003CGitLab group>/gitlab-k8s-log-streaming-example\n```\n\nOnce the above file is edited, you can commit and push these changes to the main branch of the project. \n\n## Add GitLab Kubernetes agent to cluster\n\nWith the agent configuration file added, now add the cluster to GitLab by installing an agent on your cluster. In the GitLab UI, go to your project and, on the left side of the screen, select **Operate > Kubernetes clusters**. Once on this page, select the **Connect a cluster** button on the right side of the screen. From the dropdown menu, you can then select the agent, which should be `k8s-agent`. Click **Register** to get instructions for how to install the agent on your cluster.\n\nThe instructions presented to you after registering the agent will be to run a helm command that will install the GitLab agent on your cluster. Before running the command locally, you will want to ensure your Kubernetes context is targeting the cluster you want to work with. Once you have verified you are using the correct kubeconfig locally, you can run the helm command to install the agent on your cluster.\n\nOnce both pods are running, GitLab should be able to connect to the agent. Run the following command to wait for the pods to start up:\n\n```shell\nkubectl get pods -n gitlab-agent-k8s-agent -w\n```\n\n## Deploy sample application to your cluster\n\nBefore you can view logs of a workload through GitLab, you first need to have something running on your cluster. To do this, you can run the following kubectl command locally. 
\n\n```shell\nkubectl apply -f https://gitlab.com/gitlab-da/tutorials/cloud-native/gitlab-k8s-log-streaming-example/-/raw/main/k8s-manifests/k8s.yaml\n```\n\nAfter the command runs successfully, you are now ready to complete the final step to set up a Kubernetes dashboard via GitLab.\n\n## View pod logs through the GitLab UI\n\nTo add the Kubernetes dashboard via the GitLab UI, go to your project and, on the left side of the screen, select **Operate > Environments**. On the top right side of the screen, select the **Create an environment**.\n\nNext, you can give your environment a name, select the GitLab agent (i.e. `k8s-agent`), and pick a namespace for the Kubernetes dashboard to focus on. Since the application is running in the `gitlab-k8s-log-streaming-example-dev` namespace, select this option from the namespace dropdown. After naming the environment and selecting the agent and namespace, click **Save**.\n\nAfter creating the environment, you should now see information about the application’s pods displayed via the GitLab UI.\n\n![Kubernetes logs - image 2](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749676402/Blog/Content%20Images/Screenshot_2024-08-20_at_12.15.08_PM.png)\n\nGo to the right side of the screen and click **View Logs** to see logs for one of the pods associated with the application. \n\n![Kubernetes dashboard - image 1](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749676402/Blog/Content%20Images/Screenshot_2024-08-20_at_12.16.56_PM.png)\n\n## Try it out and share feedback\n\nThe introduction of pod log streaming in GitLab v17.2 will help GitLab users get one step closer to managing complex deployments to Kubernetes, as well as monitoring and troubleshooting issues post deployment via a common user experience. We are excited to hear more about users’ experiences with this new enhancement and how it helps improve DevOps workflows around Kubernetes. 
To share your experience with us, you can open an issue to the [project associated with this tutorial](https://gitlab.com/gitlab-da/tutorials/cloud-native/gitlab-k8s-log-streaming-example). Or, [comment directly in the Kubernetes log streaming feedback issue](https://gitlab.com/gitlab-org/gitlab/-/issues/478379) to report information to the GitLab engineering team.\n\nMore information on getting started with the GitLab Dashboard for Kubernetes can be found in the documentation [here](https://docs.gitlab.com/ee/ci/environments/kubernetes_dashboard.html).\n\n> To explore the GitLab Dashboard for Kubernetes as well as other more advanced features of GitLab, sign up for [our free 30-day trial of GitLab Ultimate](https://about.gitlab.com/free-trial/).\n",[1014,9,812,834],"features",{"slug":1016,"featured":91,"template":683},"how-to-stream-logs-through-the-gitlab-dashboard-for-kubernetes","content:en-us:blog:how-to-stream-logs-through-the-gitlab-dashboard-for-kubernetes.yml","How To Stream Logs Through The Gitlab Dashboard For Kubernetes","en-us/blog/how-to-stream-logs-through-the-gitlab-dashboard-for-kubernetes.yml","en-us/blog/how-to-stream-logs-through-the-gitlab-dashboard-for-kubernetes",{"_path":1022,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":1023,"content":1029,"config":1035,"_id":1037,"_type":14,"title":1038,"_source":16,"_file":1039,"_stem":1040,"_extension":19},"/en-us/blog/how-to-use-agent-based-gitops",{"title":1024,"description":1025,"ogTitle":1024,"ogDescription":1025,"noIndex":6,"ogImage":1026,"ogUrl":1027,"ogSiteName":671,"ogType":672,"canonicalUrls":1027,"schema":1028},"How to use a pull-based (agent-based) approach for GitOps","Learn how GitLab supports agent-based approach for GitOps","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749682037/Blog/Hero%20Images/agent-based-gitops-cover-880x587.jpg","https://about.gitlab.com/blog/how-to-use-agent-based-gitops","\n                        {\n        \"@context\": 
\"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How to use a pull-based (agent-based) approach for GitOps\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Cesar Saavedra\"}],\n        \"datePublished\": \"2021-06-23\",\n      }",{"title":1024,"description":1025,"authors":1030,"heroImage":1026,"date":1031,"body":1032,"category":767,"tags":1033},[927],"2021-06-23","\n\nIn the previous post, titled [3 ways to approach GitOps](https://about.gitlab.com/blog/gitops-done-3-ways/), we discussed the many benefits and options that GitLab supports for fulfilling the [GitOps](/topics/gitops/) requirements of customers, whose IT environments are composed of heterogeneous technologies and infrastructures. This post is a 3-part series, in which we delve deeper into these options. In this first part, we cover the pull-based or agent-based approach.\n\n## About a pull-based or agent-based approach\n\nIn this approach, an agent is installed in your infrastructure components to pull changes whenever there is a drift from the desired configuration, which resides in GitLab. Although the infrastructure components could be anything from a physical server or router to a VM or a database, we will focus on a Kubernetes cluster in this section.\n\nIn the following example, the [reconciliation loop](https://about.gitlab.com/solutions/gitops/) is made up of two components: an agent running on the Kubernetes cluster and a server-side service running on the GitLab instance. One of the benefits of this approach is that you don’t have to expose your Kubernetes clusters outside your firewall. Another benefit is its distributed architecture, in that agents running on the infrastructure components are in charge of correcting any drift relieving the server-side from resource consumption. 
This approach requires the maintenance and installation of agents on all infrastructure components you want to be part of your GitOps flows.\n\n### GitLab Agent for Kubernetes as a pull-based approach\n\n[Introduced](https://about.gitlab.com/releases/2020/09/22/gitlab-13-4-released/#introducing-the-gitlab-kubernetes-agent) as part of GitLab 13.4, the GitLab Agent for Kubernetes runs on your Kubernetes cluster and pulls changes in your infrastructure configuration from GitLab to your cluster keeping your infrastructure configuration from drifting away from its desired state.\n\nGitLab Agent for Kubernetes (the feature) is currently implemented as two components ([architecture doc](https://gitlab.com/gitlab-org/cluster-integration/gitlab-agent/-/blob/master/doc/architecture.md)):\n\n- GitLab Agent for Kubernetes (agentk program): The component that users install into their cluster.\n\n- GitLab Agent for Kubernetes Server (kas program): The server-side counterpart, that runs \"next to GitLab.\"\n\nThe high-level architecture of the GitLab Agent for Kubernetes is depicted below:\n\n![GitLab K8s agent high-level architecture](https://about.gitlab.com/images/blogimages/how-to-use-agent-based-gitops/0-K8s-agent-arch.png){: .shadow.small.center.wrap-text}\nGitLab K8s agent high-level architecture.\n{: .note.text-center}\n\nThe **agentk** is installed on your Kubernetes cluster and it is the component that applies updates to the infrastructure. The **kas** is installed on the GitLab instance and it manages the authentication and authorization between **agentk** instances and GitLab, monitors projects for any changes and gathers latest project manifests to send to **agentk** instances.\n\n> **NOTE:** on Gitlab.com, the **kas** is installed and maintained by GitLab. On self-managed instances, the customer needs to install it.\n\nIn the following self-managed instance example, we go through a GitOps flow that leverages the pull-based approach to GitOps.  
After the **agentk** component has already been installed on the K8s cluster, the user proceeds to log on to the GitLab instance and creates a project called **gitops-project**:\n\n![Creating the gitops-project](https://about.gitlab.com/images/blogimages/how-to-use-agent-based-gitops/1-create-gitops-proj.png){: .shadow.medium.center.wrap-text}\nCreating the gitops-project.\n{: .note.text-center}\n\nThe project **gitops-project** will be the one that will be monitored or observed by the **kas** component. Then, under **gitops-project**, the user creates an empty manifest file called **manifest.yaml**. This is the manifest file that will contain the Infrastructure as Code configuration for this project:\n\n![Manifest file created](https://about.gitlab.com/images/blogimages/how-to-use-agent-based-gitops/2-manifest-file-created.png){: .shadow.medium.center.wrap-text}\nManifest file created.\n{: .note.text-center}\n\nNext, the user creates a Kubernetes agent configuration repository project, **kubernetes-agent**, which will contain information pertinent to the **kas** component.\n\n![Creating the kubernetes-agent project](https://about.gitlab.com/images/blogimages/how-to-use-agent-based-gitops/3-create-K8s-agent-proj.png){: .shadow.medium.center.wrap-text}\nCreating the kubernetes-agent project.\n{: .note.text-center}\n\nWithin the **kubernetes-agent** project, the user creates a subdirectory **.gitlab/agents/agent1**, where **agent1** is the name given to this specific agent:\n\n![Config.yaml file created](https://about.gitlab.com/images/blogimages/how-to-use-agent-based-gitops/4-config-yaml-created.png){: .shadow.medium.center.wrap-text}\nConfig.yaml file created.\n{: .note.text-center}\n\nNotice that in the screenshot above, the project to be observed, **gitops-project**, was created in an earlier step.\n\nThe next step consists of the creation of a GitLab Rails Agent record to associate it with the Kubernetes agent configuration repository project. 
In the following screenshot, you see the commands that the user enters to first identify the task-runner pod, to log into it, to enter the Rails Console, and finally to create the agent record and a token for it:\n\n![Agent record created](https://about.gitlab.com/images/blogimages/how-to-use-agent-based-gitops/5-agent-record-created.png){: .shadow.medium.center.wrap-text}\nAgent record created.\n{: .note.text-center}\n\nIn the above screenshot, the last command uses the agent token to create a secret on the K8s cluster for secured communication between the **agentk** and the **kas** components.\n\nThe **agentk** pod creation on the K8s cluster is the next step. For this, the user creates a **resources.yml** file, in which the secured communication protocol between the **agentk** and the **kas** is specified as shown in the following snippet:\n\n![Websockets line](https://about.gitlab.com/images/blogimages/how-to-use-agent-based-gitops/6-wss-line-in-resources-yml.png){: .shadow.medium.center.wrap-text}\nWebSockets communication specified in the resources.yml file.\n{: .note.text-center}\n\nIn the above snippet, secured WebSockets protocol is being used. GitLab also supports gRPC.\n\nOnce the **resources.yml** file is updated with the corresponding GitLab instance information, the user proceeds to create the pod:\n\n![Agentk pod created](https://about.gitlab.com/images/blogimages/how-to-use-agent-based-gitops/7-agentk-created.png){: .shadow.medium.center.wrap-text}\nCreation of the **agentk** pod.\n{: .note.text-center}\n\nIn the screenshot above, you can see the execution of the **kubectl apply** that created the **agentk** pod in the K8s cluster.\n\nNow that the **agentk** and **kas** have been installed and are communicating securely with each other, the user can start performing some GitOps flows. 
Although the [GitLab Flow](https://about.gitlab.com/topics/version-control/what-is-gitlab-flow/) is the recommended approach for DevOps, it is also applicable to GitOps flows; after all GitOps is all about applying the goodness of DevOps to managing [Infrastructure as Code](/topics/gitops/infrastructure-as-code/).\n\nThis means that the user should create an issue and then a merge request, in which all stakeholders can collaborate towards the resolution of the issue. For the sake of brevity, in this technical blog post, we will skip all these steps and show you how updates to the Infrastructure as Code configuration files are automatically applied to the infrastructure components.\n\nNOTE: Fostering Collaboration is a great benefit of GitOps. For more information on this, check out this short [tech video](https://youtu.be/onFpj_wvbLM).\n\nFor example, the user can start making updates to the **manifest.yaml** file under the **gitops-project**, which is being observed by the kas component. Here you can see the user has pasted content into this file:\n\n![Manifest.yaml file updated](https://about.gitlab.com/images/blogimages/how-to-use-agent-based-gitops/8-manifest-yaml-updated.png){: .shadow.medium.center.wrap-text}\nManifest.yaml file updated.\n{: .note.text-center}\n\nRemember that this file had been created as an empty file. As soon as the user commits the changes displayed above, the **kas** component will detect the changes and communicate these to the **agentk** component, which is running on the K8s cluster. The **agentk** will immediately apply these changes to the infrastructure. In this example, the user has updated the infrastructure configuration file to have 2 instances of an nginx. 
As shown in the screenshot below, the **agentk** has applied these updates by the instantiation of 2 nginx pods in the K8s cluster:\n\n![Two nginx pods up and running](https://about.gitlab.com/images/blogimages/how-to-use-agent-based-gitops/9-two-nginx-running.png){: .shadow.medium.center.wrap-text}\nGitOps flow instantiates two nginx pods.\n{: .note.text-center}\n\nIf the user were to change the **manifest.yaml** file one more time and increment the replicas of the nginx pod to 3:\n\n![Manifest.yaml file updated with 3 nginx](https://about.gitlab.com/images/blogimages/how-to-use-agent-based-gitops/10-manifest-yaml-updated-again.png){: .shadow.medium.center.wrap-text}\nManifest.yaml file updated with 3 nginx instances.\n{: .note.text-center}\n\nAgain, as soon as the commit takes place, the **kas** component detects the update and communicates this to the **agentk** component, which in turn, spins up a third nginx pod in the K8s cluster:\n\n![Three nginx pods up and running](https://about.gitlab.com/images/blogimages/how-to-use-agent-based-gitops/11-three-nginx-running.png){: .shadow.medium.center.wrap-text}\nGitOps flow instantiates a third nginx pod.\n{: .note.text-center}\n\nLastly, the user can check the log files of the different components running on GKE, in this example. 
In the following screenshot, the user can see the **kas** component running on the GitLab instance:\n\n![kas running on GKE](https://about.gitlab.com/images/blogimages/how-to-use-agent-based-gitops/12-kas-on-GKE.png){: .shadow.medium.center.wrap-text}\nThe **kas** component running on GKE.\n{: .note.text-center}\n\nAnd then the user can drill down into the log of the **kas** component, and see how it is detecting commits on the project it is observing:\n\n![kas log on GKE](https://about.gitlab.com/images/blogimages/how-to-use-agent-based-gitops/13-kas-log-on-GKE.png){: .shadow.medium.center.wrap-text}\nThe **kas** log output on GKE.\n{: .note.text-center}\n\nLikewise, the user can navigate to the **agentk** component of the K8s cluster:\n\n![agentk running on GKE](https://about.gitlab.com/images/blogimages/how-to-use-agent-based-gitops/14-agentk-on-GitLab.png){: .shadow.medium.center.wrap-text}\nThe **agentk** component running on GKE.\n{: .note.text-center}\n\nAnd, again drill down to its log to see, how the **agentk** component runs synchronizations with the **kas** component:\n\n![agentk log on GKE](https://about.gitlab.com/images/blogimages/how-to-use-agent-based-gitops/15-agentk-log-top-on-GitLab.png){: .shadow.medium.center.wrap-text}\nThe **agentk** log output on GKE.\n{: .note.text-center}\n\nIn the following screenshot, the user sees the log statements indicating that the **agentk** is instantiating a third instance of an nginx pod:\n\n![agentk instantiating a third nginx pod](https://about.gitlab.com/images/blogimages/how-to-use-agent-based-gitops/16-agentk-log-synced-on-GitLab.png){: .shadow.medium.center.wrap-text}\nThe **agentk** instantiating a third nginx pod.\n{: .note.text-center}\n\nThe above sections described an example of the setup needed to install and run the GitLab Agent for Kubernetes as well as how projects are monitored and synchronized from GitLab to a running K8s cluster.\n\n## Conclusion\n\nWe have gone over the setup and use of the 
Agent, which is an integral part of our pull-based or agent-based approach to GitOps. We also covered a GitOps flow that leveraged this agent-based approach, which is a good choice for Kubernetes shops that need to keep their clusters secured and behind their firewall. This approach comes with its drawbacks in that you need to maintain the agents, which also consume the resources of your infrastructure components. In part two of this series, we will discuss the push-based or agentless approach to GitOps.\n\nCover image by [Vincent Ledvina](https://unsplash.com/@vincentledvina?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText) on [Unsplash](https://unsplash.com/s/photos/grand-tetons?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText)\n{: .note}\n",[9,1014,1034,769,931],"CI",{"slug":1036,"featured":6,"template":683},"how-to-use-agent-based-gitops","content:en-us:blog:how-to-use-agent-based-gitops.yml","How To Use Agent Based Gitops","en-us/blog/how-to-use-agent-based-gitops.yml","en-us/blog/how-to-use-agent-based-gitops",{"_path":1042,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":1043,"content":1049,"config":1057,"_id":1059,"_type":14,"title":1060,"_source":16,"_file":1061,"_stem":1062,"_extension":19},"/en-us/blog/how-to-use-oci-images-as-the-source-of-truth-for-continuous-delivery",{"title":1044,"description":1045,"ogTitle":1044,"ogDescription":1045,"noIndex":6,"ogImage":1046,"ogUrl":1047,"ogSiteName":671,"ogType":672,"canonicalUrls":1047,"schema":1048},"How to use OCI images as the source of truth for continuous delivery","Discover the benefits of using Open Container Initiative images as part of GitOps workflows and the many features GitLab offers to simplify deployments to 
Kubernetes.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097601/Blog/Hero%20Images/Blog/Hero%20Images/REFERENCE%20-%20Use%20this%20page%20as%20a%20reference%20for%20thumbnail%20sizes_76Tn5jFmEHY5LFj8RdDjNY_1750097600692.png","https://about.gitlab.com/blog/how-to-use-oci-images-as-the-source-of-truth-for-continuous-delivery","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How to use OCI images as the source of truth for continuous delivery\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Daniel Helfand\"}],\n        \"datePublished\": \"2025-02-19\",\n      }",{"title":1044,"description":1045,"authors":1050,"heroImage":1046,"date":1051,"body":1052,"category":1053,"tags":1054},[1010],"2025-02-19","Is [GitOps](https://about.gitlab.com/topics/gitops/) still GitOps if you are not using a git repository as your deployment artifact? While git remains central to GitOps workflows, storing infrastructure definitions as Open Container Initiative (OCI) artifacts in container registries has seen a rise in adoption as the source for GitOps deployments. 
In this article, we will dive deeper into the ideas behind this trend and how GitLab features support this enhancement to GitOps workflows.\n\n## What is GitOps?\n\nThe [OpenGitOps](https://opengitops.dev/) project has defined [four principles](https://opengitops.dev/#principles) for the practice of GitOps:\n- A [system managed by GitOps](https://github.com/open-gitops/documents/blob/v1.0.0/GLOSSARY.md#software-system) must have its [desired state expressed declaratively](https://github.com/open-gitops/documents/blob/v1.0.0/GLOSSARY.md#declarative-description).\n- Desired state is stored in a way that enforces immutability and versioning, and retains a complete version history.\n- Software agents automatically pull the desired state declarations from the source.\n- Software agents [continuously](https://github.com/open-gitops/documents/blob/v1.0.0/GLOSSARY.md#continuous) observe actual system state and [attempt to apply the desired state](https://github.com/open-gitops/documents/blob/v1.0.0/GLOSSARY.md#reconciliation).\n\nAn example of GitOps is storing the Kubernetes manifests for a microservice in a GitLab project. Those Kubernetes resources are then continuously reconciled by a [controller](https://kubernetes.io/docs/concepts/architecture/controller/) running on the Kubernetes cluster where the microservice is deployed to. This allows engineers to manage infrastructure using the same workflows as working with regular code, such as opening merge requests to make and review changes and versioning changes. GitOps also has operational benefits such as [preventing configuration drift](https://about.gitlab.com/topics/gitops/#cicd) and helps engineers audit what changes led to certain outcomes with deployments.\n\n## Benefits and limitations of git in GitOps workflows\n\nWhile git is an essential piece of GitOps workflows, git repositories were not designed to be deployed by GitOps controllers. 
Git does provide the ability for engineers to collaborate on infrastructure changes and audit these changes later on, but controllers do not need to download an entire git repository for a successful deployment. GitOps controllers simply need the infrastructure defined for a particular environment.\n\nAdditionally, an important piece of the deployment process is to [sign and verify deployments](https://docs.sigstore.dev/about/overview/#why-cryptographic-signing) to assure deployment changes to an environment are coming from a trusted source. While git commits can be signed and verified by GitOps controllers, commits may also capture other details not related to the deployment itself (e.g., documentation changes, updates to other environments, and git repository restructuring) or not enough of the deployment picture as a deployment may consist of multiple commits. This again feels like a case this git feature wasn’t designed for.\n\nAnother challenging aspect of git in GitOps workflows is that it can sometimes lead to more automation than expected. Soon after merging a change to the watched branch, it will be deployed. There are no controls in the process outside of git. How can you make sure that nothing gets deployed on a Friday late afternoon? What if teams responsible for deployment do not have permissions to merge changes in certain GitLab projects? Using OCI images adds a pipeline into the process, including all the delivery control features, like [approvals or deploy freezes](https://docs.gitlab.com/ee/ci/environments/protected_environments.html).\n\n## OCI images\n\nThe [Open Container Initiative](https://opencontainers.org/) has helped to define standards around container formats. While most engineers are familiar with building Dockerfiles into container images, many may not be as familiar with storing Kubernetes manifests in a container registry. 
Because [GitLab’s Container Registry](https://docs.gitlab.com/ee/user/packages/container_registry/) is OCI compliant, it allows for users to push Kubernetes manifests for a particular environment to a container registry. GitOps controllers, such as [Flux CD](https://about.gitlab.com/blog/why-did-we-choose-to-integrate-fluxcd-with-gitlab/), can use the manifests stored in this OCI artifact instead of needing to clone an entire git repository.\n\nOften in GitOps workflows, a git repository can include the infrastructure definitions for all environments that a microservice will be deployed to. By packaging the Kubernetes manifests for only a specific environment, Flux CD can download the minimum files needed to carry out a deployment to a specific environment.\n\n### Security benefits of using OCI artifacts\n\nAs mentioned previously, signing and verifying the artifacts to be deployed to an environment adds an additional layer of security for software projects. After Kubernetes manifests are pushed to a container registry, a tool like [Sigstore Cosign](https://docs.sigstore.dev/quickstart/quickstart-cosign/) can be used to sign the OCI image with a private key that can be securely stored in a GitLab project as a [CI/CD variable](https://docs.gitlab.com/ee/ci/variables/). Flux CD can then use a public key stored on a Kubernetes cluster to verify that a deployment is coming from a trusted source.\n\n## Using GitLab to push and sign OCI images\n\nGitLab offers many features that help simplify the process of packaging, signing, and deploying OCI images. A common way to structure GitLab projects with GitOps workflows is to have separate GitLab projects for microservices’ code and a single infrastructure repository for all microservices. If an application is composed of `n` microservices, this would require having `n +1` GitLab projects for an application.\n\nThe artifact produced by a code project is usually a container image that will be used to package the application. 
The infrastructure or delivery project will contain the Kubernetes manifests defining all the resources required to scale and serve traffic to each microservice. The artifact produced by this project is usually an OCI image used to deploy the application and other manifests to Kubernetes.\n\nIn this setup, separation of environments is handled by defining Kubernetes manifests in separate folders. These folders represent environments (e.g., development, staging, and production) that will host the application. When changes are made to the code project and a new container image is pushed, all that needs to be done to deploy these changes via GitLab’s integration with Flux CD is to edit the manifests under the environment folder to include the new image reference and open a merge request. Once that merge request is reviewed, approved, and merged, the delivery project’s CI/CD job will push a new OCI image that Flux CD will pick up and deploy to the new environment.\n\n![OCI images - flow chart](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097611/Blog/Content%20Images/Blog/Content%20Images/image1_aHR0cHM6_1750097611046.png)\n\nSigning an OCI image is as simple as including Cosign in your project’s CI/CD job. You can simply generate a new public and private key with Cosign by running the commands below locally. 
Just make sure to log in to your GitLab instance with the [glab CLI](https://gitlab.com/gitlab-org/cli/#installation) and replace the [`PROJECT_ID`] for the Cosign command with your [delivery project’s ID](https://docs.gitlab.com/ee/user/project/working_with_projects.html#access-a-project-by-using-the-project-id).\n\n```\nglab auth login\ncosign generate-key-pair gitlab://[PROJECT_ID]\n```\n\nOnce the cosign command runs successfully, you can see the Cosign keys added to your project under the CI/CD variables section under the key names `COSIGN_PUBLIC_KEY` and `COSIGN_PRIVATE_KEY`.\n\n### Example CI/CD job\n\nA GitLab CI/CD job for pushing an OCI image will look something like the following:\n\n```yaml\nfrontend-deploy:\n  rules:\n  - if: $CI_COMMIT_BRANCH == $CI_DEFAULT_BRANCH\n    changes:\n      paths:\n      - manifests/dev/frontend-dev.yaml\n  trigger:\n    include:\n      - component: gitlab.com/components/fluxcd/oci-artifact@0.3.1\n        inputs:\n          version: 0.3.1\n          kubernetes_agent_reference: gitlab-da/projects/tanuki-bank/flux-config:dev\n          registry_image_url: \"oci://$CI_REGISTRY_IMAGE/frontend\"\n          image_tag: dev\n          manifest_path: ./manifests/dev/frontend-dev.yaml\n          flux_oci_repo_name: frontend\n          flux_oci_namespace_name: frontend-dev\n          signing_private_key: \"$COSIGN_PRIVATE_KEY\"\n```\n\nThe [GitLab CI/CD Catalog](https://about.gitlab.com/blog/ci-cd-catalog-goes-ga-no-more-building-pipelines-from-scratch/) offers a GitLab-maintained [CI/CD component for working with OCI artifacts and Flux CD](https://gitlab.com/explore/catalog/components/fluxcd). 
This component allows development teams to push Kubernetes manifests as OCI images to GitLab’s Container Registry or an external container registry, sign the OCI image using Cosign, and immediately reconcile the newly pushed image via Flux CD.\n\nIn the example above, the Flux CD `component` is included in a `.gitlab-ci.yml` file of a GitLab project. Using the component’s `inputs`, users can define what registry to push the image to (i.e., `registry_image_url` and `image_tag`), the file path to Kubernetes manifests that will be pushed (i.e., `manifest_path`), the Cosign private key used to sign images (i.e., `signing_private_key`), and the Kubernetes namespace and Flux CD [OCIRepository](https://fluxcd.io/flux/components/source/ocirepositories/) name needed to sync updates to an environment (i.e., `flux_oci_namespace_name` and `flux_oci_repo_name`).\n\nThe `kubernetes_agent_reference` allows GitLab CI/CD jobs to inherit the `kubeconfig` needed to access a Kubernetes cluster without needing to store a `kubeconfig` CI/CD variable in each GitLab project. By setting up the [GitLab agent for Kubernetes](https://docs.gitlab.com/ee/user/clusters/agent/), you can configure all GitLab projects’ CI/CD jobs in a [GitLab group](https://docs.gitlab.com/ee/user/group/) to inherit permissions to deploy to the Kubernetes cluster.\n\nThe agent for Kubernetes context is typically configured wherever you configure the GitLab Agent for Kubernetes in your GitLab group. It is typically recommended that this be done in the project where Flux CD is managed. More information on configuring the agent for CI/CD access can be found in our [CI/CD workflow documentation](https://docs.gitlab.com/ee/user/clusters/agent/ci_cd_workflow.html).\n\nThe variables `$COSIGN_PRIVATE_KEY`, `$FLUX_OCI_REPO_NAME`, and `$FRONTEND_DEV_NAMESPACE` are values stored as CI/CD variables to easily access and mask these sensitive pieces of data in CI/CD logs. 
The `$CI_REGISTRY_IMAGE` is a variable that GitLab jobs have available by default that specifies the GitLab project’s container registry.\n\n### Deploy OCI images\n\nUsing [Flux CD with your GitLab projects](https://docs.gitlab.com/ee/user/clusters/agent/gitops/flux_tutorial.html), you can automate deployments and signing verification for your microservice’s environments. Once Flux CD is configured to sync from a GitLab project, you could add the following Kubernetes [custom resource definitions](https://kubernetes.io/docs/concepts/extend-kubernetes/api-extension/custom-resources/) to your project to sync your pushed OCI image.\n\n```yaml\napiVersion: v1\nkind: Namespace\nmetadata:\n  name: frontend-dev\n  labels:\n    name: frontend-dev\n---\napiVersion: bitnami.com/v1alpha1\nkind: SealedSecret\nmetadata:\n  name: cosign-public-key\n  namespace: frontend-dev\nspec:\n  encryptedData:\n    cosign.pub: AgAKgLf4VbVzJOmr6++k81LlFayx88AELaUQFNOaXmBF4G+fBfBYeABl0skNvMAa1UrPVNSfMIHgFoYHoO96g576a+epk6V6glOI+++XvYbfsygof3GGxe0nL5Qh2b3ge0fNpyd0kTPSjTj0YUhRhKtMGMRSRw1jrwhNcGxCHK+Byibs52v8Np49KsIkeZKbzLdgYABkrv+k0j7hQM+jR180NpG+2UiRvaXpPuogxkbj61FEqWGrJHk8IVyfl3eh+YhoXxOHGDqko6SUC+bUZPDBlU6yKegO0/8Zq3hwulrSEsEjzRZNK+RFVMOLWWuC6h+WGpYhAMcsZPwjjJ/y29KLNa/YeqkN/cdk488QyEFc6ehCxzhH67HxIn2PDa+KkEOTv2TuycGF+Q00jKIizXF+IwLx/oRb3pTCF0AoAY8D8N3Ey+KfkOjsBON7gGID8GbQiJqX2IgIZxFMk0JRzxbRKOEqn+guLd5Shj7CD1a1Mkk0DxBdbqrGv2XNYUaFPI7xd3rZXUJZlnv+fsmwswsiGWRuXwim45HScWzQnfgLAe7tv3spVEGeaO5apl6d89uN21PBQnfE/zyugB//7ZW9tSp6+CSMyc5HynxI8diafqiwKPgvzLmVWRnkvxJijoXicRr3sCo5RudZPSlnjfd7CKdhwEVvLl7dRR4e/XBMdxCzk1p52Pl+3/kJR+LJii5+iwOpYrpVltSZdzc/3qRd19yMpc9PWpXYi7HxTb24EOQ25i21eDJY1ceplDN6bRtop2quzkjlwVeE2i4cEsX/YG8QBtQbop/3fjiAjKaED3QH3Ul0PECS9ARTScSkcOL3I00Xpp8DyD+xH0/i9wCBRDmH3yKX18C8VrMq02ALSnlP7WCVVjCPzubqKx2LPZRxK9EG0fylwv/vWQzTUUwfbPQZsd4c75bSTsTvxqp/UcFaXA==\n  template:\n    metadata:\n      name: cosign-public-key\n      namespace: frontend-dev\n---\napiVersion: 
source.toolkit.fluxcd.io/v1beta2\nkind: OCIRepository\nmetadata:\n    name: frontend\n    namespace: frontend-dev\nspec:\n    interval: 1m\n    url: oci://registry.gitlab.com/gitlab-da/projects/tanuki-bank/tanuki-bank-delivery/frontend\n    ref:\n        tag: dev\n    verify:\n      provider: cosign\n      secretRef:\n        name: cosign-public-key\n---\napiVersion: kustomize.toolkit.fluxcd.io/v1\nkind: Kustomization\nmetadata:\n    name: frontend\n    namespace: frontend-dev\nspec:\n    interval: 1m\n    targetNamespace: frontend-dev\n    path: \".\"\n    sourceRef:\n        kind: OCIRepository\n        name: frontend\n    prune: true\n```\n\nThe [`Kustomization`](https://fluxcd.io/flux/components/kustomize/kustomizations/) resource allows for further customization of Kubernetes manifests and also specifies which namespace to deploy resources to. The `OCIRepository` resource for Flux CD allows users to specify the OCI image repository reference and tag to regularly sync from. Additionally, you will notice the `verify.provider` and `verify.secretRef` properties. These fields allow you to verify that the OCI image deployed to the cluster was signed by the corresponding Cosign private key used in the earlier CI/CD job.\n\nThe public key needs to be stored in a [Kubernetes secret](https://kubernetes.io/docs/concepts/configuration/secret/) that will need to be present in the same namespace as the `OCIRepository` resource. To have this secret managed by Flux CD and not store the secret in plain text, you can consider using [SealedSecrets](https://fluxcd.io/flux/guides/sealed-secrets/) to encrypt the value and have it be decrypted cluster side by a controller.\n\nFor a simpler approach not requiring SealedSecrets, you can [deploy the secret via a GitLab CI/CD](https://docs.gitlab.com/ee/user/clusters/agent/getting_started_deployments.html) job using the [`kubectl CLI`](https://kubernetes.io/docs/reference/kubectl/). 
In the non-sealed secret approach, you would simply remove the SealedSecret included above and run the job to deploy the public key secret before running the job to push the new OCI image. This will make sure the secret is stored securely in GitLab and make sure the secret can be accessed on the cluster by the OCIRepository. While this approach is a bit simpler, just note this is not a suitable approach for managing secrets in production.\n\n## The benefits of OCI, GitLab, and GitOps\n\nOCI artifacts allow for GitOps teams to take deployments even further with added security benefits and allowing for deployments to be minimal. Users still gain all the benefits offered by git as far as having a source of truth for infrastructure and collaborating on projects. OCI images add a packaging approach that improves the deployment aspect of GitOps.\n\nGitLab continues to learn from our customers and the cloud native community on building experiences that help simplify GitOps workflows. To get started using some of the features mentioned in this blog, you can sign up for a [60-day free trial of GitLab Ultimate](https://about.gitlab.com/free-trial/). 
We would also love to hear from users about their experiences with these tools, and you can provide feedback in the [community forum](https://forum.gitlab.com/t/oci-images-as-source-of-truth-for-gitops-with-gitlab/120965).\n","open-source",[109,1055,812,9,1056,834],"open source","git",{"slug":1058,"featured":6,"template":683},"how-to-use-oci-images-as-the-source-of-truth-for-continuous-delivery","content:en-us:blog:how-to-use-oci-images-as-the-source-of-truth-for-continuous-delivery.yml","How To Use Oci Images As The Source Of Truth For Continuous Delivery","en-us/blog/how-to-use-oci-images-as-the-source-of-truth-for-continuous-delivery.yml","en-us/blog/how-to-use-oci-images-as-the-source-of-truth-for-continuous-delivery",{"_path":1064,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":1065,"content":1071,"config":1079,"_id":1081,"_type":14,"title":1082,"_source":16,"_file":1083,"_stem":1084,"_extension":19},"/en-us/blog/production-grade-infra-devsecops-with-five-minute-production",{"title":1066,"description":1067,"ogTitle":1066,"ogDescription":1067,"noIndex":6,"ogImage":1068,"ogUrl":1069,"ogSiteName":671,"ogType":672,"canonicalUrls":1069,"schema":1070},"GitOps & DevSecOps for production infrastructure in minutes","Unlock production-grade infrastructure and development workflows in under five minutes with Five Minute Production App: a blend of solutions offered by AWS, Hashicorp Terraform, and GitLab.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749665839/Blog/Hero%20Images/devops.png","https://about.gitlab.com/blog/production-grade-infra-devsecops-with-five-minute-production","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Production-grade infrastructure, GitOps convergence, and DevSecOps in under 5 minutes\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Sri Rangan\"}],\n        \"datePublished\": \"2021-02-24\",\n      
}",{"title":1072,"description":1067,"authors":1073,"heroImage":1068,"date":1075,"body":1076,"category":767,"tags":1077},"Production-grade infrastructure, GitOps convergence, and DevSecOps in under 5 minutes",[1074],"Sri Rangan","2021-02-24","\nThis blog post was originally published on the GitLab Unfiltered blog. It was reviewed and republished on 2021-03-10.\n{: .note .alert-info .text-center}\n\nThis is a story about achieving production-grade infrastructure in under five minutes.\\\\\nThis is a story about achieving production-grade DevSecOps in under five minutes.\\\\\nThis is a story about achieving total convergence of GitOps in under five minutes.\n\nMy name is Sri and over the last three months and I worked closely with GitLab co-founder [DZ](/company/team/#dzaporozhets) in building \"Five Minute Production App.\"\n\nThe app blends solutions offered by AWS, Hashicorp Terraform, and GitLab, and offers production-grade infrastructure and development workflows in under five minutes.\n\n![Five Minute Production App Diagram](https://about.gitlab.com/images/blogimages/five-min-prod-01-complete-flow.png){: .shadow.medium.center}\n\nApart from the efficiencies gained from using Five Minute Production App, you benefit by achieving stateful, production-ready infrastructure on the AWS hypercloud.\n\nWe started with AWS first, as it is the hypercoud leader today. Support for Azure and Google Cloud is on the roadmap.\n\nOur vision and design decisions are explained in the [README](https://gitlab.com/gitlab-org/5-minute-production-app/deploy-template#quickly).\n\n## Quickstart \n\nWe start with your GitLab project which has the source code of your web application. 
Regardless of which language or framework you use, your web application is packaged as a container image and stored within your GitLab project's Container Registry.\nThis is the Build stage.\n\nThis is followed by the Provision stage where Terraform scripts connect to AWS and create a secure environment for your web application.\nThe environments provisioned relate to your Git branching workflow.\nLong-lived Git branches create long-lived environments, and short-lived Git branches correspond to short-lived environments.\n\nResources provisioned include an Ubuntu VM, scalable PostgreSQL database, a Redis cluster, and S3 object storage.\nWe consider these elements as the building blocks for the majority of web applications, and many of these fall under AWS free tier.\n\nThe infra state and credentials are stored within your GitLab project's managed Terraform state.\n\nFinally, we reach the Deploy stage which:\n1. Retrieves the deployable image from the GitLab Container Registry\n1. Retrieves the infrastructure credentials from the GitLab Managed Terraform State, and\n1. Proceeds to deploy your web application\n\nEverything is achieved by including these two lines in your `.gitlab-ci.yml` file.\n\n```yaml\ninclude:\n  remote: https://gitlab.com/gitlab-org/5-minute-production-app/deploy-template/-/raw/stable/deploy.yml\n```\n\nLet's look at the complete process in more detail.\n\n![Three stages of Five Minute Production App](https://about.gitlab.com/images/blogimages/five-min-prod-02-pipeline.png){: .shadow.medium.center}\nThe three stages of Five Minute Production App\n{: .note.text-center}\n\n## Build and package\n\nThe Build stage is where it all begins. Five Minute Production App reuses the [Auto Build stage](https://docs.gitlab.com/ee/topics/autodevops/stages.html#auto-build) from the GitLab Auto DevOps pipeline.\n\nAuto Build builds and packages web applications that are:\n1. Containerized with a Dockerfile, or\n2. Compatible with the Cloud Native buildpack, or\n3. 
Compatible with the Heroku buildpack\n\nThus, web applications across multitudes of technologies are supported, including web frameworks such as Rails, Django, Express, Next.js, Spring, etc.\nand programming languages including Python, Java, Node.js, Ruby, Clojure, etc.\n\nOnce the Auto Build job has finished execution, the newly created container image is stored as an artifact in your GitLab project's Container Registry.\n\n## Provision the infrastructure\n\nThe next step, Provision, prepares infrastructure resources in AWS.\nThe first requirement here is the presence of AWS credentials stored as CI/CD variables at the project or group level.\nOnce valid AWS credentials are found, a Terraform script is executed to generate resources in AWS.\n\nThese resources include:\n1. EC2 VM based on Ubuntu 20.04 LTS\n2. PostgreSQL database managed by AWS RDS\n3. Redis cluster managed by AWS ElastiCache\n4. S3 bucket for file storage\n5. Email Service credentials managed by AWS SES\n\nThe most critical resource is the PostgreSQL service which has daily backups enabled.\nPostgreSQL data is snapshotted if the infrastructure resource is \"destroyed\" through a manual user action via the Five Minute Production App pipeline.\n\nThe EC2 VM is the only service accessible publicly. 
Ports 22, 80 and 443 are exposed.\nEvery other resource described above is part of a secure, private network, hidden from the public web, accessible only via the EC2 instance and your web application deployed there.\n\nThe stateful services and environments are tied to your Git branches.\\\\\nThis means every Git branch creates a new environment with these resource sets.\\\\\nWe don't have a preference on your Git branching and environments lifecycle.\\\\\nUse long-lived or short-lived branches as you see fit, just keep in mind that long-lived branches lead to long-lived environments and short-lived branches lead to short-lived environments.\n\n![Infrastructure resources provisioned on AWS](https://about.gitlab.com/images/blogimages/five-min-prod-03-infra-resources.png){: .shadow.medium.center}\nInfrastructure resources provisioned on AWS\n{: .note.text-center}\n\n## Deploy your web application\n\nFinally comes the Deploy stage.\n\nThis is where the deploy script retrieves your web application package (container image) from the GitLab Container Registry, then retrieves the EC2 instance\ncredentials from the GitLab Managed Terraform State, and proceeds to deploy the relevant version of your web application in its environment.\n\nModern web applications might require additional commands being executed after each deployment or after the initial deployment,\nand these commands can be defined as variables in your `.gitlab-ci.yml` file.\n\nFinally, with the help of Certbot from Letsencrypt, SSL certificates are generated and configured for your web application.\nIf you have defined the `CERT_DOMAIN` CI/CD variable the SSL certificate will be generated for your custom domain name.\nOtherwise the generated SSL certificate uses a dynamic URL that Five Minute Production App prepares for you.\n\n## Conclusion\n\nThere we have it. A simple yet production-ready setup for your web application. 
If you are looking for an AWS-based setup, this is ready for usage.\n\nIf you are looking for something similar but not quite Five Minute Production App, this serves as an example of how to converge infrastructure-as-code with software development and provide seamless continuous deployment workflows.\n\nIn my personal experience, this is one of the most complete examples of GitOps:\n\n1. Your application source code lives in your GitLab project\n2. Your infrastructure defined as code lives in your GitLab project\n3. Your CI/CD pipeline lives in your GitLab project\n4. Your infrastructure state lives in your GitLab project\n5. Your infrastructure secrets and credentials live in your GitLab project\n6. Your environments configuration lives in your GitLab project\n\nThis complete GitOps convergence is not specifically configured for one project. It can be included as a template from multiple projects.\nThere is no reason why the GitLab project in your organization cannot be the single source of truth for everything.\n\n### Links\n\n- [Five Minute Production App](https://gitlab.com/gitlab-org/5-minute-production-app/deploy-template/-/blob/master/README.md)\n- [Reference Examples](https://gitlab.com/gitlab-org/5-minute-production-app/examples)\n\n### About the author\n\n[Sri Rangan](mailto:sri@gitlab.com), an Enterprise Solutions Architect with GitLab, is a core-contributor and maintainer\nof [Five Minute Production App](https://gitlab.com/gitlab-org/5-minute-production-app/deploy-template/-/blob/master/README.md).",[1034,811,769,813,9,1078],"production",{"slug":1080,"featured":6,"template":683},"production-grade-infra-devsecops-with-five-minute-production","content:en-us:blog:production-grade-infra-devsecops-with-five-minute-production.yml","Production Grade Infra Devsecops With Five Minute 
Production","en-us/blog/production-grade-infra-devsecops-with-five-minute-production.yml","en-us/blog/production-grade-infra-devsecops-with-five-minute-production",{"_path":1086,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":1087,"content":1092,"config":1101,"_id":1103,"_type":14,"title":1104,"_source":16,"_file":1105,"_stem":1106,"_extension":19},"/en-us/blog/pull-based-kubernetes-deployments-coming-to-gitlab-free-tier",{"title":1088,"description":1089,"ogTitle":1088,"ogDescription":1089,"noIndex":6,"ogImage":865,"ogUrl":1090,"ogSiteName":671,"ogType":672,"canonicalUrls":1090,"schema":1091},"Pull-based GitOps moving to GitLab Free tier","Learn how this change provides organizations increased flexibility, security, scalability, and automation in cloud-native environments.","https://about.gitlab.com/blog/pull-based-kubernetes-deployments-coming-to-gitlab-free-tier","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Pull-based GitOps moving to GitLab Free tier\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Sandra Gittlen\"},{\"@type\":\"Person\",\"name\":\"Lauren Minning\"}],\n        \"datePublished\": \"2022-05-18\",\n      }",{"title":1088,"description":1089,"authors":1093,"heroImage":865,"date":1096,"body":1097,"category":1098,"tags":1099},[1094,1095],"Sandra Gittlen","Lauren Minning","2022-05-18","\n\nGitLab will include support for pull-based deployment in the platform’s Free tier in an upcoming release, which will provide users increased flexibility, security, scalability, and automation in cloud-native environments. With pull-based deployment, DevOps teams can use the [GitLab agent for Kubernetes](/blog/introducing-the-gitlab-kubernetes-agent/) to automatically identify and enact application changes. \n\n“DevOps teams at all levels benefit from utilizing GitOps strategies such as pull-based deployment in their cloud-native environments. 
By offering this feature in GitLab’s Free tier, we can introduce more organizations to the power and utility of this secure and scalable functionality,” says [Viktor Nagy](https://gitlab.com/nagyv-gitlab), product manager of GitLab’s Configure Group.\n\nAs an open-core company, GitLab is happy to contribute to the GitOps community and enable the adoption of best practices in the industry.\n\n## What is pull-based deployment?\n\nPull-based and push-based deployment are [two main approaches to GitOps](/topics/gitops/), an operational framework that takes DevOps best practices used for application development such as version control, collaboration, compliance, and [CI/CD](/topics/ci-cd/) tooling, and applies them to infrastructure automation. \n\nGitOps enables operations teams to [move as quickly as their application development counterparts](/blog/gitops-done-3-ways/) by making use of automation and scalability, without sacrificing security. \n\nWhile push-based, or agentless, deployment relies on a CI/CD tool to push changes to the infrastructure environment, pull-based deployment uses an agent installed in a cluster to pull changes whenever there is a deviation from the desired configuration. In the pull-based approach, deployment targets are limited to Kubernetes and an agent must be installed in each Kubernetes cluster.\n\n“As long as the GitLab agent for Kubernetes on your infrastructure has the necessary access rights in your cluster, you can configure everything automatically, reducing the DevOps workload and the opportunity to introduce errors,” Nagy says.\n\n## Pull-based deployment vs. push-based deployment\n\nPush-based deployment and pull-based deployment each have their pros and cons. 
Here is a list of the advantages and disadvantages of each GitOps practice:\n\nPush-based deployment pros:\n- ease of use\n- well-known as part of CI/CD\n- more flexible, as deployment targets can be on physical servers or virtual containers, not restricted to Kubernetes clusters \n\nPush-based deployment cons:\n- requires organizations to open their firewall to a cluster and grant admin access to external CI/CD\n- requires organizations to adjust their CI/CD pipelines when they introduce new environments\n\nPull-based deployment pros:\n- secure infrastructure - no need to open your firewall or grant admin access externally\n- changes can be automatically detected and applied without human intervention\n- easier scaling of identical clusters\n\nPull-based deployment cons:\n- agent needs to be installed in every cluster\n- limited to Kubernetes only\n\n## How pull-based deployment impacts the Free-tier experience\n\nIncluding support for pull-based deployments in GitLab’s Free tier provides a tremendous competitive advantage for smaller organizations as they can now apply automation in a safe and scalable manner to their cloud-native infrastructure, including virtual containers and clusters. And, for organizations that are trying to get started quickly by minimizing the number of tools in their infrastructure ecosystem, this functionality is included in One DevOps Platform, not as a point solution. \n\n“DevOps teams don’t have to continuously write code for new infrastructure elements – they can write the code once, within a single DevOps platform, and have the agent automatically find it, pull it, and apply it, as well as configuration changes,” Nagy says. 
“Also, with the availability of pull-based deployment in this introductory tier, newcomers to GitLab will immediately be able to modernize application development and reduce the security risk associated with configuring such infrastructure.”\n\n_This blog post contains information related to upcoming products, features, and functionality. It is important to note that the information presented is for informational purposes only. Please do not rely on this information for purchasing or planning purposes. As with all projects, the items mentioned in this blog post and linked pages are subject to change or delay. The development, release, and timing of any products, features, or functionality remain at the sole discretion of GitLab Inc._\n\n\n\n\n\n\n","news",[1100,812,702,813,9],"DevOps platform",{"slug":1102,"featured":6,"template":683},"pull-based-kubernetes-deployments-coming-to-gitlab-free-tier","content:en-us:blog:pull-based-kubernetes-deployments-coming-to-gitlab-free-tier.yml","Pull Based Kubernetes Deployments Coming To Gitlab Free Tier","en-us/blog/pull-based-kubernetes-deployments-coming-to-gitlab-free-tier.yml","en-us/blog/pull-based-kubernetes-deployments-coming-to-gitlab-free-tier",{"_path":1108,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":1109,"content":1115,"config":1124,"_id":1126,"_type":14,"title":1127,"_source":16,"_file":1128,"_stem":1129,"_extension":19},"/en-us/blog/quick-setup-of-a-gke-cluster-with-argocd-pre-installed-using-terraform",{"title":1110,"description":1111,"ogTitle":1110,"ogDescription":1111,"noIndex":6,"ogImage":1112,"ogUrl":1113,"ogSiteName":671,"ogType":672,"canonicalUrls":1113,"schema":1114},"Quick setup of a GKE Cluster with ArgoCD pre-installed using Terraform","Use this tutorial as a great starting point to manage your cluster entirely through 
GitOps.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749665989/Blog/Hero%20Images/AdobeStock_618473457.jpg","https://about.gitlab.com/blog/quick-setup-of-a-gke-cluster-with-argocd-pre-installed-using-terraform","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Quick setup of a GKE Cluster with ArgoCD pre-installed using Terraform\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Noah Ing\"},{\"@type\":\"Person\",\"name\":\"Siddharth Mathur\"}],\n        \"datePublished\": \"2024-01-31\",\n      }",{"title":1110,"description":1111,"authors":1116,"heroImage":1112,"date":1119,"body":1120,"category":767,"tags":1121},[1117,1118],"Noah Ing","Siddharth Mathur","2024-01-31","This tutorial will walk you through setting up a Google Kubernetes Engine (GKE) Cluster with ArgoCD pre-installed, utilizing Terraform, in less than 10 minutes. This will be a great starting point to manage your cluster entirely through GitOps.\n\n### Prerequisites\n- GCP account with permissions to provision a GKE Cluster\n- Kubectl client v1.23.9\n- Kubectl server v1.23.16-gke.1400\n- Working knowledge of GKE\n- Basic knowledge of ArgoCD\n\n#### An overview of this tutorial is as follows:\n- Set up the GitLab Terraform GKE ArgoCD Template \n- Connect to your GKE Cluster\n- Grab the ArgoCD Initial Admin Secret\n- Log into ArgoCD \n- Enjoy your Kubernetes Cluster with ArgoCD!\n\n#### Set up the GitLab Terraform GKE ArgoCD template\n\nStart by importing the example project by URL - [https://gitlab.com/projects/new#import_project](https://gitlab.com/projects/new#import_project).\n\nTo import the project:\n1. In GitLab, on the top bar, select **Main menu > Projects > View all projects**.\n2. On the right of the page, select **New project**.\n3. Select **Import project**.\n4. Select **Repository by URL**.\n5. 
For the Git repository URL:\n    - [GitLab Terraform GKE ArgoCD](https://gitlab.com/demos/infrastructure/gitlab-terraform-gke-argocd)\n6. Complete the fields and select **Create project**.\n\n#### Add in your cloud credentials to CI/CD variables\n\n1. To authenticate GCP with GitLab, create a GCP service account with the following roles: **Compute Network Viewer, Kubernetes Engine Admin, Service Account User, and Service Account Admin**. Both User and Admin service accounts are necessary. The User role impersonates the default service account when creating the node pool. The Admin role creates a service account in the kube-system namespace.\n2. **Download the JSON file** with the service account key you created in the previous step.\n3. On your computer, encode the JSON file to base64 (replace /path/to/sa-key.json with the path to your key):\n\n```\nbase64 -i /path/to/sa-key.json\n```\n\n4. Use the output of this command as the **BASE64_GOOGLE_CREDENTIALS** environment variable in the next step.\n\nOn the left sidebar, select **Settings > CI/CD. Expand Variables**.\n5. Set the variable **BASE64_GOOGLE_CREDENTIALS** to the base64 encoded JSON file you just created.\n6. 
Set the variable **TF_VAR_gcp_project** to your GCP’s project name.\n\n![simpleargocd - image 1](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749683298/Blog/Content%20Images/simpleargocd_-_image_1.png)\n\n#### Run GitLab CI to deploy your Kubernetes cluster with ArgoCD Installed.\n\n![simpleargocd - image 2](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749683298/Blog/Content%20Images/simpleargocd_-_image_2.png)\n\n#### Connect to your GKE Cluster through your terminal using the following bash command.\n\n```bash\ngcloud container clusters get-credentials gitlab-terraform-gke-argocd --region us-central1 --project \u003Cproject-name>\n```\n\n![simpleargocd-image3](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749683298/Blog/Content%20Images/simpleargocd-image3.png)\n\n#### Expose the Initial Admin Secret through your terminal using the following bash command. Make sure you save this password for later.\n\n```bash\nkubectl -n argocd get secret argocd-initial-admin-secret -o jsonpath=\"{.data.password}\" | base64 -d\n```\n\n#### Port Forward ArgoCD to your localhost 8080 through your terminal using the following bash command. Go to Chrome localhost:8080 afterwards.\n\n```bash\nkubectl port-forward svc/argocd-server -n argocd 8080:443\n```\n\n#### Enter your admin and `Initial Admin Secret` to the login page.\n\n![simpleargocd - image 4](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749683298/Blog/Content%20Images/simpleargocd_-_image_4.png)\n\n#### Voila! You've bootstrapped your GKE cluster with ArgoCD. Enjoy your GitOps!\n\n![simpleargocd - image 5](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749683298/Blog/Content%20Images/simpleargocd_-_image_5.png)\n\n### Next steps\nWe recommend reviewing [setting up Review Ops with ArgoCD](https://about.gitlab.com/blog/how-to-provision-reviewops/)! 
\n\n### References\n- [GitLab Learn Labs - Infrastructure Webinar](https://gitlab.com/gitlab-learn-labs/webinars/infrastructure/gitlab-terraform-gke-argocd)\n- [Getting started with ArgoCD](https://argo-cd.readthedocs.io/en/release-2.0/getting_started/)\n\n### Related posts\n- [Simple Kubernetes management with GitLab](https://about.gitlab.com/blog/simple-kubernetes-management-with-gitlab/)\n- [How to provision ReviewOps](https://about.gitlab.com/blog/how-to-provision-reviewops/)\n- [The ultimate guide to GitOps with GitLab](https://about.gitlab.com/blog/the-ultimate-guide-to-gitops-with-gitlab/)\n",[9,1122,109,680,1123],"GKE","solutions architecture",{"slug":1125,"featured":6,"template":683},"quick-setup-of-a-gke-cluster-with-argocd-pre-installed-using-terraform","content:en-us:blog:quick-setup-of-a-gke-cluster-with-argocd-pre-installed-using-terraform.yml","Quick Setup Of A Gke Cluster With Argocd Pre Installed Using Terraform","en-us/blog/quick-setup-of-a-gke-cluster-with-argocd-pre-installed-using-terraform.yml","en-us/blog/quick-setup-of-a-gke-cluster-with-argocd-pre-installed-using-terraform",{"_path":1131,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":1132,"content":1138,"config":1145,"_id":1147,"_type":14,"title":1148,"_source":16,"_file":1149,"_stem":1150,"_extension":19},"/en-us/blog/removing-tags-from-small-saas-runner-on-linux",{"title":1133,"description":1134,"ogTitle":1133,"ogDescription":1134,"noIndex":6,"ogImage":1135,"ogUrl":1136,"ogSiteName":671,"ogType":672,"canonicalUrls":1136,"schema":1137},"Removing tags from our small SaaS runner on Linux","With GitLab 17.0, we are removing most tags from our small SaaS runner on Linux. 
Find out if you are affected and the change you need to make.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749669863/Blog/Hero%20Images/security-pipelines.jpg","https://about.gitlab.com/blog/removing-tags-from-small-saas-runner-on-linux","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Removing tags from our small SaaS runner on Linux\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Gabriel Engel\"}],\n        \"datePublished\": \"2023-08-15\",\n      }",{"title":1133,"description":1134,"authors":1139,"heroImage":1135,"date":1141,"body":1142,"category":1098,"tags":1143},[1140],"Gabriel Engel","2023-08-15","\nIn GitLab 17.0, we are updating the tags of our [small SaaS runner on Linux](https://docs.gitlab.com/ee/ci/runners/saas/linux_saas_runner.html) to be consistent with our other Linux runners.\n\n## Who will be affected?\nIf you are using the small SaaS runner on Linux with any tag other than `saas-linux-small-amd64`, you will be affected as all other tags such as `docker` or `linux` will be deprecated. 
Job configurations that use a deprecated tag will become stuck.\n\nAn example job configuration that will be stuck could look like this:\n\n```yaml\ntest-invalid-tag:\n  stage: test\n  tags:\n  - docker\n  - linux\n  script:\n    - echo \"I'm affected and will be stuck after 17.0\"\n```\n\n![Stuck Job](https://about.gitlab.com/images/blogimages/2023-08-02-removing-tags-from-our-small-saas-runner-on-linux/stuck-job.png)\n\nThe small SaaS runner on Linux is configured to run untagged jobs; this remains unchanged.\nSo, if you're using the small Linux runner but haven't specified a tag, the behavior of your job will not change.\n\n## How to avoid jobs getting stuck\n\nTo avoid jobs getting stuck after the 17.0 release, you should change the tag in your `.gitlab-ci.yaml` file to `saas-linux-small-amd64`.\n\nAn example job configuration that will work:\n\n```yaml\ntest-correct-tag:\n  stage: test\n  tags:\n  - saas-linux-small-amd64\n  script:\n    - echo \"I'm running as expected\"\n```\n\nAnother example that will work is to define no tag, so the runner will pick up an untagged job:\n\n```yaml\ntest-untagged:\n  stage: test\n  script:\n    - echo \"I'm running as expected\"\n```\n\n## References\n\n- [What are SaaS runners?](https://docs.gitlab.com/ee/ci/runners/)\n- [SaaS runners on Linux documentation](https://docs.gitlab.com/ee/ci/runners/saas/linux_saas_runner.html)\n- [Tags - '.gitlab-ci.yml' Keyword Reference](https://docs.gitlab.com/ee/ci/yaml/#tags)\n",[109,9,1144],"product",{"slug":1146,"featured":6,"template":683},"removing-tags-from-small-saas-runner-on-linux","content:en-us:blog:removing-tags-from-small-saas-runner-on-linux.yml","Removing Tags From Small Saas Runner On 
Linux","en-us/blog/removing-tags-from-small-saas-runner-on-linux.yml","en-us/blog/removing-tags-from-small-saas-runner-on-linux",{"_path":1152,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":1153,"content":1159,"config":1165,"_id":1167,"_type":14,"title":1168,"_source":16,"_file":1169,"_stem":1170,"_extension":19},"/en-us/blog/set-up-flux-for-gitops-on-openshift",{"title":1154,"description":1155,"ogTitle":1154,"ogDescription":1155,"noIndex":6,"ogImage":1156,"ogUrl":1157,"ogSiteName":671,"ogType":672,"canonicalUrls":1157,"schema":1158},"Set up Flux for GitOps to deploy workloads on OpenShift","Learn how to set up a sample project, complete a bootstrap Flux installation, and authenticate your installation with a project deploy token.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749682825/Blog/Hero%20Images/genericworkflow.jpg","https://about.gitlab.com/blog/set-up-flux-for-gitops-on-openshift","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Set up Flux for GitOps to deploy workloads on OpenShift\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Bart Zhang\"}],\n        \"datePublished\": \"2023-07-05\",\n      }",{"title":1154,"description":1155,"authors":1160,"heroImage":1156,"date":1162,"body":1163,"category":767,"tags":1164},[1161],"Bart Zhang","2023-07-05","\n\nIn February, we announced that [Flux CD would be our recommended approach to do GitOps with GitLab](https://about.gitlab.com/blog/why-did-we-choose-to-integrate-fluxcd-with-gitlab/). This tutorial explains how to set up GitLab and Flux to deploy workloads on Red Hat OpenShift. You’ll set up a sample project, complete a bootstrap Flux installation, and authenticate your installation with a project deploy token. 
By the end of this tutorial, you should be able to deploy an example NGINX workload to OpenShift from a GitLab Repo via Flux.\n\nYou can find the fully configured tutorial project in [this GitLab repository](https://gitlab.com/gitlab-partner-demos/red-hat-demos/flux). It works in conjunction with [this repository](https://gitlab.com/gitlab-partner-demos/red-hat-demos/web-app-manifests), which contains the example OpenShift manifest. \n\n### To set up Flux for GitOps:\n1. Create a personal access token\n2. Create the Flux repository\n3. Create the OpenShift manifest repository\n4. Configure Flux to sync your manifests\n5. Verify your configuration\n\n### Prerequisites:\nYou must have an OpenShift cluster running. Cluster-admin privileges are required to install Flux on OpenShift, which can either be installed via OperatorHub or the CLI.\n\nWhen installing Flux with CLI, you need to set the nonroot SCC for all controllers in the flux-system namespace like this:\n\n```\nNS=\"flux-system\"\noc adm policy add-scc-to-user nonroot system:serviceaccount:${NS}:kustomize-controller\noc adm policy add-scc-to-user nonroot system:serviceaccount:${NS}:helm-controller\noc adm policy add-scc-to-user nonroot system:serviceaccount:${NS}:source-controller\noc adm policy add-scc-to-user nonroot system:serviceaccount:${NS}:notification-controller\noc adm policy add-scc-to-user nonroot system:serviceaccount:${NS}:image-automation-controller\noc adm policy add-scc-to-user nonroot system:serviceaccount:${NS}:image-reflector-controller\n```\nExpected output:\n```\nclusterrole.rbac.authorization.k8s.io/system:openshift:scc:nonroot added: \"kustomize-controller\"\nclusterrole.rbac.authorization.k8s.io/system:openshift:scc:nonroot added: \"helm-controller\"\nclusterrole.rbac.authorization.k8s.io/system:openshift:scc:nonroot added: \"source-controller\"\nclusterrole.rbac.authorization.k8s.io/system:openshift:scc:nonroot added: 
\"notification-controller\"\nclusterrole.rbac.authorization.k8s.io/system:openshift:scc:nonroot added: \"image-automation-controller\"\nclusterrole.rbac.authorization.k8s.io/system:openshift:scc:nonroot added: \"image-reflector-controller\"\n```\n\nAlso, you'll need to [patch your Kustomization](https://access.redhat.com/documentation/en-us/openshift_container_platform/4.8/html/security_and_compliance/seccomp-profiles) to remove the SecComp Profile and enforce runUserAs to the same UID provided by the images to prevent OpenShift to alter the user expected by our controllers, prior to bootstrapping the cluster.\n\nYou’ll need to create a Git repository and clone it locally. I chose to create [the web-app-manifests repository](https://gitlab.com/gitlab-partner-demos/red-hat-demos/web-app-manifests) to store my manifest file once it is created through the following steps.\n\nCreate the file structure required by bootstrap using the following command:\n\n```\ngit clone https://gitlab.com/gitlab-partner-demos/red-hat-demos/flux/\ncd flux\nmkdir -p clusters/my-cluster/flux-system\ntouch clusters/my-cluster/flux-system/gotk-components.yaml \\\n    clusters/my-cluster/flux-system/gotk-sync.yaml \\\n    clusters/my-cluster/flux-system/kustomization.yaml\n```\n\nAdd the following YAML snippet and its patches section to flux/clusters/my-cluster/flux-system/kustomization.yaml:\n\n```\napiVersion: kustomize.config.k8s.io/v1beta1\nkind: Kustomization\nresources:\n  - gotk-components.yaml\n  - gotk-sync.yaml\npatches:\n  - patch: |\n      apiVersion: apps/v1\n      kind: Deployment\n      metadata:\n        name: all\n      spec:\n        template:\n          spec:\n            containers:\n              - name: manager\n                securityContext:\n                  runAsUser: 65534\n                  seccompProfile:\n                    $patch: delete      \n    target:\n      kind: Deployment\n      labelSelector: app.kubernetes.io/part-of=flux\n```\n\nCommit and push the 
changes to main branch:\n\n```\ncd ~/flux\ngit add -A && git commit -m \"init flux for openshift\" && git push\n```\n\n### Create a personal access token\n\nTo authenticate with the Flux CLI, you must create a GitLab personal access token ([PAT](https://docs.gitlab.com/ee/user/profile/personal_access_tokens.html)) with the api scope:\n1. In the upper-right corner, select your avatar.\n2. Select Edit profile.\n3. On the left sidebar, select Access Tokens.\n4. Enter a name and expiry date for the token.\n5. Select the api scope.\n6. Select Create personal access token.\n7. Copy the new token to your clipboard.\n\nNote: You can also use a project or group access token with the api scope.\n\n### Create the Flux repository\nCreate a Git repository, install Flux, and authenticate Flux with your repo in RedHat OpenShift:\n1. Make sure you are logged in as an OpenShift user in your CLI to access your cluster. `oc login` command is useful here.\n2. [Install the Flux CLI](https://fluxcd.io/flux/installation/#bootstrap). You must install Flux v2 or higher. `brew install fluxcd/tap/flux` on Mac OSX. Check your flux version with `flux -v`. Mine is `flux version 2.0.0-rc.1`.\n3. In GitLab, create a new empty project called `flux`. I chose to use [the respository in this readme](https://gitlab.com/gitlab-partner-demos/red-hat-demos/flux/)\n4. From your shell, export a GITLAB_TOKEN environment variable with the value of your personal access token. For example, `export GITLAB_TOKEN=\u003Cpersonal-access-token>`.\n5. Run the bootstrap command. The exact command depends on whether you are creating the Flux repository under a GitLab user, group, or subgroup. For more information, see the Flux bootstrap documentation.\n\nIn this tutorial, you’re working with a public project in a subgroup. 
The bootstrap command looks like this:\n\n```\ncd ~/flux\nflux bootstrap gitlab \\\n  --owner=gitlab-partner-demos/red-hat-demos \\\n  --repository=flux \\\n  --branch=master \\\n  --path=clusters/my-cluster \\\n  --token-auth\n```\nExpected output:\n```\n► connecting to https://gitlab.com\n► cloning branch \"master\" from Git repository \"https://gitlab.com/gitlab-partner-demos/red-hat-demos/flux.git\"\n✔ cloned repository\n► generating component manifests\n✔ generated component manifests\n✔ component manifests are up to date\n► installing components in \"flux-system\" namespace\n✔ installed components\n✔ reconciled components\n► determining if source secret \"flux-system/flux-system\" exists\n✔ source secret up to date\n► generating sync manifests\n✔ generated sync manifests\n✔ sync manifests are up to date\n► applying sync manifests\n✔ reconciled sync configuration\n◎ waiting for Kustomization \"flux-system/flux-system\" to be reconciled\n```\n\nThis command installs the Flux agent on the OpenShift cluster and configures it to manage itself from the repository flux-config. The command also automatically creates the project deploy token required to access the flux-config repository.\n\nGreat work! You now have a repository bootstrapped with a Flux configuration. Any updates to your repository are automatically synced to the cluster.\n\n### Create the OpenShift manifest repository\nNext, create a repository for your Flux manifest files. These are stateful files that track the current running configuration by\nthe Flux agent. I chose to use [web-app-manifests](https://gitlab.com/gitlab-partner-demos/red-hat-demos/web-app-manifests) project to track my manifest files.\n1. In GitLab, create a new repository called `web-app-manifests`.\n1. 
Add a file to web-app-manifests named `nginx-deployment.yaml` with the following contents:\n\n```\napiVersion: apps/v1\n\nkind: Deployment\n\nmetadata:\n  name: nginx-deployment\n  labels:\n    app: nginx\nspec:\n  replicas: 3\n  selector:\n    matchLabels:\n      app: nginx\n  template:\n    metadata:\n      labels:\n        app: nginx\n    spec:\n      containers:\n      - name: nginx-unprivileged\n        image: nginxinc/nginx-unprivileged:latest\n        ports:\n        - containerPort: 80\n```\n\nIn the new `web-app-manifests` repository, create a [GitLab deploy token](https://docs.gitlab.com/ee/user/project/deploy_tokens/) with only the `read_repository` scope.\n\nStore your deploy token username and password somewhere safe. I used environmental variables to save mine:\n\n```\nexport GITLAB_DEPLOY_TOKEN_USER=\u003Cmy-gitlab-deployment-token-username>\nexport GITLAB_DEPLOY_TOKEN_PASS=\u003Cmy-gitlab-deployment-token-password>\nenv |grep GITLAB_DEPLOY_TOKEN\n```\nExpected output:\n```\nGITLAB_DEPLOY_TOKEN_USER=myGitLabUserName\nGITLAB_DEPLOY_TOKEN_PASS=MySecretToken\n```\n\nIn Flux CLI, create a secret with your deploy token and point the secret to the new repository. 
For example:\n\n```\nflux create secret git flux-deploy-authentication \\\n         --url=https://gitlab.com/gitlab-partner-demos/red-hat-demos/web-app-manifests \\\n         --namespace=default \\\n         --username=$GITLAB_DEPLOY_TOKEN_USER \\\n         --password=$GITLAB_DEPLOY_TOKEN_PASS\n```\nExpected output:\n```\n► git secret 'flux-deploy-authentication' created in 'default' namespace\n```\n\nTo check if your secret was generated successfully, run:\n\n```\noc -n default get secrets flux-deploy-authentication -o yaml\n```\nExpected output:\n```\napiVersion: v1\ndata:\n  password: Base64EncodedPassword=\n  username: Base64EncodedUsername\nkind: Secret\nmetadata:\n  creationTimestamp: \"2023-04-20T18:22:33Z\"\n  name: flux-deploy-authentication\n  namespace: default\n  resourceVersion: \"8168670\"\n  uid: 16292254-83cd-4df2-8a9c-bc4c718e4b4a\ntype: Opaque\n```\n\nUnder data, you should see base64-encoded values associated with your token username and password.\n\nCongratulations! You now have a manifest repository, a deploy token, and a secret generated directly on your cluster.\n\n### Configure Flux to sync your manifests\nNext, tell flux-config to sync with the web-app-manifests repository.\n\nTo do so, create a [GitRepository resource](https://docs.openshift.com/container-platform/3.11/dev_guide/application_lifecycle/new_app.html) in OpenShift:\n\n1. Clone the flux repo to your machine.\n```\n# Remember that we already have the flux repo cloned into our home dir.\ncd ~/flux\ngit pull\n```\n\n2. 
In your local clone of flux, add the GitRepository file `clusters/my-cluster/web-app-manifests-source.yaml`:\n  \n```\n",[834,9],{"slug":1166,"featured":6,"template":683},"set-up-flux-for-gitops-on-openshift","content:en-us:blog:set-up-flux-for-gitops-on-openshift.yml","Set Up Flux For Gitops On Openshift","en-us/blog/set-up-flux-for-gitops-on-openshift.yml","en-us/blog/set-up-flux-for-gitops-on-openshift",{"_path":1172,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":1173,"content":1179,"config":1186,"_id":1188,"_type":14,"title":1189,"_source":16,"_file":1190,"_stem":1191,"_extension":19},"/en-us/blog/setting-up-the-k-agent",{"title":1174,"description":1175,"ogTitle":1174,"ogDescription":1175,"noIndex":6,"ogImage":1176,"ogUrl":1177,"ogSiteName":671,"ogType":672,"canonicalUrls":1177,"schema":1178},"How to deploy the GitLab Agent for Kubernetes with limited permissions"," Learn how to deploy the GitLab Agent for Kubernetes with Limited Permissions.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749668655/Blog/Hero%20Images/seabass-creatives-U3m4_cKbUfc-unsplash.jpg","https://about.gitlab.com/blog/setting-up-the-k-agent","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How to deploy the GitLab Agent for Kubernetes with limited permissions\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Fernando Diaz\"}],\n        \"datePublished\": \"2021-09-10\",\n      }",{"title":1174,"description":1175,"authors":1180,"heroImage":1176,"date":1182,"body":1183,"category":1184,"tags":1185},[1181],"Fernando Diaz","2021-09-10","\n\nThe [GitLab Agent for Kubernetes (`agentk`)](https://gitlab.com/gitlab-org/cluster-integration/gitlab-agent) is an active in-cluster component for solving GitLab and Kubernetes integration tasks in a secure and cloud-native way. 
The `agentk` communicates to the GitLab Agent Server (KAS) to perform [GitOps](https://about.gitlab.com/topics/gitops/) operations.\n\nIn many examples, we see the agent being deployed with global-level permissions on your Kubernetes cluster. There are use cases where we want to reduce the scope of what agentk has access to. In this guide I will provide information on deploying agentk on your cluster, limiting what namespaces it can access, as well as using it to deploy your applications.\n\nPrefer a video? Watch the walkthrough below to learn how to deploy agentk to your cluster:\n\n\u003Ciframe width=\"560\" height=\"315\" src=\"https://www.youtube-nocookie.com/embed/Sr3X5-O9HWA\" title=\"YouTube video player\" frameborder=\"0\" allow=\"accelerometer; autoplay; clipboard-write; encrypted-media; gyroscope; picture-in-picture\" allowfullscreen>\u003C/iframe>\n\n## How it works\n\nAnytime a developer performs changes to a manifest file managed within GitLab, the agentk will apply these changes to the Kubernetes cluster.\n\n![Kagent flowchart](https://about.gitlab.com/images/blogimages/kagent-limited/1.png){: .shadow.medium}\nHow a change to a manifest file in GitLab is applied to the Kubernetes cluster.\n{: .note.text-center}\n\nThe `agentk` and the KAS use bidirectional streaming to allow the connection acceptor (the gRPC server, GitLab Agent Server) to act as a client. The connection acceptor sends requests as gRPC replies.\n\n![Bidirectional streaming flowchart](https://about.gitlab.com/images/blogimages/kagent-limited/2.png){: .shadow.medium}\nHow bidirectional streaming with agentk works.\n{: .note.text-center}\n\n- GitLab RoR is the main GitLab application. It uses gRPC to talk to kas.\n\n- `agentk` is the GitLab Agent for Kubernetes. It keeps a connection established to a\nkas instance, waiting for requests to process. 
It may also actively send information\nabout things happening in the cluster.\n\n- KAS is the GitLab Agent Server, and is responsible for:\n  - Accepting requests from agentk\n  - Authentication of requests from agentk by querying GitLab RoR\n  - Fetching the agent's configuration file from a corresponding Git repository by querying Gitaly\n  - Matching incoming requests from GitLab RoR with existing connections from the right agentk, forwarding requests to it, and forwarding responses back\n  - Polling manifest repositories for GitOps support by communicating with Gitaly\n\n## How to deploy the GitLab Agent\n\nIn order to deploy the agent, we require the following:\n\n- Kubernetes cluster (I am using Google Kubernetes Engine, or GKE)\n- The GitLab project which will hold the agentk configuration and deployment manifest, you can import [Simple Agent K](https://gitlab.com/tech-marketing/devsecops/kubernetes-agent/simple-agent-k) which includes an application and CICD configured\n\n**Note:** The agentk configuration file and deployment manifests can be located in different projects. It just depends how you want to organize the GitOps workflow.\n\n**1. Create `.gitlab/agent/agent-name/config.yaml` directory in your project** and replace `agent-name` with whatever you want to name your agent.\n\n  ```\n  gitops:\n    manifest_projects:\n    - id: \"Your Project ID\"\n      paths:\n      - glob: '/manifests/*.{yaml,yml,json}'\n  ```\n\n  Remember to replace `Your Project ID` with the projectID of your project, seen below:\n\n   ![Replace projectID for your project](https://about.gitlab.com/images/blogimages/kagent-limited/3.png){: .shadow.medium}\n   Fill in the projectID section with your information.\n   {: .note.text-center}\n\n  **Note:** You can also use the path to the project in GitLab, i.e., mygroup/mysub/myproject.\n\n**2. 
Create agent record in GitLab**\n\n  A GitLab Rails Agent record is used to associate the cluster with the configuration repository project.\n\n  - Go to **Infrastructure > Kubernetes** tab\n\n   ![Click Kubernetes cluster tab](https://about.gitlab.com/images/blogimages/kagent-limited/4.png){: .shadow.medium}\n   Click the Kubernetes cluster tab in GitLab.\n   {: .note.text-center}\n\n  - Click on the **GitLab Agent managed clusters** tab\n\n   ![Click GitLab Agent tab](https://about.gitlab.com/images/blogimages/kagent-limited/5.png){: .shadow.medium}\n   What the GitLab Agent tab looks like\n   {: .note.text-center}\n\n  - Click the **Install a new GitLab Agent** button\n\n   ![Click Install new GitLab Agent button](https://about.gitlab.com/images/blogimages/kagent-limited/5.png){: .shadow.medium}\n   What the \"Install new GitLab agent\" button looks like.\n   {: .note.text-center}\n\n  - Select your agent\n\n   ![How to select your agent in GitLab](https://about.gitlab.com/images/blogimages/kagent-limited/6.png){: .shadow.medium}\n   How to select your agent in GitLab\n   {: .note.text-center}\n\n  - Save the provided token\n\n   ![How to save your provided token](https://about.gitlab.com/images/blogimages/kagent-limited/7.png){: .shadow.medium}\n   Click here to save your provided token.\n   {: .note.text-center}\n\n**3. Open a Terminal window**\n\n**4. Scope kubectl to your cluster**\n\n  ```\n  $ gcloud container clusters get-credentials fern-gitops-2 --zone us-central1-c --project group-cs-9b54eb\n\n  Fetching cluster endpoint and auth data.\n  kubeconfig entry generated for fern-gitops-2.\n  ```\n\n**5. Create the namespace for the Kubernetes agent**\n\n  ```\n  $ kubectl create ns gitlab-kubernetes-agent\n\n  namespace/gitlab-kubernetes-agent created\n  ```\n\n**6. 
Create agent secret**\n\n  This secret is used to store the token needed to configure the agent.\n\n  ```\n  $ kubectl create secret generic -n gitlab-kubernetes-agent gitlab-kubernetes-agent-token --from-literal=token='YOUR_AGENT_TOKEN'\n\n  secret/gitlab-kubernetes-agent-token created\n  ```\n\n**7. Apply the agentk deployment with limited access**\n\n  In this deployment below, we will create the following:\n\n### Namespaces\n\n  - **gitlab-kubernetes-agent**: Where the agent will be deployed\n  - **dude**: A namespace where agentk has permission to deploy\n  - **naww**: A namespace where the agentk has no permissions\n\n### Service accounts\n\n  - **gitlab-kubernetes-agent**: Service account used for running agentk\n\n### Deployments\n\n  - **gitlab-kubernetes-agent**: The actual agentk client application\n\n### Cluster roles and bindings\n\n  - **gitlab-kubernetes-agent-write-cm:** Permission for agentk to write all configmaps on the cluster\n  - **gitlab-kubernetes-agent-read-cm:** Permission for agentk to read all configmaps on the cluster\n\n### Roles and bindings\n\n  - **gitlab-kubernetes-agent-write**: Permission for agentk to write all resources on gitlab-kubernetes-agent ns\n  - **gitlab-kubernetes-agent-read**: Permission for agentk to read all resources on gitlab-kubernetes-agent ns\n  - **gitlab-kubernetes-agent-write-dude**: Permission for agentk to write all resources on dude ns\n  - **gitlab-kubernetes-agent-read-dude**: Permission for agentk to read all resources on dude ns\n\nThe next step is to create the deployment file `agentk.yaml`:\n\n  ```\n  apiVersion: v1\n  kind: Namespace\n  metadata:\n    name: dude\n  ---\n  apiVersion: v1\n  kind: Namespace\n  metadata:\n    name: naww\n  ---\n  apiVersion: v1\n  kind: ServiceAccount\n  metadata:\n    name: gitlab-kubernetes-agent\n    namespace: gitlab-kubernetes-agent\n  ---\n  apiVersion: apps/v1\n  kind: Deployment\n  metadata:\n    name: gitlab-kubernetes-agent\n    namespace: 
gitlab-kubernetes-agent\n  spec:\n    replicas: 1\n    selector:\n      matchLabels:\n        app: gitlab-kubernetes-agent\n    template:\n      metadata:\n        labels:\n          app: gitlab-kubernetes-agent\n        namespace: gitlab-kubernetes-agent\n      spec:\n        serviceAccountName: gitlab-kubernetes-agent\n        containers:\n        - name: agent\n          image: \"registry.gitlab.com/gitlab-org/cluster-integration/gitlab-agent/agentk:stable\"\n          args:\n          - --token-file=/config/token\n          - --kas-address\n          - wss://kas.gitlab.com # for GitLab.com users, use this KAS.\n          volumeMounts:\n          - name: token-volume\n            mountPath: /config\n        volumes:\n        - name: token-volume\n          secret:\n            secretName: gitlab-kubernetes-agent-token\n    strategy:\n      type: RollingUpdate\n      rollingUpdate:\n        maxSurge: 0\n        maxUnavailable: 1\n  ---\n  apiVersion: rbac.authorization.k8s.io/v1\n  kind: ClusterRole\n  metadata:\n    name: gitlab-kubernetes-agent-write-cm\n  rules:\n  - resources:\n    - 'configmaps'\n    apiGroups:\n    - ''\n    verbs:\n    - create\n    - update\n    - delete\n    - patch\n  ---\n  apiVersion: rbac.authorization.k8s.io/v1\n  kind: ClusterRoleBinding\n  metadata:\n    name: gitlab-kubernetes-agent-write-binding-cm\n  roleRef:\n    name: gitlab-kubernetes-agent-write-cm\n    kind: ClusterRole\n    apiGroup: rbac.authorization.k8s.io\n  subjects:\n  - name: gitlab-kubernetes-agent\n    kind: ServiceAccount\n    namespace: gitlab-kubernetes-agent\n  ---\n  apiVersion: rbac.authorization.k8s.io/v1\n  kind: ClusterRole\n  metadata:\n    name: gitlab-kubernetes-agent-read-cm\n  rules:\n  - resources:\n    - 'configmaps'\n    apiGroups:\n    - ''\n    verbs:\n    - get\n    - list\n    - watch\n  ---\n  apiVersion: rbac.authorization.k8s.io/v1\n  kind: ClusterRoleBinding\n  metadata:\n    name: gitlab-kubernetes-agent-read-binding-cm\n  roleRef:\n    
name: gitlab-kubernetes-agent-read-cm\n    kind: ClusterRole\n    apiGroup: rbac.authorization.k8s.io\n  subjects:\n  - name: gitlab-kubernetes-agent\n    kind: ServiceAccount\n    namespace: gitlab-kubernetes-agent\n  ---\n  apiVersion: rbac.authorization.k8s.io/v1\n  kind: Role\n  metadata:\n    namespace: gitlab-kubernetes-agent\n    name: gitlab-kubernetes-agent-write\n  rules:\n  - resources:\n    - '*'\n    apiGroups:\n    - '*'\n    verbs:\n    - create\n    - update\n    - delete\n    - patch\n  ---\n  apiVersion: rbac.authorization.k8s.io/v1\n  kind: RoleBinding\n  metadata:\n    namespace: gitlab-kubernetes-agent\n    name: gitlab-kubernetes-agent-write-binding\n  roleRef:\n    name: gitlab-kubernetes-agent-write\n    kind: Role\n    apiGroup: rbac.authorization.k8s.io\n  subjects:\n  - name: gitlab-kubernetes-agent\n    kind: ServiceAccount\n    namespace: gitlab-kubernetes-agent\n  ---\n  apiVersion: rbac.authorization.k8s.io/v1\n  kind: Role\n  metadata:\n    namespace: gitlab-kubernetes-agent\n    name: gitlab-kubernetes-agent-read\n  rules:\n  - resources:\n    - '*'\n    apiGroups:\n    - '*'\n    verbs:\n    - get\n    - list\n    - watch\n  ---\n  apiVersion: rbac.authorization.k8s.io/v1\n  kind: RoleBinding\n  metadata:\n    namespace: gitlab-kubernetes-agent\n    name: gitlab-kubernetes-agent-read-binding\n  roleRef:\n    name: gitlab-kubernetes-agent-read\n    kind: Role\n    apiGroup: rbac.authorization.k8s.io\n  subjects:\n  - name: gitlab-kubernetes-agent\n    kind: ServiceAccount\n    namespace: gitlab-kubernetes-agent\n  ---\n  apiVersion: rbac.authorization.k8s.io/v1\n  kind: Role\n  metadata:\n    namespace: dude\n    name: gitlab-kubernetes-agent-write-dude\n  rules:\n  - resources:\n    - '*'\n    apiGroups:\n    - '*'\n    verbs:\n    - create\n    - update\n    - delete\n    - patch\n  ---\n  apiVersion: rbac.authorization.k8s.io/v1\n  kind: RoleBinding\n  metadata:\n    namespace: dude\n    name: 
gitlab-kubernetes-agent-write-binding-dude\n  roleRef:\n    name: gitlab-kubernetes-agent-write-dude\n    kind: Role\n    apiGroup: rbac.authorization.k8s.io\n  subjects:\n  - name: gitlab-kubernetes-agent\n    kind: ServiceAccount\n    namespace: gitlab-kubernetes-agent\n  ---\n  apiVersion: rbac.authorization.k8s.io/v1\n  kind: Role\n  metadata:\n    namespace: dude\n    name: gitlab-kubernetes-agent-read-dude\n  rules:\n  - resources:\n    - '*'\n    apiGroups:\n    - '*'\n    verbs:\n    - get\n    - list\n    - watch\n  ---\n  apiVersion: rbac.authorization.k8s.io/v1\n  kind: RoleBinding\n  metadata:\n    namespace: dude\n    name: gitlab-kubernetes-agent-read-binding-dude\n  roleRef:\n    name: gitlab-kubernetes-agent-read-dude\n    kind: Role\n    apiGroup: rbac.authorization.k8s.io\n  subjects:\n  - name: gitlab-kubernetes-agent\n    kind: ServiceAccount\n    namespace: gitlab-kubernetes-agent\n  ```\n\nNow we can apply the deployment with the following command:\n\n  ```\n  $ kubectl apply -f k-agent.yaml\n\n  namespace/dude created\n  namespace/naww created\n  serviceaccount/gitlab-kubernetes-agent created\n  deployment.apps/gitlab-kubernetes-agent created\n  clusterrole.rbac.authorization.k8s.io/gitlab-kubernetes-agent-write-cm created\n  clusterrolebinding.rbac.authorization.k8s.io/gitlab-kubernetes-agent-write-binding-cm created\n  clusterrole.rbac.authorization.k8s.io/gitlab-kubernetes-agent-read-cm created\n  clusterrolebinding.rbac.authorization.k8s.io/gitlab-kubernetes-agent-read-binding-cm created\n  role.rbac.authorization.k8s.io/gitlab-kubernetes-agent-write created\n  rolebinding.rbac.authorization.k8s.io/gitlab-kubernetes-agent-write-binding created\n  role.rbac.authorization.k8s.io/gitlab-kubernetes-agent-read created\n  rolebinding.rbac.authorization.k8s.io/gitlab-kubernetes-agent-read-binding created\n  role.rbac.authorization.k8s.io/gitlab-kubernetes-agent-write-dude created\n  
rolebinding.rbac.authorization.k8s.io/gitlab-kubernetes-agent-write-binding-dude created\n  role.rbac.authorization.k8s.io/gitlab-kubernetes-agent-read-dude created\n  rolebinding.rbac.authorization.k8s.io/gitlab-kubernetes-agent-read-binding-dude created\n  ```\n\n  **Note:** You see we are giving permissions to the gitlab-kubernetes-agent on the `dude` namespace, but not on the `naww` namespace. Currently, permissions for ConfigMaps are necessary but the scope can be reduced.\n\n**8. Make sure agentk is running**\n\n  ```\n  $ kubectl get pods -n gitlab-kubernetes-agent\n\n  NAME                            READY   STATUS    RESTARTS   AGE\n  gitlab-agent-58869d96bd-nqqnf   1/1     Running   0          10s\n  ```\n\nNow that the agentk is deployed, it can start managing our Kubernetes deployments.\n\n## Managing deployments\n\nNow let's go back to the GitLab UI, and add some applications to deploy using GitOps.\n\n**1. Open the Web IDE and create a manifest folder in your project root**\n\n**2. Add a manifest file for what you want to deploy on the `dude` namespace, name it `dude.yaml`**\n\n  ```\n  apiVersion: apps/v1\n  kind: Deployment\n  metadata:\n    name: nginx-deployment-dude\n    namespace: dude  # Can be any namespace managed by you that the agent has access to.\n  spec:\n    selector:\n      matchLabels:\n        app: nginx\n    replicas: 1\n    template:\n      metadata:\n        labels:\n          app: nginx\n      spec:\n        containers:\n        - name: nginx\n          image: nginx:1.14.2\n          ports:\n          - containerPort: 80\n  ```\n\n**3. 
Add a manifest file for what you want to deploy on the `naww` namespace and name it `naww.yaml`**\n\n  ```\n  apiVersion: apps/v1\n  kind: Deployment\n  metadata:\n    name: nginx-deployment-naww\n    namespace: naww  # Can be any namespace managed by you that the agent has access to.\n  spec:\n    selector:\n      matchLabels:\n        app: nginx\n    replicas: 1\n    template:\n      metadata:\n        labels:\n          app: nginx\n      spec:\n        containers:\n        - name: nginx\n          image: nginx:1.14.2\n          ports:\n          - containerPort: 80\n  ```\n\n**4. Commit changes and wait for the pipeline to run**\n\n**5. Check dude namespace**\n\n  ```\n  $ kubectl get pods -n dude\n\n  NAME                                     READY   STATUS    RESTARTS   AGE\n  nginx-deployment-dude-66b6c48dd5-rpxx2   1/1     Running   0          6m22s\n  ```\n\n  Notice that the application has deployed.\n\n**6. Check naww namespace**\n\n  ```\n  $ kubectl get pods -n naww\n\n  No resources found in naww namespace.\n  ```\n\n  Notice there is nothing on there.\n\n**7. 
Look at the k-agent logs**\n\n  ```\n  $ kubectl get pods -n gitlab-kubernetes-agent\n\n  NAME                            READY   STATUS    RESTARTS   AGE\n  gitlab-agent-58869d96bd-nqqnf   1/1     Running   0          10s\n\n  $ kubectl logs gitlab-agent-58869d96bd-nqqnf -n gitlab-kubernetes-agent\n\n  {\"level\":\"info\",\"time\":\"2021-08-19T19:17:26.088Z\",\"msg\":\"Feature status change\",\"feature_name\":\"tunnel\",\"feature_status\":true}\n  {\"level\":\"info\",\"time\":\"2021-08-19T19:17:26.088Z\",\"msg\":\"Observability endpoint is up\",\"mod_name\":\"observability\",\"net_network\":\"tcp\",\"net_address\":\"[::]:8080\"}\n  {\"level\":\"info\",\"time\":\"2021-08-19T19:17:26.375Z\",\"msg\":\"Starting synchronization worker\",\"mod_name\":\"gitops\",\"project_id\":\"devsecops/gitops-project\"}\n  ...\n  ```\n\n  You should see logs as follows:\n\n  Application successfully deployed to `dude`\n\n  ```\n  {\"level\":\"info\",\"time\":\"2021-08-20T22:03:57.561Z\",\"msg\":\"Synchronizing objects\",\"mod_name\":\"gitops\",\"project_id\":\"29010173\",\"agent_id\":711,\"commit_id\":\"221499beaf2dcf267cd40324235570001e928817\"}\n  {\"eventType\":\"resourceStatus\",\"group\":\"apps\",\"kind\":\"Deployment\",\"message\":\"Deployment is available. 
Replicas: 1\",\"name\":\"nginx-deployment-dude\",\"namespace\":\"dude\",\"status\":\"Current\",\"timestamp\":\"2021-08-20T22:03:58Z\",\"type\":\"status\"}\n  ```\n\n  Application failed to deploy to `naww`\n\n  ```\n  {\"eventType\":\"resourceStatus\",\"group\":\"apps\",\"kind\":\"Deployment\",\"message\":\"\",\"name\":\"nginx-deployment-naww\",\"namespace\":\"naww\",\"status\":\"Unknown\",\"timestamp\":\"2021-08-20T22:03:29Z\",\"type\":\"status\"}\n  {\"level\":\"warn\",\"time\":\"2021-08-20T22:03:30.015Z\",\"msg\":\"Synchronization failed\",\"mod_name\":\"gitops\",\"project_id\":\"29010173\",\"agent_id\":711,\"commit_id\":\"221499beaf2dcf267cd40324235570001e928817\",\"error\":\"1 resources failed\"}\n  ```\n\nWe can see that deployments only happen on the `dude` namespace because that is all the k-agent has access to. You can add access to other namespaces by creating [Roles and RoleBindings](https://kubernetes.io/docs/reference/access-authn-authz/rbac/) for each namespace like we did for the `dude` namespace.\n\n## Securing GitOps workflow on Kubernetes\n\nNow you have seen how you can create a more restrictive GitOps workflow, allowing you to meet your security needs.\n\nThanks for reading! I hope this guide brings you one step forward into using and securing your GitOps workflow on Kubernetes. 
For more information see the [GitLab Agent documentation](https://docs.gitlab.com/ee/user/clusters/agent/).\n\nPhoto by \u003Ca href=\"https://unsplash.com/@sebbb?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText\">seabass creatives\u003C/a> on \u003Ca href=\"https://unsplash.com/s/photos/limited?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText\">Unsplash\u003C/a>\n{: .note}\n\n## Read more on Kubernetes:\n\n- [How to install and use the GitLab Kubernetes Operator](/blog/gko-on-ocp/)\n\n- [Threat modeling the Kubernetes Agent: from MVC to continuous improvement](/blog/threat-modeling-kubernetes-agent/)\n\n- [A new era of Kubernetes integrations on GitLab.com](/blog/gitlab-kubernetes-agent-on-gitlab-com/)\n\n- [Understand Kubernetes terminology from namespaces to pods](/blog/kubernetes-terminology/)\n\n- [What we learned after a year of GitLab.com on Kubernetes](/blog/year-of-kubernetes/)\n","devsecops",[1056,9,702],{"slug":1187,"featured":6,"template":683},"setting-up-the-k-agent","content:en-us:blog:setting-up-the-k-agent.yml","Setting Up The K Agent","en-us/blog/setting-up-the-k-agent.yml","en-us/blog/setting-up-the-k-agent",{"_path":1193,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":1194,"content":1200,"config":1205,"_id":1207,"_type":14,"title":1208,"_source":16,"_file":1209,"_stem":1210,"_extension":19},"/en-us/blog/simple-kubernetes-management-with-gitlab",{"title":1195,"description":1196,"ogTitle":1195,"ogDescription":1196,"noIndex":6,"ogImage":1197,"ogUrl":1198,"ogSiteName":671,"ogType":672,"canonicalUrls":1198,"schema":1199},"Simple Kubernetes management with GitLab","Follow our tutorial to provision a Kubernetes cluster and manage it with IAC using Terraform and Helm in 20 minutes or less.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749670037/Blog/Hero%20Images/auto-deploy-google-cloud.jpg","https://about.gitlab.com/blog/simple-kubernetes-management-with-gitlab","\n                        {\n      
  \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Simple Kubernetes management with GitLab\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Noah Ing\"}],\n        \"datePublished\": \"2022-11-15\",\n      }",{"title":1195,"description":1196,"authors":1201,"heroImage":1197,"date":1202,"body":1203,"category":767,"tags":1204},[1117],"2022-11-15","\n\nKubernetes can be very complex and has dozens of tutorials out there on how to provision and manage a cluster. This tutorial aims to provide a simple, lightweight solution to provision a Kubernetes cluster and manage it with infrastructure as code (IaC) using Terraform and Helm in 20 minutes or less.\n\n**The final product of this tutorial will be two IaC repositories with fully functional CI/CD pipelines:**\n\n1. [gitlab-terraform-k8s](https://gitlab.com/gitlab-org/configure/examples/gitlab-terraform-eks) - A single source of truth to provision, configure, and manage your Kubernetes infrastructure using Terraform\n1. [cluster-management](https://gitlab.com/gitlab-org/project-templates/cluster-management) - A single source of truth to define the desired state of your Kubernetes cluster using the GitLab Agent for Kubernetes and Helm\n\n![Final Product](https://about.gitlab.com/images/blogimages/2022-11-11-simple-kubernetes-management-with-gitlab/final-product.png){: .shadow}\n\n\n### Prerequisites\n- AWS or GCP account with permissions to provision resources\n- GitLab account \n- Access to a GitLab Runner\n- 20 minutes\n\n### An overview of this tutorial is as follows:\n\n1. Set up the GitLab Terraform Kubernetes Template 🏗️\n2. Register the GitLab Agent 🕵️\n3. Add in Cloud Credentials ☁️🔑\n4. Set up the Kubernetes Cluster Management Template 🚧\n5. Enjoy your Kubernetes Cluster completely managed in code! 
👏\n\n## Set up the GitLab Terraform Kubernetes Template\n\nStart by importing the example project by URL - [https://gitlab.com/projects/new#import_project](https://gitlab.com/projects/new#import_project)\n\nTo import the project:\n\n1. In GitLab, on the top bar, select **Main menu > Projects > View all projects**.\n2. On the right of the page, select **New project**.\n3. Select **Import project**.\n4. Select **Repository by URL**.\n5. For the Git repository URL:\n- [GCP Google Kubernetes Engine](https://cloud.google.com/kubernetes-engine): https://gitlab.com/gitlab-org/configure/examples/gitlab-terraform-gke.git\n- [AWS Elastic Kubernetes Service](https://aws.amazon.com/eks/): https://gitlab.com/gitlab-org/configure/examples/gitlab-terraform-eks.git\n6. Complete the fields and select **Create project**.\n\n## Register the GitLab Agent\n\nWith your newly created **gitlab-terraform-k8s** repo, create a GitLab Agent for Kubernetes:\n\n1. On the left sidebar, select **Infrastructure > Kubernetes clusters**. Select **Connect a cluster (agent).**\n2. From the **Select an agent** dropdown list, select **eks-agent/gke-agent and select **Register an agent**.\n3. GitLab generates a registration token for the agent. **Securely store this secret token, as you will need it later.**\n4. GitLab provides an address for the agent server (KAS). Securely store this as you will also need it later.\n5. Add this to the **gitlab-terraform-eks/.gitlab/agents/eks-agent/config.yaml** in order to allow the GitLab Agent to have access to your entire group.\n\n```yaml\nci_access:\n  groups:\n    - id: your-namespace-here\n```\n\n![Register GitLab Agent](https://about.gitlab.com/images/blogimages/2022-11-11-simple-kubernetes-management-with-gitlab/register-gitlab-agent.png){: .shadow}\n\n\n## Add in your Cloud Credentials to CI/CD variables\n\n### [AWS EKS](https://aws.amazon.com/eks/)\n\nOn the left sidebar, select **Settings > CI/CD. Expand Variables**.\n1. 
Set the variable **AWS_ACCESS_KEY_ID** to your AWS access key ID.\n2. Set the variable **AWS_SECRET_ACCESS_KEY** to your AWS secret access key.\n3. Set the variable **TF_VAR_agent_token** to the agent token displayed in the previous task.\n4. Set the variable **TF_VAR_kas_address** to the agent server address displayed in the previous task.\n\n![Add in CI/CD variables](https://about.gitlab.com/images/blogimages/2022-11-11-simple-kubernetes-management-with-gitlab/cicd-variables.png){: .shadow}\n\n\n### [GCP GKE](https://cloud.google.com/kubernetes-engine)\n\n1. To authenticate GCP with GitLab, create a GCP service account with the following roles: **Compute Network Viewer, Kubernetes Engine Admin, Service Account User, and Service Account Admin**. Both User and Admin service accounts are necessary. The User role impersonates the default service account when creating the node pool. The Admin role creates a service account in the kube-system namespace.\n2. **Download the JSON file** with the service account key you created in the previous step.\n3. On your computer, encode the JSON file to base64 (replace /path/to/sa-key.json to the path to your key):\n\n```\nbase64 -i /path/to/sa-key.json | tr -d\n```\n\n- Use the output of this command as the **BASE64_GOOGLE_CREDENTIALS** environment variable in the next step.\n\nOn the left sidebar, select **Settings > CI/CD. Expand Variables**.\n5. Set the variable **BASE64_GOOGLE_CREDENTIALS** to the base64 encoded JSON file you just created.\n6. Set the variable **TF_VAR_gcp_project** to your GCP’s project name.\n7. Set the variable **TF_VAR_agent_token** to the agent token displayed in the previous task.\n8. 
Set the variable **TF_VAR_kas_address** to the agent server address displayed in the previous task.\n\n## Run GitLab CI to deploy your Kubernetes cluster!\n\n![Deploy Kubernetes cluster](https://about.gitlab.com/images/blogimages/2022-11-11-simple-kubernetes-management-with-gitlab/pipeline-view.png){: .shadow}\n\nWhen successfully completed, view the cluster in the AWS/GCP console!\n\n![AWS EKS](https://about.gitlab.com/images/blogimages/2022-11-11-simple-kubernetes-management-with-gitlab/aws-eks.png){: .shadow}\n\n### You are halfway done! 👏 Keep it up!\n\n## Set up the Kubernetes Cluster Management Project\n\nCreate a project from the cluster management project template - [https://gitlab.com/projects/new#create_from_template](https://gitlab.com/projects/new#create_from_template)\n\n1. In GitLab, on the top bar, select **Main menu > Projects > View all projects**.\n2. On the right of the page, select **New project**.\n3. Select **Create from template**.\n4. From the list of templates, next to **GitLab Cluster Management**, select **Use template**.\n5. Enter the project details. Ensure this project is created in the same namespace as the gitlab-terraform-k8s project.\n6. Select **Create project**.\n7. Once the project is created on the left sidebar, select **Settings > CI/CD. Expand Variables**.\n8. Set the variable KUBE_CONTEXT to point to the GitLab Agent. For example, `noah-ing-demos/infrastructure/gitlab-terraform-eks:eks-agent`.\n\n![Set Kube Context](https://about.gitlab.com/images/blogimages/2022-11-11-simple-kubernetes-management-with-gitlab/kube-config.png){: .shadow}\n\n\n- **Uncomment the applications you'd like to be installed** into your Kubernetes cluster in the **helmfile.yaml**. In this instance I chose ingress, cert-manager, prometheus, and Vault. 
\n\n![Uncomment Applications in helmfile](https://about.gitlab.com/images/blogimages/2022-11-11-simple-kubernetes-management-with-gitlab/helmfile.png){: .shadow}\n\nThat will trigger your **CI/CD pipeline** and it should look like this.\n\n![Cluster Management CI/CD](https://about.gitlab.com/images/blogimages/2022-11-11-simple-kubernetes-management-with-gitlab/cluster-management-cicd.png){: .shadow}\n\nOnce completed, **go to the AWS/GCP console** and check out all the deployed resources!\n\n![Deployed EKS applications](https://about.gitlab.com/images/blogimages/2022-11-11-simple-kubernetes-management-with-gitlab/deployed-eks-applications.png){: .shadow}\n\n### Voila! 🎉\n\n## Enjoy your Kubernetes cluster completely defined in code! 👏👏👏\n\nNow with these two repositories you can **manage a Kubernetes cluster entirely through code**:\n\n- For managing the Kubernetes cluster's infrastructure and configuring its resources you can make changes to the [gitlab-terraform-eks](https://gitlab.com/gitlab-org/configure/examples/gitlab-terraform-eks) repository you have setup. 
This project has a **Terraform CI/CD pipeline** that will allow you to **review, provision, configure, and manage your Kubernetes** infrastructure with ease.\n\n- For managing the desired state of the Kubernetes cluster, the [cluster-management](https://gitlab.com/gitlab-org/project-templates/cluster-management) repository has a **GitLab Agent** set up and will **deploy any Kubernetes objects defined in the helm files**.\n\n➡️ Bonus: If you'd like to deploy your own application to the Kubernetes cluster, then add to your **cluster-management** `helmfile` and see the GitLab Agent for Kubernetes roll it out with ease!\n\n\n## References\n- [Create a New GKE Cluster](https://docs.gitlab.com/ee/user/infrastructure/clusters/connect/new_gke_cluster.html)\n- [Create a New EKS Cluster](https://docs.gitlab.com/ee/user/infrastructure/clusters/connect/new_eks_cluster.html)\n- [Cluster Management Project](https://docs.gitlab.com/ee/user/clusters/management_project.html)\n\n\n## Related posts\n- [The ultimate guide to GitOps with GitLab](https://about.gitlab.com/blog/the-ultimate-guide-to-gitops-with-gitlab/)\n- [GitOps with GitLab: Infrastructure provisioning with GitLab and Terraform](https://about.gitlab.com/blog/gitops-with-gitlab-infrastructure-provisioning/)\n- [GitOps with GitLab: Connect with a Kubernetes cluster](https://about.gitlab.com/blog/gitops-with-gitlab-connecting-the-cluster/)\n",[834,812,9,1034,811,769],{"slug":1206,"featured":6,"template":683},"simple-kubernetes-management-with-gitlab","content:en-us:blog:simple-kubernetes-management-with-gitlab.yml","Simple Kubernetes Management With 
Gitlab","en-us/blog/simple-kubernetes-management-with-gitlab.yml","en-us/blog/simple-kubernetes-management-with-gitlab",{"_path":1212,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":1213,"content":1219,"config":1224,"_id":1226,"_type":14,"title":1227,"_source":16,"_file":1228,"_stem":1229,"_extension":19},"/en-us/blog/the-kubecon-summary-from-a-product-perspective",{"title":1214,"description":1215,"ogTitle":1214,"ogDescription":1215,"noIndex":6,"ogImage":1216,"ogUrl":1217,"ogSiteName":671,"ogType":672,"canonicalUrls":1217,"schema":1218},"How what we learned at KubeCon EU 2022 will impact our product roadmaps","Platform integrations and secrets management are among our product team's primary takeaways. Find out why.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097776/Blog/Hero%20Images/Blog/Hero%20Images/2_2.png_1750097776369.png","https://about.gitlab.com/blog/the-kubecon-summary-from-a-product-perspective","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How what we learned at KubeCon EU 2022 will impact our product roadmaps\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Viktor Nagy\"}],\n        \"datePublished\": \"2022-05-31\",\n      }",{"title":1214,"description":1215,"authors":1220,"heroImage":1216,"date":1221,"body":1222,"category":1184,"tags":1223},[786],"2022-05-31","\nAfter two years of only virtual KubeCon events, the GitLab product team was excited to participate in and meet colleagues, partners, and more from our industry at KubeCon EU 2022, held in Valencia, Spain. We were present with four product leaders, a software developer, and a UX researcher. This post summarizes our primary takeaways from the conference, an experience that will affect our roadmaps.\n\nWe will discuss the following topics:\n\n- Internal platforms and GitOps\n- Secrets management\n- Infrastructure integrations\n- WebAssembly a.k.a. 
WASM\n\nThere were 32 topic types and several 0-day events at KubeCon. Many talks focused on a few tools. Many Cloud Native Computing Foundation ([CNCF](https://www.cncf.io/)) projects had their community meetings during these days. Some talks were given IRL, and others were broadcast virtually with live Q&A. There were a variety of topics and approaches. There were many talks about the various aspects of cluster management, too. However, we left this topic out on purpose because at GitLab we want to focus on the software developers and provide one DevOps platform to support their work. Cluster management is one step away from this focus. Still, we noticed some remarkable patterns as highlighted by the four elements of our list.\n\n> You’re invited! Join us on June 23rd for the [GitLab 15 launch event](https://page.gitlab.com/fifteen) with DevOps guru Gene Kim and several GitLab leaders. They’ll show you what they see for the future of DevOps and The One DevOps Platform.\n\n## Internal platforms and GitOps\n\nCompanies want their developers to focus on their core business. They create internal platforms to hide the complexity of Day 0-2 operations from their software engineers and still allow the \"shift left\" movement of DevOps. These platforms often involve the welding of several tools.\n\nMany talks presented how the given team or company approached their platform problem and what tools they used, and one could often feel the 18-month sweat of a whole platform team trying to come up with a solution.\n\nThese platforms use either a push- or pull-based model for deployments. No single approach is emerging due to legacy applications and different requirements. 
While there is a definition of GitOps provided by the [OpenGitOps](https://opengitops.dev/) initiative, several presenters offered their own definitions, including of pull-based deployments.\n\nWe fielded a large-scale survey related to secrets at KubeCon, and learned that users would like help with the [Pipeline Authoring](/direction/verify/pipeline_composition/) workflow.\n\nBesides the wiring of the tools, the industry is still looking for a unified approach to multi-tenancy (there might not be one), and sometimes integrating security processes seems overly challenging.\n\n### How does this affect our roadmap?\n\nThere is a lot of potential in building a platform used as the starting point for internal platforms. Imagine a \"tool\" that shortens the time required to create an internal platform to days or weeks instead of a whole year. This is the GitLab vision of The One DevOps platform.\n\nAs a result, we don't plan any changes in our direction. We will continue investing in the recently started [Deployment direction](/direction/delivery/) to provide all the building blocks for a platform in a single tool and are already actively looking for integrated experiences across our offering.\n\nWe’re working on a CI/CD Component Catalog that includes CI templates. This will [support the Pipeline Authoring workflow](https://gitlab.com/groups/gitlab-org/-/epics/7462).\n\n## Secrets management\n\nOne of the things that often came up in our discussions is secrets management. We fielded a large-scale survey related to secrets at KubeCon, and attendees were glad that we’re thinking about this topic. Security is part of the DevOps discussion, and secrets management is a serious issue, especially in a cloud-native aspect.\n\n- Jenkins, GitHub and GitLab were all mentioned during the secret management discussions.\n- Users would like to offload the secrets management responsibility to another product. 
In many cases, their security requirements are strict, so they don't want/can't handle secrets by themselves.\n- Hashicorp Vault is a preferred tool (primarily in large enterprise companies working in finance or government) to manage and handle secrets. At the same time, most companies would like to avoid operating one more application in their stack.\n- Open ID Connect [OIDC](https://docs.gitlab.com/ee/integration/openid_connect_provider.html) with the JSON web token (JWT) is an essential direction for us.\n\n### How does this affect our roadmap?\n\nWe should invest more in secrets management since this is a pain our customers would like us to solve, and it's becoming a nonstarter feature for many organizations.\n\nWe want to advance in three main vectors:\n\n- Improve our existing secrets management solution - although we don't have a clear solution, we should improve our current variables capabilities to include additional features that could help users leverage variables for secrets. So it would be a \"good enough\" feature they can use. We are actively working toward this direction by removing some of the limitations we have around [variables and masking](https://gitlab.com/groups/gitlab-org/-/epics/1994).\n- Improve our existing [Hashicorp Vault integration](https://docs.gitlab.com/ee/ci/examples/authenticating-with-hashicorp-vault/) using the JWT token, allowing us to integrate with additional vendors (AWS, AZURE, GCP). Like the previous point, we are moving toward this direction by supporting OIDC and [adding audience claims to our JWT token](https://gitlab.com/groups/gitlab-org/-/epics/7335).\n- We need to develop [a clear strategy for a built-in secrets management solution](/direction/govern/pipeline_security/secrets_management/#next-9-12-monhts). In order to provide our users/customers with choice, GitLab wants to use Hashicorp Vault for secrets management handling. 
We believe that our approach should be not to build the logic ourselves but to leverage an open source, [cloud native](/topics/cloud-native/) project that we could build into GitLab.\n\n## Infrastructure integrations\n\nInfrastructure integrations came in several flavors during the talks. Some are about cluster management, that is not our focus in this blog. Several presentations show that internal platforms need a strong infrastructure aspect, too. When a new project/microservice is started, it might require a new namespace in the cluster with associated RBAC and policies, optionally storage, a source code management repo with automation, and the appropriate permissions. Deployments might create ephemeral environments or could modify the underlying environment within predefined constraints.\n\nThe top tools mentioned in this area are:\n\n- Terraform\n- Crossplane\n- Pulumi\n\n### How does this affect our roadmap?\n\nGitLab already has [great integrations for Terraform](https://docs.gitlab.com/ee/user/infrastructure/iac/), and the other tools are on our radar, too.\n\nWe are open to integrations but cannot currently prioritize the other integrations on our own. We hope that the community will be interested in contributing to benefit everyone.\n\nBuilding Docker containers might not be necessary to get easy-to-manage container binaries. WASM runtimes become available for Kubernetes, and many programming languages can natively compile to WASM. WASM can provide a secure runtime environment without Docker and might be able to simplify the toolchain developers need to learn.\n\nWe don't plan to add direct WASM support to GitLab yet. The generic package registry can hold WASM modules while their deployment is up to the user.\n\nAt the same time, we see a lot of potential in simple runtime environments built around WASM. While GitLab is not in the business of offering runtime services, we will be actively monitoring the market. 
We might look into more WASM integrations as we see more demand and tools and services maturing in this space.\n\n## GitLab feedback\n\nIt's great to work on a product where the overall sentiment is positive, both from customers that intensely rely on it and from attendees that have to use other tools but would love to use GitLab or just started to play with it recently.\n\nWe received the following notable mentions as feedback:\n\n- Stability and reliability improved over the last several months.\n- Users love our documentation (primarily around CI) - they mentioned it's easy to use and get started with.\n- Given the size of GitLab and the number of our users, we received feedback about long-outstanding issues. We were happy to respond that we are addressing at least some of them shortly.\n- Several customers had asked if we got some resources for migrating from Jenkins to GitLab.\n- A few customers mentioned that they had to move away from GitLab mainly because of an upper-level decision despite favouring GitLab.\n\n## Conclusions\n\n![The GitLab team](https://about.gitlab.com/images/blogimages/kubecon-gitlab-team.jpg)\n\nWe enjoyed all the talks and were delighted to meet and speak with our users and customers. Thanks to all of you, we could \"feel the pulse\" on how we are doing and validate our direction.\n\nWe hope that this blog will guide those who could not [attend KubeCon](https://about.gitlab.com/events/kubecon/) and serve as a summary for those who did attend. All the recordings will likely be available on YouTube from Jun 6, 2022.\n\nLet us know in the comments if you think we missed some important direction.\n\n_This blog post and linked pages contain information related to upcoming products, features, and functionality.\nIt is important to note that the information presented is for informational purposes only. Please do not rely on this information for purchasing or planning purposes. 
As with all projects, the items mentioned in this blog and linked pages are subject to change or delay. The development, release, and timing of any products, features, or functionality remain at the sole discretion of GitLab Inc._\n",[812,1034,811,9,813,769],{"slug":1225,"featured":6,"template":683},"the-kubecon-summary-from-a-product-perspective","content:en-us:blog:the-kubecon-summary-from-a-product-perspective.yml","The Kubecon Summary From A Product Perspective","en-us/blog/the-kubecon-summary-from-a-product-perspective.yml","en-us/blog/the-kubecon-summary-from-a-product-perspective",{"_path":1231,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":1232,"content":1237,"config":1242,"_id":1244,"_type":14,"title":1245,"_source":16,"_file":1246,"_stem":1247,"_extension":19},"/en-us/blog/the-ultimate-guide-to-gitops-with-gitlab",{"title":1233,"description":1234,"ogTitle":1233,"ogDescription":1234,"noIndex":6,"ogImage":865,"ogUrl":1235,"ogSiteName":671,"ogType":672,"canonicalUrls":1235,"schema":1236},"The ultimate guide to GitOps with GitLab","This eight-part tutorial series demonstrates how to use GitLab as a best-in-class GitOps tool.","https://about.gitlab.com/blog/the-ultimate-guide-to-gitops-with-gitlab","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"The ultimate guide to GitOps with GitLab\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Viktor Nagy\"}],\n        \"datePublished\": \"2022-04-07\",\n      }",{"title":1233,"description":1234,"authors":1238,"heroImage":865,"date":1239,"body":1240,"category":767,"tags":1241},[786],"2022-04-07","\n\nIt is possible to use GitLab as a best-in-class GitOps tool, and this blog post series is going to show you how. 
[GitOps](/topics/gitops/) is an operational framework that takes DevOps best practices used for application development such as version control, collaboration, compliance, and CI/CD tooling, and applies them to infrastructure automation. This series of easy-to-follow tutorials will focus on different user problems, including provisioning, managing a base infrastructure, and deploying various third-party or custom applications on top of them, that can be solved by pairing GitOps with GitLab.\n\nHere are 8 tutorials on how to do GitOps with GitLab:\n\n**1. [Here's how to do GitOps with GitLab](/blog/gitops-with-gitlab/)**\n\nThis tutorial sets the stage for what you will learn throughout the series, including the tech concepts you'll need to know.\n\n**2. [Infrastructure provisioning with GitLab and Terraform](/blog/gitops-with-gitlab-infrastructure-provisioning/)**\n\nThis tutorial walks you through setting up the underlying infrastructure using GitLab and Terraform.\n\n**3. [Connect with a Kubernetes cluster](/blog/gitops-with-gitlab-connecting-the-cluster/)**\n\nThis tutorial demonstrates how to connect a Kubernetes cluster with GitLab for pull- and push-based deployments and easy security integrations.\n\n**4. [How to tackle secrets management](/blog/gitops-with-gitlab-secrets-management/)**\n\nThis tutorial builds on the previous tutorial to show you how to use a Kubernetes cluster connection to manage secrets within a cluster.\n\n**5. [The CI/CD tunnel](/blog/gitops-with-gitlab-using-ci-cd/)**\n\nThis tutorial introduces you to CI/CD tunnels and shows step-by-step how to access a Kubernetes cluster using GitLab CI/CD.\n\n**6. [Connecting GitLab with a Kubernetes cluster - Auto DevOps](/blog/gitops-with-gitlab-auto-devops/)**\n\nThis tutorial looks at how you can use Auto DevOps with all its bells and whistles to easily manage deployments.\n\n**7. 
[Connecting GitLab with a Kubernetes cluster for GitOps-style application delivery](/blog/gitops-with-gitlab/)**\n\nThis tutorial shows you how to connect an application project to a manifest project for controlled, GitOps-style deployments.\n\n**8. [Turn a GitLab agent for Kubernetes installation to manage itself](/blog/gitops-with-gitlab-manage-the-agent/)**\n\nThis tutorial is the culmination of the previous tutorials and will teach you how to turn a GitLab agent for Kubernetes installation to manage itself.\n\n\n**Read more about GitOps:**\n- [GitLab for GitOps](/solutions/gitops/)\n- [What is GitOps](/topics/gitops/)\n- [GitOps viewed as part of the Ops evolution](/blog/gitops-as-the-evolution-of-operations/)\n- [How to use a push-based approach for GitOps with GitLab scripting and variables](/blog/how-to-agentless-gitops-vars/)\n\n\n\n\n",[769,834,9],{"slug":1243,"featured":6,"template":683},"the-ultimate-guide-to-gitops-with-gitlab","content:en-us:blog:the-ultimate-guide-to-gitops-with-gitlab.yml","The Ultimate Guide To Gitops With Gitlab","en-us/blog/the-ultimate-guide-to-gitops-with-gitlab.yml","en-us/blog/the-ultimate-guide-to-gitops-with-gitlab",{"_path":1249,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":1250,"content":1256,"config":1265,"_id":1267,"_type":14,"title":1268,"_source":16,"_file":1269,"_stem":1270,"_extension":19},"/en-us/blog/tracking-down-missing-tcp-keepalives",{"title":1251,"description":1252,"ogTitle":1251,"ogDescription":1252,"noIndex":6,"ogImage":1253,"ogUrl":1254,"ogSiteName":671,"ogType":672,"canonicalUrls":1254,"schema":1255},"Tracking TCP Keepalives: Lessons in Docker, Golang & GitLab","An in-depth recap of debugging a bug in the Docker client library.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749680874/Blog/Hero%20Images/network.jpg","https://about.gitlab.com/blog/tracking-down-missing-tcp-keepalives","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": 
\"Article\",\n        \"headline\": \"What tracking down missing TCP Keepalives taught me about Docker, Golang, and GitLab\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Stan Hu\"}],\n        \"datePublished\": \"2019-11-15\",\n      }",{"title":1257,"description":1252,"authors":1258,"heroImage":1253,"date":1260,"body":1261,"category":767,"tags":1262},"What tracking down missing TCP Keepalives taught me about Docker, Golang, and GitLab",[1259],"Stan Hu","2019-11-15","\n\nThis blog post was originally published on the GitLab Unfiltered blog. It was reviewed and republished on 2019-12-03.\n{: .alert .alert-info .note}\n\nWhat began as failure in a GitLab static analysis check led to a\ndizzying investigation that uncovered a subtle [bug in the Docker client\nlibrary code](https://github.com/docker/for-linux/issues/853) used by\nthe GitLab Runner. We ultimately worked around the problem by upgrading\nthe Go compiler, but in the process we uncovered an unexpected change in\nthe Go TCP keepalive defaults that fixed an issue with Docker and GitLab\nCI.\n\nThis investigation started on October 23, when backend engineer [Luke\nDuncalfe](/company/team/#.luke) mentioned, \"I'm seeing\n[`static-analysis` failures with no output](https://gitlab.com/gitlab-org/gitlab/-/jobs/331174397).\nIs there something wrong with this job?\" He opened [a GitLab\nissue](https://gitlab.com/gitlab-org/gitlab/issues/34951) to discuss.\n\nWhen Luke ran the static analysis check locally on his laptop, he saw\nuseful debugging output when the test failed. For example, an extraneous\nnewline would accurately be reported by Rubocop. However, when the same\ntest ran in GitLab's automated test infrastructure, the test failed\nquietly:\n\n![Failed job](https://about.gitlab.com/images/blogimages/docker-tcp-keepalive-debug/job-failure.png){: .shadow.center}\n\nNotice how the job log did not include any clues after the `bin/rake\nlint:all` step. 
This made it difficult to determine whether a real\nproblem existed, or whether this was just a flaky test.\n\nIn the ensuing days, numerous team members reported the same problem.\nNothing kills productivity like silent test failures.\n\n## Was something wrong with the test itself?\n\nIn the past, we had seen that if that specific test generated enough\nerrors, [the output buffer would fill up, and the continuous integration\n(CI) job would lock\nindefinitely](https://gitlab.com/gitlab-org/gitlab-foss/issues/61432). We\nthought we had [fixed that issue months\nago](https://gitlab.com/gitlab-org/gitlab-foss/merge_requests/28402). Upon\nfurther review, that fix seemed to eliminate any chance of a thread\ndeadlock.\n\nDid we have to flush the buffer? No, because the Linux kernel will do\nthat for an exiting process already.\n\n## Was there a change in how CI logs were handled?\n\nWhen a test runs in GitLab CI, the [GitLab\nRunner](https://gitlab.com/gitlab-org/gitlab-runner/) launches a Docker\ncontainer that runs commands specified by a `.gitlab-ci.yml` inside the\nproject repository. As the job runs, the runner streams the output to\nthe GitLab API via PATCH requests. The GitLab backend saves this data\ninto a file. The following sequence diagram shows how this works:\n\n```plantuml\n== Get a job! ==\nRunner -> GitLab: POST /api/v4/jobs/request\nGitLab -> Runner: 201 Job was scheduled\n\n== Job sends logs (1 of 2) ==\nRunner -> GitLab: PATCH /api/v4/job/:id/trace\nGitLab -> File: Save to disk\nGitLab -> Runner: 202 Accepted\n\n== Job sends logs (2 of 2) ==\nRunner -> GitLab: PATCH /api/v4/job/:id/trace\nGitLab -> File: Save to disk\nGitLab -> Runner: 202 Accepted\n```\n\n[Henrich Lee Yu](/company/team/#engwan) mentioned\nthat we had recently [disabled a feature flag that changed how GitLab\nhandled CI job\nlogs](https://docs.gitlab.com/ee/administration/job_logs.html#new-incremental-logging-architecture). 
[The\ntiming seemed to line\nup](https://gitlab.com/gitlab-org/gitlab/issues/34951#note_236723888).\n\nThis feature, called live CI traces, eliminates the need for a shared\nPOSIX filesystem (e.g., NFS) when saving job logs to disk by:\n\n1. Streaming data into memory via Redis\n2. Persisting the data in the database (PostgreSQL)\n3. Archiving the final data into object storage\n\nWhen this flag is enabled, the flow of CI job logs looks something like\nthe following:\n\n```plantuml\n== Get a job! ==\nRunner -> GitLab: POST /api/v4/jobs/request\nGitLab -> Runner: 201 Job was scheduled\n\n== Job sends logs ==\nRunner -> GitLab: PATCH /api/v4/job/:id/trace\nGitLab -> Redis: Save chunk\nGitLab -> Runner: 202 Accepted\n...\n== Copy 128 KB chunks from Redis to database ==\nGitLab -> Redis: GET gitlab:ci:trace:id:chunks:0\nGitLab -> PostgreSQL: INSERT INTO ci_build_trace_chunks\n...\n== Job finishes ==\n\nRunner -> GitLab: PUT /api/v4/job/:id\nGitLab -> Runner: 200 Job was updated\n\n== Archive trace to object storage ==\n```\n\nLooking at the flow diagram above, we see that this approach has more\nsteps. After receiving data from the runner, something could have gone\nwrong with handling a chunk of data. However, we still had many\nquestions:\n\n1. Did the runners send the right data in the first place?\n1. Did GitLab drop a chunk of data somewhere?\n1. Did this new feature actually have anything to do with the problem?\n1. Are they really making another Gremlins movie?\n\n## Reproducing the bug: Simplify the `.gitlab-ci.yml`\n\nTo help answer those questions, we simplified the `.gitlab-ci.yml` to\nrun only the `static-analysis` step. We inserted a known Rubocop error,\nreplacing a `eq` with `eql`. We first ran this test on a separate GitLab\ninstance with a private runner. 
No luck there – the job showed the right\noutput:\n\n```\nOffenses:\n\nee/spec/models/project_spec.rb:55:42: C: RSpec/BeEql: Prefer be over eql.\n        expect(described_class.count).to eql(2)\n                                         ^^^\n\n12669 files inspected, 1 offense detected\n```\n\nHowever, we repeated the test on our staging server and found that we\nreproduced the original problem. In addition, the live CI trace feature\nflag had been activated on staging. Since the problem occurred with and\nwithout the feature, we could eliminate that feature as a possible\ncause.\n\nPerhaps something with the GitLab server environment caused a\nproblem. For example, could the load balancers be rate-limiting the\nrunners? As an experiment, we pointed a private runner at the staging\nserver and re-ran the test. This time, it succeeded: the output was\nshown. That seemed to suggest that the problem had more to do with the\nrunner than with the server.\n\n## Docker Machine vs. Docker\n\nOne key difference between the two tests: One runner used a shared,\nautoscaled runner using a [Docker\nMachine](https://docs.docker.com/machine/overview/) executor, and the\nprivate runner used a [Docker\nexecutor](https://docs.gitlab.com/runner/executors/docker.html).\n\nWhat does Docker Machine do exactly? The following diagram may help\nillustrate:\n\n![Docker Machine](https://docs.docker.com/machine/img/machine.png){: .medium.center}\n\nThe top-left shows a local Docker instance. When you run Docker from the\ncommand-line interface (e.g., `docker attach my-container`), the program\njust makes [REST calls to the Docker Engine\nAPI](https://docs.docker.com/engine/api/v1.40/).\n\nThe rest of the diagram shows how Docker Machine fits into the\npicture. Docker Machine is an entirely separate program. The GitLab\nRunner shells out to `docker-machine` to create and destroy virtual\nmachines using cloud-specific (e.g. Amazon, Google, etc.) drivers. 
Once\na machine is running, the runner then uses the Docker Engine API to run,\nwatch, and stop containers.\n\nNote that this API is used securely over an HTTPS connection. This is an\nimportant difference between the Docker Machine executor and Docker\nexecutor: The former needs to communicate across the network, while the\nlatter can either use a local TCP socket or UNIX domain socket.\n\n## Google Cloud Platform timeouts\n\nWe've known for a while that Google Cloud [has a 10-minute idle\ntimeout](https://cloud.google.com/compute/docs/troubleshooting/general-tips),\nwhich has caused issues in the past:\n\n> Note that idle connections are tracked for a maximum of 10 minutes,\n> after which their traffic is subject to firewall rules, including the\n> implied deny ingress rule. If your instance initiates or accepts\n> long-lived connections with an external host, you should adjust TCP\n> keep-alive settings on your Compute Engine instances to less than 600\n> seconds to ensure that connections are refreshed before the timeout\n> occurs.\n\nWas the problem caused by this timeout? With the Docker Machine\nexecutor, we found that we could reproduce the problem with a simple\n`.gitlab-ci.yml`:\n\n```yaml\nimage: \"busybox:latest\"\n\ntest:\n  script:\n    - date\n    - sleep 601\n    - echo \"Hello world!\"\n    - date\n    - exit 1\n```\n\nThis would reproduce the failure, where we would never see the `Hello\nworld!` output. Changing the `sleep 601` to `sleep 599` would make the\nproblem go away. Hurrah! All we have to do is tweak the system TCP\nkeepalives, right? Google provided these sensible settings:\n\n```sh\nsudo /sbin/sysctl -w net.ipv4.tcp_keepalive_time=60 net.ipv4.tcp_keepalive_intvl=60 net.ipv4.tcp_keepalive_probes=5\n```\n\nHowever, enabling these kernel-level settings didn't solve the\nproblem. Were keepalives even being sent? 
Or was there some other issue?\nWe turned our attention to network traces.\n\n## Eavesdropping on Docker traffic\n\nIn order to understand what was happening, we needed to be able to\nmonitor the network communication between the runner and the Docker\ncontainer. But how exactly does the GitLab Runner stream data from a\nDocker container to the GitLab server?  The following diagram\nillustrates the flow:\n\n```plantuml\nRunner -> Docker: POST /containers/name/attach\nDocker -> Runner: \u003Ccontainer output>\nDocker -> Runner: \u003Ccontainer output>\nRunner -> GitLab: PATCH /api/v4/job/:id/trace\nGitLab -> File: Save to disk\nGitLab -> Runner: 202 Accepted\n```\n\nFirst, the runner makes a [POST request to attach to the container\noutput](https://docs.docker.com/engine/api/v1.40/#operation/ContainerAttach).\nAs soon as a process running in the container outputs some data, Docker\nwill transmit the data over this HTTPS stream. The runner then copies\nthis data to GitLab via the PATCH request.\n\nHowever, as mentioned earlier, traffic between a GitLab Runner and the\nremote Docker machine is encrypted over HTTPS on port 2376. Was there an\neasy way to disable HTTPS? Searching through the code of Docker Machine,\nwe found that it did not appear to be supported out of the box.\n\nSince we couldn't disable HTTPS, we had two ways to eavesdrop:\n\n1. Use a man-in-the-middle proxy (e.g. [mitmproxy](https://mitmproxy.org/))\n1. 
Record the traffic and decrypt the traffic later using the private keys\n\n## Ok, let's be the man-in-the-middle!\n\nThe first seemed more straightforward, since [we already had experience\ndoing this with the Docker\nclient](https://docs.gitlab.com/ee/administration/packages/container_registry.html#running-the-docker-daemon-with-a-proxy).\n\nHowever, after [defining the proxy variables for GitLab\nRunner](https://docs.gitlab.com/runner/configuration/proxy.html#adding-proxy-variables-to-the-runner-config),\nwe found we were only able to intercept the GitLab API calls with\n`mitmproxy`. The Docker API calls still went directly to the remote\nhost. Something wasn't obeying the proxy configuration, but we didn't\ninvestigate further. We tried the second approach.\n\n## Decrypting TLS data\n\nTo decrypt TLS data, we would need to obtain the encryption keys. Where\nwere these located for a newly-created system with `docker-machine`? It\nturns out `docker-machine` worked in the following way:\n\n1. Call the Google Cloud API to create a new machine\n1. Create a `/root/.docker/machine/machines/:machine_name` directory\n1. Generate a new SSH keypair\n1. Install the SSH key on the server\n1. Generate a new TLS certificate and key\n1. Install and configure Docker on the newly-created machine with TLS certificates\n\nAs long as the machine runs, the directory will contain the information\nneeded to decode this traffic. We ran `tcpdump` and saved the private keys.\n\nOur first attempt at decoding the traffic failed. Wireshark could not\ndecode the encrypted traffic, although general TCP traffic could still\nbe seen. Researching more, we found out why: If the encrypted traffic\nused a [Diffie-Hellman key\nexchange](https://en.wikipedia.org/wiki/Diffie%E2%80%93Hellman_key_exchange),\nhaving the private keys would not suffice! 
This is by design, a property\ncalled [perfect forward\nsecrecy](https://en.m.wikipedia.org/wiki/Forward_secrecy).\n\nTo get around that limitation, we modified the GitLab Runner to disable\ncipher suites that used the Diffie-Hellman key exchange:\n\n```diff\ndiff --git a/vendor/github.com/docker/go-connections/tlsconfig/config_client_ciphers.go b/vendor/github.com/docker/go-connections/tlsconfig/config_client_ciphers.go\nindex 6b4c6a7c0..a3f86d756 100644\n",[267,1056,9,1034,1263,1034,1264,746,1014],"google","AWS",{"slug":1266,"featured":6,"template":683},"tracking-down-missing-tcp-keepalives","content:en-us:blog:tracking-down-missing-tcp-keepalives.yml","Tracking Down Missing Tcp Keepalives","en-us/blog/tracking-down-missing-tcp-keepalives.yml","en-us/blog/tracking-down-missing-tcp-keepalives",{"_path":1272,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":1273,"content":1279,"config":1285,"_id":1287,"_type":14,"title":1288,"_source":16,"_file":1289,"_stem":1290,"_extension":19},"/en-us/blog/velocity-with-confidence",{"title":1274,"description":1275,"ogTitle":1274,"ogDescription":1275,"noIndex":6,"ogImage":1276,"ogUrl":1277,"ogSiteName":671,"ogType":672,"canonicalUrls":1277,"schema":1278},"How GitLab 14 satisfies the need for speed with modern DevOps","GitLab 14: Ship with velocity, ship with confidence","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749682089/Blog/Hero%20Images/racecar_devops.jpg","https://about.gitlab.com/blog/velocity-with-confidence","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How GitLab 14 satisfies the need for speed with modern DevOps\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Parker Ennis\"}],\n        \"datePublished\": \"2021-07-29\",\n      }",{"title":1274,"description":1275,"authors":1280,"heroImage":1276,"date":1282,"body":1283,"category":767,"tags":1284},[1281],"Parker Ennis","2021-07-29","\n\n## How DevOps and 
NFS changed the game\n\nWhat if I told you that one of the best-selling racing video game franchises of all time, the \"Need For Speed\" (NFS), and DevOps have more in common with each other than you think? Yes, you read that correctly, probably not the NFS (Network File System) you were expecting.\n\n### An appetite for change\n\nFor context, the NFS series originally set out to redefine a saturated, yet unsophisticated, racing video game market. Motivated by an appetite for change, the NFS user experience reflected the human connection to real cars and how they behaved, which was a big challenge for developers in the 1990s. Nearly 30 years ago, \"The Need for Speed\" forever changed the landscape of racing games, selling 150 million copies since its debut.\n\n![The original Need For Speed game from 1994](https://about.gitlab.com/images/blogimages/need_for_speed.png){: .shadow.center}\nThe original Need For Speed video game set a new standard with an appetite for industry change.\n{: .note.text-center}\n\nCoincidentally, it was in 1994 that Grady Booch coined the term \"continuous integration\" (CI). Booch, like NFS, paved the way for immense industry growth in the realm of software development. CI aimed to redefine the manual, time-consuming development processes that paid little mind to how real humans and developers behaved and collaborated around application development by [leveraging automation to increase development speed without sacrificing quality](/topics/ci-cd/benefits-continuous-integration/).\n\nSimilar to how NFS took the racing scene by storm and laid the groundwork for the racing game genre, CI evolved into what is arguably the most important piece of DevOps best practices today: Continuous integration and continuous delivery (CI/CD).\n\nDevOps continues to evolve, but without CI/CD, DevOps isn't the collaborative practice that helps teams work faster and more efficiently. 
CI/CD is a super power within DevOps – unlocking the potential to ship apps with increased velocity and confidence in their quality, without having to choose one or the other.\n\n### DIY DevOps vs Modern DevOps\n\nToday, it doesn't matter what your business does, it's going to involve some amount of using and building software. DevOps gained traction in the age of digital transformation, where the rate of technical innovation acted as a forcing function for companies to fail or survive. Over the past 10 years or so, organizations had a choice to either embrace this \"need for speed\" and adopt DevOps practices, or be displaced by their competition.\n\nThis scramble led to a \"DIY\" style of DevOps that couldn't deliver on its promises much of the time. For many organizations, the biggest problem wasn't just the brittle toolchains composed of disparate pieces of software but also trying to make these complicated toolchains and processes benefit from DevOps. Since uprooting everything wasn't an option, the root of the problem was still there, and DevOps was hard to adopt.\n\nFor all the teams DevOps has helped, the DevOps marketplace must continuously improve and evolve as we learn more about the challenges of modernizing workflows. DevOps must modernize alongside businesses to ensure it's an accessible and realistic framework for as many companies as possible to leverage.\n\n### GitLab 14 fuels the modern DevOps need for speed\n\nWith a platform-driven approach, [GitLab 14](/releases/2021/06/22/gitlab-14-0-released/) delivers a consistent and efficient developer and operator experience that leads to a simplified and more predictable SDLC. A single user interface, embedded security, and a unified data store are just some of the features of a platform any company can use without the tradeoffs of the DIY DevOps past. By using one tool for source code management, CI, and CD, teams are more efficient and productive with streamlined collaboration. 
Engineers are happier when focused on value-add than when maintaining integrations – and happy developers help attract and retain talent.\n\n[GitLab 14](/gitlab-14/) ushers in a new era of modern DevOps as a global movement, and I'm excited to talk a little bit about some of its capabilities that help you ship software faster, with a higher degree of confidence, and improve your ability to respond to market changes.\n\n### Ship with velocity and confidence\n\n**1. [GitLab pipeline editor](/releases/2021/01/22/gitlab-13-8-released/#pipeline-editor)**\n\nCrafting pipelines can be complicated and verbose without an understanding of advanced pipeline syntax and how it fits within the workflow using the '.gitlab-ci.yml' configuration file. Needing to craft pipelines from scratch presents a steeper learning curve for organizations and teams with a less mature DevOps culture. The GitLab pipeline editor lowers the barrier to entry for CI/CD novices and accelerates power users with visual authoring and versioning, continuous validation, and pipeline visualization. Whether you're a more advanced user or novice, the pipeline editor unlocks additional power and usability.\n\n![Pipeline editor linting capability makes pipeline authoring easier](https://about.gitlab.com/images/blogimages/lint_ci.png){: .shadow.center}\nPipeline editor linting capability makes pipeline authoring easier and more efficient.\n{: .note.text-center}\n\nHere's what some of our wider community is saying about the pipeline editor:\n\n> \"I really like the direction of making CI/CD more accessible to first-time users and how GitLab rolls out this feature piece by piece.\" - Bernhard Knasmüller, computer scientist\n\n> \"This is going to improve the CI/CD configuration experience greatly!\" - Olivier Jourdan, developer\n\n**2. [GitLab Agent for Kubernetes](https://youtu.be/17O_ARVaRGo)**\n\nThe GitLab Agent for Kubernetes enables secure, cloud-native [GitOps](/solutions/gitops/). 
GitLab also meets customers where they are by supporting GitOps with agent-based and agentless approaches, and for deployments anywhere, regardless of whether infrastructure is cloud-native. It also enables alerts based on network policies for pull-based deployments.\n\nHere's piece of feedback from the wider GitLab community on the Kubernetes Agent:\n\n> \"GitLab is leading the evolution of DevOps by optimising work efficiency and cloud-native integration capabilities. This enables the rapid delivery of digital value.\" - Vasanth Kandaswamy, Head of Data and Applications Portfolio, Fujitsu Australia\n\nWe look forward to iterating and improving these capabilities and always [welcome your feedback](/submit-feedback/#product-feedback) on our product.\n\n### What's next?\n\nOne thing is for sure: **people want to go fast,** but not when it requires sacrificing peace of mind and quality. We're committed to helping you ship with velocity and confidence by [investing in specific product areas](/direction/#fy22-product-investment-themes) to bring the benefits of modern DevOps to anyone using GitLab to deliver their applications.\n\n![Go fast with confidence](https://about.gitlab.com/images/blogimages/gofast.gif){: .shadow.center}\nEven Ricky Bobby from Talledega Nights agrees. People just want to go fast!\n{: .note.text-center}\n\nWe'll continue executing on our [vision for CI/CD](https://gitlab.com/groups/gitlab-org/-/epics/4534) to create a visual pipeline authoring experience built right into GitLab that simplifies the complexity, letting you quickly create and edit pipelines while still exposing advanced options when you need them.\n\nWe're also committed to making sure you can deploy anytime and anywhere to take advantage of the benefits of Kubernetes, no matter where you are at on your cloud native development journey. 
If you have feedback or suggestions on what we can do better, please [let us know in our product epic.](https://gitlab.com/groups/gitlab-org/-/epics/3329)\n\nWe look forward to delivering you more value as we iterate upon this new era of GitLab 14 going foward and can't wait to see the great things you're creating with Gitlab.\n\n_This blog is part three in a three-part series on the top capabilities of GitLab 14. Learn more about [how GitLab 14 prepares you for DevSecOps 2.0 in part one](/blog/are-you-ready-for-the-newest-era-of-devsecops/), and about [how to optimize DevOps with GitLab 14's enhanced visibility tools in part two](/blog/optimizing-devops-visibility-in-gitlab-14/)._\n\nCover image by [CHUTTERSNAP](https://unsplash.com/@chuttersnapk) on [Unsplash](https://unsplash.com/photos/5Yo1P9ErikM)\n{: .note}\n",[769,853,1034,811,9],{"slug":1286,"featured":6,"template":683},"velocity-with-confidence","content:en-us:blog:velocity-with-confidence.yml","Velocity With Confidence","en-us/blog/velocity-with-confidence.yml","en-us/blog/velocity-with-confidence",{"_path":1292,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":1293,"content":1299,"config":1304,"_id":1306,"_type":14,"title":1307,"_source":16,"_file":1308,"_stem":1309,"_extension":19},"/en-us/blog/why-did-we-choose-to-integrate-fluxcd-with-gitlab",{"title":1294,"description":1295,"ogTitle":1294,"ogDescription":1295,"noIndex":6,"ogImage":1296,"ogUrl":1297,"ogSiteName":671,"ogType":672,"canonicalUrls":1297,"schema":1298},"GitOps with GitLab: What you need to know about the Flux CD integration","Inside the decision to integrate Flux CD with the GitLab agent for Kubernetes and what it means to you.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749678356/Blog/Hero%20Images/balance-speed-security-devops.jpg","https://about.gitlab.com/blog/why-did-we-choose-to-integrate-fluxcd-with-gitlab","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": 
\"Article\",\n        \"headline\": \"GitOps with GitLab: What you need to know about the Flux CD integration\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Viktor Nagy\"}],\n        \"datePublished\": \"2023-02-08\",\n      }",{"title":1294,"description":1295,"authors":1300,"heroImage":1296,"date":1301,"body":1302,"category":767,"tags":1303},[786],"2023-02-08","\n\nIn January, [we decided to integrate Flux CD with the GitLab agent for Kubernetes](https://gitlab.com/gitlab-org/gitlab/-/issues/357947). [Flux CD](https://fluxcd.io/) is a mature GitOps solution and one of the market leaders in the area. We have since decided to make Flux CD our recommended approach to do GitOps with GitLab – previously, the agent for Kubernetes alone was the recommended approach. Let's discuss what this change means for current users and what our plans are for the integration.\n\nFirst of all, let's remove the most worrying thought from the agenda: We are not deprecating any agent for Kubernetes functionality at this point. The GitOps offering remains fully supported and transitions to maintenance mode. We plan to deprecate it with at least one year of removal time once we consider the Flux integration solid. As a result, the removal is unlikely before the GitLab 17.0 release, which is expected in 2024. We are looking into providing tooling to facilitate (or automate) the migration once the time comes. If you use the agent for Kubernetes for GitOps, you don't have to do anything at this time.\n\nThis change does not affect the agent's other non-GitOps functionality either. 
The [CI/CD pipeline integration](https://docs.gitlab.com/ee/user/clusters/agent/ci_cd_workflow.html) and [operational container scanning](https://docs.gitlab.com/ee/user/clusters/agent/vulnerabilities.html) remain intact, and we will continue investing in them.\n\n## What to expect from this change\n\nFrom now on, instead of building our solution for GitOps, we will focus on supporting Flux and improving its user experience when it is used together with GitLab. Flux CD will become the recommended tool to do GitOps with GitLab. Initially, we will provide documentation on the Flux setup we recommend for our users while we focus on building out various integrations.\n\nIn terms of the integrations, we are looking at providing a UI built into GitLab. You might also be able to use the UI with other tools, including the CI pipeline integration of the agent, but it will work best with deployments managed by Flux. Besides the UI integration, we want to streamline Flux's access management. Flux accesses GitLab through the regular GitLab front door. As a result, it needs to authenticate with a token, requests might be rate-limited, and, in general, it does not seem to be the most efficient way to do its job. We plan to simplify this for our users to avoid the necessity of managing dozens of deploy keys and to decrease the load on GitLab at the same time.\n\n## Why Flux?\n\nWhy did we choose Flux CD instead of something else? We evaluated several options. There are other open-source GitOps tools. The biggest contender was [ArgoCD](https://argoproj.github.io/cd), another mature Cloud Native Computing Foundation project in the GitOps space. ArgoCD is a full-featured product for GitOps, while Flux is a GitOps toolkit. While we like and value ArgoCD a lot, we think it does not lend itself to integration with GitLab.\n\nAs we are already in the process of building out UI integrations with the cluster, we know how the GitLab UI will be able to reach the Kubernetes API. 
Flux relies on the standard Kubernetes API 100%, so we can easily integrate it into our UI access approach. Relying only on the Kubernetes API is a significant benefit over ArgoCD, which provides a custom API.\n\nBesides going with another tool, we evaluated the work needed to build a competitive, in-house solution. We found in-house development is the strongest contender to Flux CD, and while it was very compelling, we decided to go with the integration instead. We believe this should give our customers more value faster than a custom solution. Moreover, it should enable existing Flux users to benefit from our integrations with minor modifications in their usage patterns as we roll out the integrations.\n\n## What comes next?\n\nFirst, we want to [document our recommendations for using FluxCD with GitLab](https://gitlab.com/gitlab-org/gitlab/-/issues/389382). At the same time, we will change our GitOps documentation to recommend Flux instead of the legacy GitOps solution. We consider these the most important steps to minimize uncertainty and set you up for a successful start.\n\nTogether with the above, the team is working hard on shipping the first version of an [integrated Kubernetes UI](https://gitlab.com/gitlab-org/gitlab/-/issues/375449). We are starting with an environment overview and build an [entire Kubernetes dashboard](https://gitlab.com/groups/gitlab-org/-/epics/2493) as part of GitLab. The cluster UI integration will enable GitLab users to learn more about their cluster state without leaving the GitLab UI and should allow a nearly real-time view of GitOps deployments using Flux CD.\n\nWe have clear ideas on how to do what I described above. We are still researching and learning about many other topics, including [how to simplify Flux best accessing GitLab](https://gitlab.com/gitlab-org/gitlab/-/issues/389393). 
If you have experience using Flux with GitLab and have any feedback, recommendations, or requests on what the integration should support, we would like to hear from you. Please, reach out to me using [my GitLab profile](https://gitlab.com/nagyv-gitlab).\n\n## The Flux community\n\nBefore I close this article, I would like to say hi and thank you to the Flux community. We already got invited to the Flux development meeting, and the core team was very welcoming. As we always actively contributed to the core tools – first [`gitops-engine`](https://github.com/argoproj/gitops-engine/), later [`cli-utils`](https://github.com/kubernetes-sigs/cli-utils/) – supporting our GitOps offering, we are looking forward to contributing to Flux CD.\n\nWe are looking forward to working more closely with you. Thank you for building this great tool and community!\n\n_Disclaimer: This blog contains information related to upcoming products, features, and functionality. It is important to note that the information in this blog post is for informational purposes only. Please do not rely on this information for purchasing or planning purposes. As with all projects, the items mentioned in this blog and linked pages are subject to change or delay. 
The development, release, and timing of any products, features, or functionality remain at the sole discretion of GitLab._\n\nRead more:\n\n- More about the [Flux CD integration decision](https://gitlab.com/gitlab-org/gitlab/-/issues/357947) \n- Docs for [agent for Kubernetes](https://docs.gitlab.com/ee/user/clusters/agent/ci_cd_workflow.html) \n- Issue on [our current focus](https://gitlab.com/gitlab-org/gitlab/-/issues/389382) \n- Preparation issues: [Flux to GitLab access management](https://gitlab.com/gitlab-org/gitlab/-/issues/389393) and [Visualizing Kubernetes resources within the Environments page](https://gitlab.com/gitlab-org/gitlab/-/issues/375449)\n\n",[9,812,769,1098],{"slug":1305,"featured":6,"template":683},"why-did-we-choose-to-integrate-fluxcd-with-gitlab","content:en-us:blog:why-did-we-choose-to-integrate-fluxcd-with-gitlab.yml","Why Did We Choose To Integrate Fluxcd With Gitlab","en-us/blog/why-did-we-choose-to-integrate-fluxcd-with-gitlab.yml","en-us/blog/why-did-we-choose-to-integrate-fluxcd-with-gitlab",{"_path":1311,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":1312,"content":1318,"config":1324,"_id":1326,"_type":14,"title":1327,"_source":16,"_file":1328,"_stem":1329,"_extension":19},"/en-us/blog/administering-gitlab-edu",{"title":1313,"description":1314,"ogTitle":1313,"ogDescription":1314,"noIndex":6,"ogImage":1315,"ogUrl":1316,"ogSiteName":671,"ogType":672,"canonicalUrls":1316,"schema":1317},"Administering your GitLab for Education License","Getting ready for fall semester and wondering how to set up your GitLab License? 
We've got you covered!","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749681419/Blog/Hero%20Images/servers_image.jpg","https://about.gitlab.com/blog/administering-gitlab-edu","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Administering your GitLab for Education License\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Christina Hupy, Ph.D.\"}],\n        \"datePublished\": \"2020-07-10\",\n      }",{"title":1313,"description":1314,"authors":1319,"heroImage":1315,"date":1321,"body":1322,"category":700,"tags":1323},[1320],"Christina Hupy, Ph.D.","2020-07-10","\n\n{::options parse_block_html=\"true\" /}\n\n\n\n*It is that time of year again!* Faculty and IT administrators are starting to prepare for the arrival of the fall semester and around this same time, we get an influx of questions about how to best manage your GitLab license. We thought it would be helpful to dive into some of the most frequently asked questions here!\n\nBefore we jump into the tips and tricks, here's a bit of information about the **GitLab for Education Program**. The program offers free, unlimited subscriptions of top-tier GitLab plans (Saas or Self-Managed) to qualified entities. [Qualified educational institutions](https://about.gitlab.com/handbook/marketing/developer-relations/community-programs/education-program/#gitlab-for-education-program-requirements) have a primary purpose of teaching enrolled students, may be public or private, must be accredited by an applicable government body, and non-profit. The GitLab for Education license is only available for the purposes of *teaching, learning, and non-commercial academic research*. 
IT professional use or any administrative use within the institution is not contemplated with the Education license.\n\nPlease note that the remainder of the blog post applies to the licenses granted through our Education Program.\n\nIf you are interested in joining, [apply for the GitLab for Education Program](https://about.gitlab.com/solutions/education/)!\n\n## Who is eligible for a GitLab.com seat with the Education License?\n\nStudents, faculty, and staff directly employed or enrolled at the host institution are eligible to receive a seat under the GitLab for Education license. Collaborators from other institutions or entities cannot be provided seats under the license, unless they have an email address with the same domain as the host institution.\n\n### If external collaborators cannot have a seat, how do we collaborate?\n\nThere are a couple of different options. One option is to request that the collaborator receive an email address through the host institution. This option is a great one if the collaborator is an adjunct professor or a regular ‘volunteer’ (most institutions allow regular volunteers to have an institutional email address). Another option is to have the collaborator create a free GitLab.com account or purchase a higher tier individually. For this option, the collaborator will only have the features available at the tier for which they signed up. This option might work out fine if the external collaborator just needs to access files and provide feedback.  
If your collaborators are at a different educational institution, we encourage that institution to sign up for the GitLab for Education Program themselves!\n\nOnce the collaborator has an account, it is very easy to add them to your group or project.\n* You can [add people to a specific project](https://docs.gitlab.com/ee/user/project/members/#add-a-user) or [import users from another project](https://docs.gitlab.com/ee/user/project/members/#import-users-from-another-project).\n* If the user is not on your hosted instance, you can invite them using [their email address](https://docs.gitlab.com/ee/user/project/members/#invite-people-using-their-e-mail-address)\n\n## How are users counted? What happens if we exceed the allotted seats?\nThe seats for your license are generic and are not specific to a user. [GitLab does not use a named license model](https://about.gitlab.com/pricing/licensing-faq/#can-i-use-my-paid-seats-for-different-users). If a user leaves your institution, you can remove or block that user to free a seat. The seat can then be used by another user.\n\nEvery occupied seat, whether by a person, administrator, job, or bot is counted in the subscription.   There are a few exceptions:\n* Members with Guest permission are not counted (in SaaS or self-managed)\n* Ghost Users and Support Bots are not counted in self-managed (Ghost Users are users where the account has been removed but all artifacts remain).\n\nGitLab.com counts concurrent seats not named users. Each user is able to have up to 100 active sessions. To view the number of active sessions or revoke and active session [check these docs](https://docs.gitlab.com/ee/user/profile/active_sessions.html).\n\nIf more seats are used than are available in self-managed GitLab, the administrator may receive a “users over license warning. In this situation, the institution should reach out to GitLab to request additional user seats.  
Please see more details in our [licensing FAQs](https://about.gitlab.com/pricing/licensing-faq/#who-gets-counted-in-the-subscription).\n\n## How do we assign accounts in our GitLab instance?\nThere are a few different options for assigning accounts including creating a signup page, adding users in the Admin Area, or the API. Also, you can create users through integrations with LDAP or OmniAuth providers.\n\nFirst, let’s explore creating a user sign up page. The custom user sign-up page is a great way to customize the experience for the users at your institutions. From the Admin Area, the admin can set up several [sign-up restrictions](https://docs.gitlab.com/ee/administration/settings/sign_up_restrictions.html) including enabling or disabling new signups, requiring user email confirmation, and block or allow email addresses from specific domains. The sign-up page itself can be customized with the institution logo, a description of the purposes of the instance, and other guidelines for who is able to create an account and how they can do so.\n\nCustomizing the sign-up page is great to communicate to potential users what the instance is for and how it can be used. For example, many institutions include a note about the department that maintains the instance and who can sign up. Here are some great examples from [Newcastle University](https://mas-gitlab.ncl.ac.uk/users/sign_in), [University of Kent School of Computing](https://git.cs.kent.ac.uk/users/sign_in), and the [University of Birmingham’s BEAR GitLab instance](https://gitlab.bham.ac.uk/users/sign_in). This is also a great place to include compliance information regarding the uses of the license.\n\n## Does GitLab allow users to login in with the Institutional authentication system?\n\n### LDAP/AD\n\n[GitLab supports LDAP for user authentication](https://docs.gitlab.com/ee/administration/auth/ldap/), compatible implementations include Microsoft Active Directory (AD) and Open LDAP. 
There are other implementations which only support authorization as login, but no additional LDAP features, which are then greyed out.\n\nYou can secure the connection to the LDAP server with TLS, `simple_tls` and `start_tls` are supported.\n\nNote: LDAP authentication and sync requires a self-managed installation of GitLab. This requires administrative permissions not available in GitLab.com SaaS.\n\n#### Syncing Users and Groups for Permissions\n\nRoles and permissions can be organized based on groups which can be synced into GitLab with an Enterprise license. The GitLab administrator can specify the base DN and filters to exclude certain groups and users from the sync with the ‘user_filter’.\n\nExample for filtering for users in a specific group:\n\nuser_filter: `(memberof=CN=gitlab,CN=groups,CN=accounts,DC=office,DC=company,DC=com)`\n\nThe usage of `memberof` will automatically trigger a sync for this group when a user signs in for the first time. The hourly group sync ensures that all permissions are uptodate. The entrypoint for the group sync is `group_base` which is available in GitLab Enterprise Starter+.\n\nCurrently it is not possible to exclude groups from the sync. [MR here](https://gitlab.com/gitlab-org/gitlab/-/merge_requests/3218).\n\n### SSO/SAML\n\nIf your institution has an identity provider such as Shibboleth, Okta, etc. supporting Single-Sign-On capabilities, you are advised to use the generic SAML OmniAuth provider. GitLab then consumes the assertions and maps users accordingly.\n\nFor Kerberos as SSO provider, [check these docs](https://docs.gitlab.com/ee/integration/kerberos.html).\n\nNote: The SAML OmniAuth provider only is available on self-managed GitLab instances. 
For SAML SSO on GitLab.com SaaS, [please check these docs](https://docs.gitlab.com/ee/user/group/saml_sso/#saml-sso-for-gitlabcom-groups).\n\n### Can students be issued accounts from a bulk list of email addresses?\n\nRather than allowing anyone to sign up or create an account, Administrators can automate user creation of accounts using the REST API. This requires some scripting knowledge. [Here is an example](https://pypi.org/project/gitlab-users/).\n\n### What is the best way to manage student seats?\nInstitutions have the flexibility to determine how many students receive a seat, how long a student is able to retain the seat, and what happens when they graduate. For example, students may sign up for a GitLab seat when they register for a class or become part of a research project. An Institution may decide to allow students to have a seat only during a class or research semester or they may decide to allow the students to have the seats for the entirety of their enrollment. Once the student has a seat, that seat is no longer available in the number of licenses. When the student’s GitLab account is deleted that seat will return to the pool of available seats. Please be aware that when an account is deleted, all projects in the user namespace are also deleted. Additionally, the administrator has multiple options for removing users: delete only the user but maintain their associated records, delete the user and contributions, or to delete the user and their associated records ([see docs](https://docs.gitlab.com/ee/user/profile/account/delete_account.html)). We recommend making this choice with caution. If a student is part of a research project, the team may need to keep issues, notes, and merge requests related to the project.\n\nSince we provide an unlimited number of seats as part of our GitLab for Education Program, we highly encourage institutions to allow students to retain their seat during the entirety of their time at the institution. 
We encourage students to use GitLab to create a portfolio of their work and contributions while in school. Providing students to retain their account, allows them to build up this body of work while they are in school. The GitLab profile is a great way to showcase how a student has developed skills and made contributions to various projects over time. Prospective employers can visit the profile page and then navigate through the student’s portfolio of work.\n\n![Example Profile](https://about.gitlab.com/images/blogimages/samantha-profile.jpg){: .shadow.medium.center}\nAn exaple of a GitLab Profile with interactive record of contributions.\n{: .note.text-center}\n\nAs students approach graduation, we encourage the institution to provide ample time and sufficient warning before deleting the user account so that students can migrate any relevant material to another repository of their choice. We recommend that students sign up for our free tier of GitLab (self-managed or SaaS) and then begin migrating any relevant content from their projects over to their own personal account.\n\nAdditionally, institutions can consider using the [deactivate](https://docs.gitlab.com/ee/administration/moderate_users.html#activating-and-deactivating-users) or [block user](https://docs.gitlab.com/ee/administration/moderate_users.html#blocking-and-unblocking-users) features to help manage accounts for students who may be nearing graduation.  These options can be combined with a script to check the deactivated or blocked date and then communicate with the API to delete or warn a user that is inactive. This way the students aren’t taking up a seat during the graduation transition but yet they will have time to migrate their files.\n\n## Is an educational institution able to upgrade from the Community Edition to Enterprise Edition through the Education Program?\n\nYes! We recommend that even if you aren’t ready from the start to use Enterprise that you install the Enterprise Edition. 
You can use this edition even if you don’t have a license. Only the features available under the MIT license will be available without a subscription. Once you are ready to move to Enterprise, the institution can apply to the program, receive the license key and then activate in our Customers Portal without ever needing to install additional software. If you started with the Community Edition, you can still migrate, but there are some extra steps that may require some system down time. [See our migration guide](https://about.gitlab.com/install/ce-or-ee/) for more information.\n\n## Is support available as part of the Education Program?\n\nPriority support is not included with the Education Program license for either self-managed or hosted) is available for purchase at a discount ($4.95 per user / per month). Please note that support must be purchased for all the seats issued in the subscription. [See our support page for more details.](https://about.gitlab.com/support/)\n\nPaid support is not included with the Education Program license.  For assistance with your Education Program GitLab instance, we recommend using our Community Forum by opening a thread in the [Education Category](https://forum.gitlab.com/c/gitlab-for-education/37). We also encourage all program members to [introduce themselves on the forum](https://forum.gitlab.com/c/gitlab-for-education/introductions/38) so we can begin building connections! 
Check out [my introduction here](https://forum.gitlab.com/t/christina-hupy-senior-education-program-manager/39911?u=chupy).\n\n>The GitLab forum has over 15K users with over 500k pageviews per month and experienced 66% growth last year!\n\nWe do our best to make sure all of your questions are answered and also connect our community with technical experts here at GitLab.\n\n## Why doesn’t GitLab issue licenses to students directly?\n\nAt this time the GitLab for Education Program offers centralized Ultimate or Gold licenses to the educational institution directly rather than individuals. The licenses are distributed in a manner that is intended for large numbers (unlimited in fact!) of users. During the application process, one of our team members verifies that the educational institution and the use case meets the requirements of the program. The education institution then signs our terms and conditions as part of the subscription agreement. Our system is set up to rely on the institutions to issue the accounts to the individual student as they are in the best position to determine their eligibility through their existing authentication and enrollment systems.\n\nAgain, we strongly encourage students to take advantage of our [free (SaaS or Self-Managed) managed offerings](https://about.gitlab.com/pricing/) if they wish to have an individual account. Also, if students would like to demonstrate some of their amazing DevOps skills on their own account, we encourage them to sign up for our [30-day trial](https://about.gitlab.com/free-trial/) to test out some of the more advanced features.\n\nWe hope this post was useful for you and answered many of your questions regarding administration of a GitLab instance in education!\n\n### We encourage you to post any follow up questions you have to our [GitLab forum in the Education Category](https://forum.gitlab.com/c/gitlab-for-education/37). 
By posting your questions there, you’ll be able to connect with our diverse network of community members and contributors!\n\nCover image by [Ian Battaglia](https://unsplash.com/@ianjbattaglia) on [Unsplash](https://unsplash.com/photos/9drS5E_Rguc)\n{: .note}\n",[1078,9],{"slug":1325,"featured":6,"template":683},"administering-gitlab-edu","content:en-us:blog:administering-gitlab-edu.yml","Administering Gitlab Edu","en-us/blog/administering-gitlab-edu.yml","en-us/blog/administering-gitlab-edu",4,[664,688,711,732,753,776,796,820,841],1754424512974]