[{"data":1,"prerenderedAt":3752},["ShallowReactive",2],{"/en-us/blog/categories/devsecops/":3,"navigation-en-us":22,"banner-en-us":440,"footer-en-us":455,"devsecops-category-page-en-us":666},{"_path":4,"_dir":5,"_draft":6,"_partial":6,"_locale":7,"seo":8,"content":11,"config":12,"_id":15,"_type":16,"title":17,"_source":18,"_file":19,"_stem":20,"_extension":21},"/en-us/blog/categories/devsecops","categories",false,"",{"title":9,"description":10},"DevSecOps","Browse articles related to DevSecOps on the GitLab Blog",{"name":9},{"template":13,"slug":14,"hide":6},"BlogCategory","devsecops","content:en-us:blog:categories:devsecops.yml","yaml","Devsecops","content","en-us/blog/categories/devsecops.yml","en-us/blog/categories/devsecops","yml",{"_path":23,"_dir":24,"_draft":6,"_partial":6,"_locale":7,"data":25,"_id":436,"_type":16,"title":437,"_source":18,"_file":438,"_stem":439,"_extension":21},"/shared/en-us/main-navigation","en-us",{"logo":26,"freeTrial":31,"sales":36,"login":41,"items":46,"search":377,"minimal":408,"duo":427},{"config":27},{"href":28,"dataGaName":29,"dataGaLocation":30},"/","gitlab logo","header",{"text":32,"config":33},"Get free trial",{"href":34,"dataGaName":35,"dataGaLocation":30},"https://gitlab.com/-/trial_registrations/new?glm_source=about.gitlab.com&glm_content=default-saas-trial/","free trial",{"text":37,"config":38},"Talk to sales",{"href":39,"dataGaName":40,"dataGaLocation":30},"/sales/","sales",{"text":42,"config":43},"Sign in",{"href":44,"dataGaName":45,"dataGaLocation":30},"https://gitlab.com/users/sign_in/","sign in",[47,91,187,192,298,358],{"text":48,"config":49,"cards":51,"footer":74},"Platform",{"dataNavLevelOne":50},"platform",[52,58,66],{"title":48,"description":53,"link":54},"The most comprehensive AI-powered DevSecOps Platform",{"text":55,"config":56},"Explore our Platform",{"href":57,"dataGaName":50,"dataGaLocation":30},"/platform/",{"title":59,"description":60,"link":61},"GitLab Duo (AI)","Build software faster with AI at every 
stage of development",{"text":62,"config":63},"Meet GitLab Duo",{"href":64,"dataGaName":65,"dataGaLocation":30},"/gitlab-duo/","gitlab duo ai",{"title":67,"description":68,"link":69},"Why GitLab","10 reasons why Enterprises choose GitLab",{"text":70,"config":71},"Learn more",{"href":72,"dataGaName":73,"dataGaLocation":30},"/why-gitlab/","why gitlab",{"title":75,"items":76},"Get started with",[77,82,87],{"text":78,"config":79},"Platform Engineering",{"href":80,"dataGaName":81,"dataGaLocation":30},"/solutions/platform-engineering/","platform engineering",{"text":83,"config":84},"Developer Experience",{"href":85,"dataGaName":86,"dataGaLocation":30},"/developer-experience/","Developer experience",{"text":88,"config":89},"MLOps",{"href":90,"dataGaName":88,"dataGaLocation":30},"/topics/devops/the-role-of-ai-in-devops/",{"text":92,"left":93,"config":94,"link":96,"lists":100,"footer":169},"Product",true,{"dataNavLevelOne":95},"solutions",{"text":97,"config":98},"View all Solutions",{"href":99,"dataGaName":95,"dataGaLocation":30},"/solutions/",[101,126,148],{"title":102,"description":103,"link":104,"items":109},"Automation","CI/CD and automation to accelerate deployment",{"config":105},{"icon":106,"href":107,"dataGaName":108,"dataGaLocation":30},"AutomatedCodeAlt","/solutions/delivery-automation/","automated software delivery",[110,114,118,122],{"text":111,"config":112},"CI/CD",{"href":113,"dataGaLocation":30,"dataGaName":111},"/solutions/continuous-integration/",{"text":115,"config":116},"AI-Assisted Development",{"href":64,"dataGaLocation":30,"dataGaName":117},"AI assisted development",{"text":119,"config":120},"Source Code Management",{"href":121,"dataGaLocation":30,"dataGaName":119},"/solutions/source-code-management/",{"text":123,"config":124},"Automated Software Delivery",{"href":107,"dataGaLocation":30,"dataGaName":125},"Automated software delivery",{"title":127,"description":128,"link":129,"items":134},"Security","Deliver code faster without compromising 
security",{"config":130},{"href":131,"dataGaName":132,"dataGaLocation":30,"icon":133},"/solutions/security-compliance/","security and compliance","ShieldCheckLight",[135,138,143],{"text":136,"config":137},"Security & Compliance",{"href":131,"dataGaLocation":30,"dataGaName":136},{"text":139,"config":140},"Software Supply Chain Security",{"href":141,"dataGaLocation":30,"dataGaName":142},"/solutions/supply-chain/","Software supply chain security",{"text":144,"config":145},"Compliance & Governance",{"href":146,"dataGaLocation":30,"dataGaName":147},"/solutions/continuous-software-compliance/","Compliance and governance",{"title":149,"link":150,"items":155},"Measurement",{"config":151},{"icon":152,"href":153,"dataGaName":154,"dataGaLocation":30},"DigitalTransformation","/solutions/visibility-measurement/","visibility and measurement",[156,160,164],{"text":157,"config":158},"Visibility & Measurement",{"href":153,"dataGaLocation":30,"dataGaName":159},"Visibility and Measurement",{"text":161,"config":162},"Value Stream Management",{"href":163,"dataGaLocation":30,"dataGaName":161},"/solutions/value-stream-management/",{"text":165,"config":166},"Analytics & Insights",{"href":167,"dataGaLocation":30,"dataGaName":168},"/solutions/analytics-and-insights/","Analytics and insights",{"title":170,"items":171},"GitLab for",[172,177,182],{"text":173,"config":174},"Enterprise",{"href":175,"dataGaLocation":30,"dataGaName":176},"/enterprise/","enterprise",{"text":178,"config":179},"Small Business",{"href":180,"dataGaLocation":30,"dataGaName":181},"/small-business/","small business",{"text":183,"config":184},"Public Sector",{"href":185,"dataGaLocation":30,"dataGaName":186},"/solutions/public-sector/","public sector",{"text":188,"config":189},"Pricing",{"href":190,"dataGaName":191,"dataGaLocation":30,"dataNavLevelOne":191},"/pricing/","pricing",{"text":193,"config":194,"link":196,"lists":200,"feature":285},"Resources",{"dataNavLevelOne":195},"resources",{"text":197,"config":198},"View all 
resources",{"href":199,"dataGaName":195,"dataGaLocation":30},"/resources/",[201,234,257],{"title":202,"items":203},"Getting started",[204,209,214,219,224,229],{"text":205,"config":206},"Install",{"href":207,"dataGaName":208,"dataGaLocation":30},"/install/","install",{"text":210,"config":211},"Quick start guides",{"href":212,"dataGaName":213,"dataGaLocation":30},"/get-started/","quick setup checklists",{"text":215,"config":216},"Learn",{"href":217,"dataGaLocation":30,"dataGaName":218},"https://university.gitlab.com/","learn",{"text":220,"config":221},"Product documentation",{"href":222,"dataGaName":223,"dataGaLocation":30},"https://docs.gitlab.com/","product documentation",{"text":225,"config":226},"Best practice videos",{"href":227,"dataGaName":228,"dataGaLocation":30},"/getting-started-videos/","best practice videos",{"text":230,"config":231},"Integrations",{"href":232,"dataGaName":233,"dataGaLocation":30},"/integrations/","integrations",{"title":235,"items":236},"Discover",[237,242,247,252],{"text":238,"config":239},"Customer success stories",{"href":240,"dataGaName":241,"dataGaLocation":30},"/customers/","customer success stories",{"text":243,"config":244},"Blog",{"href":245,"dataGaName":246,"dataGaLocation":30},"/blog/","blog",{"text":248,"config":249},"Remote",{"href":250,"dataGaName":251,"dataGaLocation":30},"https://handbook.gitlab.com/handbook/company/culture/all-remote/","remote",{"text":253,"config":254},"TeamOps",{"href":255,"dataGaName":256,"dataGaLocation":30},"/teamops/","teamops",{"title":258,"items":259},"Connect",[260,265,270,275,280],{"text":261,"config":262},"GitLab 
Services",{"href":263,"dataGaName":264,"dataGaLocation":30},"/services/","services",{"text":266,"config":267},"Community",{"href":268,"dataGaName":269,"dataGaLocation":30},"/community/","community",{"text":271,"config":272},"Forum",{"href":273,"dataGaName":274,"dataGaLocation":30},"https://forum.gitlab.com/","forum",{"text":276,"config":277},"Events",{"href":278,"dataGaName":279,"dataGaLocation":30},"/events/","events",{"text":281,"config":282},"Partners",{"href":283,"dataGaName":284,"dataGaLocation":30},"/partners/","partners",{"backgroundColor":286,"textColor":287,"text":288,"image":289,"link":293},"#2f2a6b","#fff","Insights for the future of software development",{"altText":290,"config":291},"the source promo card",{"src":292},"/images/navigation/the-source-promo-card.svg",{"text":294,"config":295},"Read the latest",{"href":296,"dataGaName":297,"dataGaLocation":30},"/the-source/","the source",{"text":299,"config":300,"lists":302},"Company",{"dataNavLevelOne":301},"company",[303],{"items":304},[305,310,316,318,323,328,333,338,343,348,353],{"text":306,"config":307},"About",{"href":308,"dataGaName":309,"dataGaLocation":30},"/company/","about",{"text":311,"config":312,"footerGa":315},"Jobs",{"href":313,"dataGaName":314,"dataGaLocation":30},"/jobs/","jobs",{"dataGaName":314},{"text":276,"config":317},{"href":278,"dataGaName":279,"dataGaLocation":30},{"text":319,"config":320},"Leadership",{"href":321,"dataGaName":322,"dataGaLocation":30},"/company/team/e-group/","leadership",{"text":324,"config":325},"Team",{"href":326,"dataGaName":327,"dataGaLocation":30},"/company/team/","team",{"text":329,"config":330},"Handbook",{"href":331,"dataGaName":332,"dataGaLocation":30},"https://handbook.gitlab.com/","handbook",{"text":334,"config":335},"Investor relations",{"href":336,"dataGaName":337,"dataGaLocation":30},"https://ir.gitlab.com/","investor relations",{"text":339,"config":340},"Trust Center",{"href":341,"dataGaName":342,"dataGaLocation":30},"/security/","trust 
center",{"text":344,"config":345},"AI Transparency Center",{"href":346,"dataGaName":347,"dataGaLocation":30},"/ai-transparency-center/","ai transparency center",{"text":349,"config":350},"Newsletter",{"href":351,"dataGaName":352,"dataGaLocation":30},"/company/contact/","newsletter",{"text":354,"config":355},"Press",{"href":356,"dataGaName":357,"dataGaLocation":30},"/press/","press",{"text":359,"config":360,"lists":361},"Contact us",{"dataNavLevelOne":301},[362],{"items":363},[364,367,372],{"text":37,"config":365},{"href":39,"dataGaName":366,"dataGaLocation":30},"talk to sales",{"text":368,"config":369},"Get help",{"href":370,"dataGaName":371,"dataGaLocation":30},"/support/","get help",{"text":373,"config":374},"Customer portal",{"href":375,"dataGaName":376,"dataGaLocation":30},"https://customers.gitlab.com/customers/sign_in/","customer portal",{"close":378,"login":379,"suggestions":386},"Close",{"text":380,"link":381},"To search repositories and projects, login to",{"text":382,"config":383},"gitlab.com",{"href":44,"dataGaName":384,"dataGaLocation":385},"search login","search",{"text":387,"default":388},"Suggestions",[389,391,395,397,401,405],{"text":59,"config":390},{"href":64,"dataGaName":59,"dataGaLocation":385},{"text":392,"config":393},"Code Suggestions (AI)",{"href":394,"dataGaName":392,"dataGaLocation":385},"/solutions/code-suggestions/",{"text":111,"config":396},{"href":113,"dataGaName":111,"dataGaLocation":385},{"text":398,"config":399},"GitLab on AWS",{"href":400,"dataGaName":398,"dataGaLocation":385},"/partners/technology-partners/aws/",{"text":402,"config":403},"GitLab on Google Cloud",{"href":404,"dataGaName":402,"dataGaLocation":385},"/partners/technology-partners/google-cloud-platform/",{"text":406,"config":407},"Why GitLab?",{"href":72,"dataGaName":406,"dataGaLocation":385},{"freeTrial":409,"mobileIcon":414,"desktopIcon":419,"secondaryButton":422},{"text":410,"config":411},"Start free 
trial",{"href":412,"dataGaName":35,"dataGaLocation":413},"https://gitlab.com/-/trials/new/","nav",{"altText":415,"config":416},"Gitlab Icon",{"src":417,"dataGaName":418,"dataGaLocation":413},"/images/brand/gitlab-logo-tanuki.svg","gitlab icon",{"altText":415,"config":420},{"src":421,"dataGaName":418,"dataGaLocation":413},"/images/brand/gitlab-logo-type.svg",{"text":423,"config":424},"Get Started",{"href":425,"dataGaName":426,"dataGaLocation":413},"https://gitlab.com/-/trial_registrations/new?glm_source=about.gitlab.com/compare/gitlab-vs-github/","get started",{"freeTrial":428,"mobileIcon":432,"desktopIcon":434},{"text":429,"config":430},"Learn more about GitLab Duo",{"href":64,"dataGaName":431,"dataGaLocation":413},"gitlab duo",{"altText":415,"config":433},{"src":417,"dataGaName":418,"dataGaLocation":413},{"altText":415,"config":435},{"src":421,"dataGaName":418,"dataGaLocation":413},"content:shared:en-us:main-navigation.yml","Main Navigation","shared/en-us/main-navigation.yml","shared/en-us/main-navigation",{"_path":441,"_dir":24,"_draft":6,"_partial":6,"_locale":7,"title":442,"button":443,"image":447,"config":450,"_id":452,"_type":16,"_source":18,"_file":453,"_stem":454,"_extension":21},"/shared/en-us/banner","is now in public beta!",{"text":70,"config":444},{"href":445,"dataGaName":446,"dataGaLocation":30},"/gitlab-duo/agent-platform/","duo banner",{"config":448},{"src":449},"https://res.cloudinary.com/about-gitlab-com/image/upload/v1753720689/somrf9zaunk0xlt7ne4x.svg",{"layout":451},"release","content:shared:en-us:banner.yml","shared/en-us/banner.yml","shared/en-us/banner",{"_path":456,"_dir":24,"_draft":6,"_partial":6,"_locale":7,"data":457,"_id":662,"_type":16,"title":663,"_source":18,"_file":664,"_stem":665,"_extension":21},"/shared/en-us/main-footer",{"text":458,"source":459,"edit":465,"contribute":470,"config":475,"items":480,"minimal":654},"Git is a trademark of Software Freedom Conservancy and our use of 'GitLab' is under 
license",{"text":460,"config":461},"View page source",{"href":462,"dataGaName":463,"dataGaLocation":464},"https://gitlab.com/gitlab-com/marketing/digital-experience/about-gitlab-com/","page source","footer",{"text":466,"config":467},"Edit this page",{"href":468,"dataGaName":469,"dataGaLocation":464},"https://gitlab.com/gitlab-com/marketing/digital-experience/about-gitlab-com/-/blob/main/content/","web ide",{"text":471,"config":472},"Please contribute",{"href":473,"dataGaName":474,"dataGaLocation":464},"https://gitlab.com/gitlab-com/marketing/digital-experience/about-gitlab-com/-/blob/main/CONTRIBUTING.md/","please contribute",{"twitter":476,"facebook":477,"youtube":478,"linkedin":479},"https://twitter.com/gitlab","https://www.facebook.com/gitlab","https://www.youtube.com/channel/UCnMGQ8QHMAnVIsI3xJrihhg","https://www.linkedin.com/company/gitlab-com",[481,504,561,590,624],{"title":48,"links":482,"subMenu":487},[483],{"text":484,"config":485},"DevSecOps platform",{"href":57,"dataGaName":486,"dataGaLocation":464},"devsecops platform",[488],{"title":188,"links":489},[490,494,499],{"text":491,"config":492},"View plans",{"href":190,"dataGaName":493,"dataGaLocation":464},"view plans",{"text":495,"config":496},"Why Premium?",{"href":497,"dataGaName":498,"dataGaLocation":464},"/pricing/premium/","why premium",{"text":500,"config":501},"Why Ultimate?",{"href":502,"dataGaName":503,"dataGaLocation":464},"/pricing/ultimate/","why ultimate",{"title":505,"links":506},"Solutions",[507,512,515,517,522,527,531,534,538,543,545,548,551,556],{"text":508,"config":509},"Digital transformation",{"href":510,"dataGaName":511,"dataGaLocation":464},"/topics/digital-transformation/","digital transformation",{"text":136,"config":513},{"href":131,"dataGaName":514,"dataGaLocation":464},"security & compliance",{"text":125,"config":516},{"href":107,"dataGaName":108,"dataGaLocation":464},{"text":518,"config":519},"Agile 
development",{"href":520,"dataGaName":521,"dataGaLocation":464},"/solutions/agile-delivery/","agile delivery",{"text":523,"config":524},"Cloud transformation",{"href":525,"dataGaName":526,"dataGaLocation":464},"/topics/cloud-native/","cloud transformation",{"text":528,"config":529},"SCM",{"href":121,"dataGaName":530,"dataGaLocation":464},"source code management",{"text":111,"config":532},{"href":113,"dataGaName":533,"dataGaLocation":464},"continuous integration & delivery",{"text":535,"config":536},"Value stream management",{"href":163,"dataGaName":537,"dataGaLocation":464},"value stream management",{"text":539,"config":540},"GitOps",{"href":541,"dataGaName":542,"dataGaLocation":464},"/solutions/gitops/","gitops",{"text":173,"config":544},{"href":175,"dataGaName":176,"dataGaLocation":464},{"text":546,"config":547},"Small business",{"href":180,"dataGaName":181,"dataGaLocation":464},{"text":549,"config":550},"Public sector",{"href":185,"dataGaName":186,"dataGaLocation":464},{"text":552,"config":553},"Education",{"href":554,"dataGaName":555,"dataGaLocation":464},"/solutions/education/","education",{"text":557,"config":558},"Financial services",{"href":559,"dataGaName":560,"dataGaLocation":464},"/solutions/finance/","financial 
services",{"title":193,"links":562},[563,565,567,569,572,574,576,578,580,582,584,586,588],{"text":205,"config":564},{"href":207,"dataGaName":208,"dataGaLocation":464},{"text":210,"config":566},{"href":212,"dataGaName":213,"dataGaLocation":464},{"text":215,"config":568},{"href":217,"dataGaName":218,"dataGaLocation":464},{"text":220,"config":570},{"href":222,"dataGaName":571,"dataGaLocation":464},"docs",{"text":243,"config":573},{"href":245,"dataGaName":246,"dataGaLocation":464},{"text":238,"config":575},{"href":240,"dataGaName":241,"dataGaLocation":464},{"text":248,"config":577},{"href":250,"dataGaName":251,"dataGaLocation":464},{"text":261,"config":579},{"href":263,"dataGaName":264,"dataGaLocation":464},{"text":253,"config":581},{"href":255,"dataGaName":256,"dataGaLocation":464},{"text":266,"config":583},{"href":268,"dataGaName":269,"dataGaLocation":464},{"text":271,"config":585},{"href":273,"dataGaName":274,"dataGaLocation":464},{"text":276,"config":587},{"href":278,"dataGaName":279,"dataGaLocation":464},{"text":281,"config":589},{"href":283,"dataGaName":284,"dataGaLocation":464},{"title":299,"links":591},[592,594,596,598,600,602,604,608,613,615,617,619],{"text":306,"config":593},{"href":308,"dataGaName":301,"dataGaLocation":464},{"text":311,"config":595},{"href":313,"dataGaName":314,"dataGaLocation":464},{"text":319,"config":597},{"href":321,"dataGaName":322,"dataGaLocation":464},{"text":324,"config":599},{"href":326,"dataGaName":327,"dataGaLocation":464},{"text":329,"config":601},{"href":331,"dataGaName":332,"dataGaLocation":464},{"text":334,"config":603},{"href":336,"dataGaName":337,"dataGaLocation":464},{"text":605,"config":606},"Sustainability",{"href":607,"dataGaName":605,"dataGaLocation":464},"/sustainability/",{"text":609,"config":610},"Diversity, inclusion and belonging (DIB)",{"href":611,"dataGaName":612,"dataGaLocation":464},"/diversity-inclusion-belonging/","Diversity, inclusion and 
belonging",{"text":339,"config":614},{"href":341,"dataGaName":342,"dataGaLocation":464},{"text":349,"config":616},{"href":351,"dataGaName":352,"dataGaLocation":464},{"text":354,"config":618},{"href":356,"dataGaName":357,"dataGaLocation":464},{"text":620,"config":621},"Modern Slavery Transparency Statement",{"href":622,"dataGaName":623,"dataGaLocation":464},"https://handbook.gitlab.com/handbook/legal/modern-slavery-act-transparency-statement/","modern slavery transparency statement",{"title":625,"links":626},"Contact Us",[627,630,632,634,639,644,649],{"text":628,"config":629},"Contact an expert",{"href":39,"dataGaName":40,"dataGaLocation":464},{"text":368,"config":631},{"href":370,"dataGaName":371,"dataGaLocation":464},{"text":373,"config":633},{"href":375,"dataGaName":376,"dataGaLocation":464},{"text":635,"config":636},"Status",{"href":637,"dataGaName":638,"dataGaLocation":464},"https://status.gitlab.com/","status",{"text":640,"config":641},"Terms of use",{"href":642,"dataGaName":643,"dataGaLocation":464},"/terms/","terms of use",{"text":645,"config":646},"Privacy statement",{"href":647,"dataGaName":648,"dataGaLocation":464},"/privacy/","privacy statement",{"text":650,"config":651},"Cookie preferences",{"dataGaName":652,"dataGaLocation":464,"id":653,"isOneTrustButton":93},"cookie preferences","ot-sdk-btn",{"items":655},[656,658,660],{"text":640,"config":657},{"href":642,"dataGaName":643,"dataGaLocation":464},{"text":645,"config":659},{"href":647,"dataGaName":648,"dataGaLocation":464},{"text":650,"config":661},{"dataGaName":652,"dataGaLocation":464,"id":653,"isOneTrustButton":93},"content:shared:en-us:main-footer.yml","Main 
Footer","shared/en-us/main-footer.yml","shared/en-us/main-footer",{"featuredPost":667,"allPosts":689,"totalPages":3750,"initialPosts":3751},{"_path":668,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":669,"content":672,"config":682,"_id":685,"_type":16,"title":686,"_source":18,"_file":687,"_stem":688,"_extension":21},"/en-us/blog/why-now-is-the-time-for-embedded-devsecops",{"noIndex":6,"title":670,"description":671},"Why now is the time for embedded DevSecOps","Learn how embedded development teams address long feedback cycles, manual compliance, and isolated development with DevSecOps.",{"title":670,"description":671,"authors":673,"heroImage":675,"date":676,"body":677,"category":14,"tags":678},[674],"Matt DeLaney","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749659978/Blog/Hero%20Images/automation.png","2025-07-01","For embedded systems teams, DevSecOps has traditionally seemed like an approach better suited to SaaS applications than firmware development. But this is changing. Software is now a primary differentiator in hardware products. New market expectations demand modern development practices. In response, organizations are pursuing \"embedded DevSecOps.\"\n\nWhat is embedded DevSecOps? The application of collaborative engineering practices, integrated toolchains, and automation for building, testing, and securing software to embedded systems development. Embedded DevSecOps includes necessary adaptations for hardware integration.\n## Convergence of market forces\nThree powerful market forces are converging to compel embedded teams to modernize their development practices.\n### 1. The software-defined product revolution\nProducts once defined primarily by their hardware are now differentiated by their software capabilities. The software-defined vehicle (SDV) market tells a compelling story in this regard. 
It's projected to grow from $213.5 billion in 2024 to [$1.24 trillion](https://www.marketsandmarkets.com/Market-Reports/software-defined-vehicles-market-187205966.html) by 2030, a massive 34% compound annual growth rate.\nThe software content in these products is growing considerably. By the end of 2025, the average vehicle is expected to contain [650 million lines of code](https://www.statista.com/statistics/1370978/automotive-software-average-lines-of-codes-per-vehicle-globally/). Traditional embedded development approaches cannot handle this level of software complexity. \n### 2. Hardware virtualization as a technical enabler\nHardware virtualization is a key technical enabler of embedded DevSecOps. Virtual electronic control units (vECUs), cloud-based ARM CPUs, and sophisticated simulation environments are becoming more prevalent. Virtual hardware allows testing that once required physical hardware.\n\nThese virtualization technologies provide a foundation for continuous integration ([CI](https://about.gitlab.com/topics/ci-cd/)). But their value is fully realized only when integrated into an automated workflow. Combined with collaborative development practices and automated pipelines, virtual testing helps teams detect issues much earlier, when fixes are far less expensive. Without embedded DevSecOps practices and tooling to orchestrate these virtual resources, organizations can't capitalize on the virtualization trend.\n### 3. The competitive and economic reality\nThree interrelated forces are reshaping the competitive landscape for embedded development:\n- The talent war has shifted decisively. As an embedded systems leader at a GitLab customer explained, “No embedded engineers graduating from college today know legacy tools like Perforce. They know Git. 
These young engineers will work at a company for six months on legacy tools, then quit.” Companies using outdated tools may lose their engineering future.\n- This talent advantage translates into competitive superiority. Tech-forward companies that attract top engineers with modern practices achieve remarkable results. For example, in 2024, [SpaceX](https://spacenews.com/spacex-launch-surge-helps-set-new-global-launch-record-in-2024/) performed more orbital launches than the rest of the world combined. Tech-forward companies excel at software development and embrace a modern development culture. This, among other things, creates efficiencies that legacy companies struggle to match. \n- The rising costs of embedded development — driven by long feedback cycles — create an urgent need for embedded DevSecOps. When developers have to wait weeks to test code on hardware test benches, productivity remains inherently low. Engineers lose context and must switch contexts when results arrive. The problem worsens when defects enter the picture. Bugs become more expensive to fix the later they're discovered. Long feedback cycles magnify this problem in embedded systems.\n\nOrganizations are adopting embedded DevSecOps to help combat these challenges.\n## Priority transformation areas\nBased on these market forces, forward-thinking embedded systems leaders are implementing embedded DevSecOps in the following ways. \n### From hardware bottlenecks to continuous testing\nHardware-testing bottlenecks represent one of the most significant constraints in traditional embedded development. 
These delays create the unfavorable economics described earlier — when developers wait weeks for hardware access, defect costs spiral.\nAddressing this challenge requires a multifaceted approach including: \n* Automating the orchestration of expensive shared hardware test benches among embedded developers  \n* Integrating both SIL (Software-in-the-Loop) and HIL (Hardware-in-the-Loop) testing into automated CI pipelines  \n* Standardizing builds with version-controlled environments\n\nEmbedded developers can accomplish this with GitLab's [On-Premises Device Cloud](https://gitlab.com/gitlab-accelerates-embedded/comp/device-cloud), a CI/CD component. Through automating the orchestration of firmware tests on virtual and real hardware, teams are better positioned to reduce feedback cycles from weeks to hours. They also can catch more bugs early on in the software development lifecycle.\n### Automating compliance and security governance\nEmbedded systems face strict regulatory requirements. Manual compliance processes are unsustainable.\nLeading organizations are transforming how they comply with these requirements by: \n* Replacing manual workflows with automated [compliance frameworks](https://about.gitlab.com/blog/introducing-custom-compliance-frameworks-in-gitlab/)  \n* Integrating specialized functional safety, security, and code quality tools into automated continuous integration pipelines  \n* Automating approval workflows, enforcing code reviews, and maintaining audit trails  \n* Configuring compliance frameworks for specific standards like ISO 26262 or DO-178C\n\nThis approach enables greater compliance maturity without additional headcount — turning what was once a burden into a competitive advantage. One leading electric vehicle (EV) manufacturer executes 120,000 CI/CD jobs per day with GitLab, many of which include compliance checks. And they can fix and deploy bug fixes to vehicles within an hour of discovery. 
This level of scale and speed would be extremely difficult without automated compliance workflows.\n### Enabling collaborative innovation\nHistorically, for valid business and technical reasons, embedded developers have largely worked alone at their desks. Collaboration has been limited. Innovative organizations break down these barriers by enabling shared code visibility through integrated source control and CI/CD workflows. These modern practices attract and retain engineers while unlocking innovation that would remain hidden in isolated workflows.\nAs one director of DevOps at a tech-forward automotive manufacturer (a GitLab customer) explains: \"It's really critical for us to have a single pane of glass that we can look at and see the statuses. The developers, when they bring a merge request, are aware of the status of a given workflow in order to move as fast as possible.\" This transparency accelerates innovation, enabling automakers to rapidly iterate on software features that differentiate their vehicles in an increasingly competitive market.\n## The window of opportunity\nEmbedded systems leaders have a clear window of opportunity to gain a competitive advantage through DevSecOps adoption. But the window won't stay open forever. Software continues to become the primary differentiator in embedded products, and the gap between leaders and laggards will only widen.\nOrganizations that successfully adopt DevSecOps will reduce costs, accelerate time-to-market, and unlock innovation that differentiates them in the market. The embedded systems leaders of tomorrow are the ones embracing DevSecOps today.\n> While this article explored why now is the critical time for embedded teams to adopt DevSecOps, you may be wondering about the practical steps to get started. 
Learn how to put these concepts into action with our guide: [4 ways to accelerate embedded development with GitLab](https://about.gitlab.com/blog/4-ways-to-accelerate-embedded-development-with-gitlab/).",[679,680,111,681],"embedded DevOps","product","automotive",{"featured":6,"template":683,"slug":684},"BlogPost","why-now-is-the-time-for-embedded-devsecops","content:en-us:blog:why-now-is-the-time-for-embedded-devsecops.yml","Why Now Is The Time For Embedded Devsecops","en-us/blog/why-now-is-the-time-for-embedded-devsecops.yml","en-us/blog/why-now-is-the-time-for-embedded-devsecops",[690,712,736,757,779,800,823,844,864,885,909,931,950,970,990,1012,1034,1055,1076,1096,1115,1135,1156,1176,1195,1217,1238,1257,1275,1295,1315,1334,1354,1376,1395,1415,1435,1453,1473,1495,1514,1535,1555,1576,1596,1615,1635,1655,1675,1695,1715,1733,1752,1772,1793,1811,1830,1849,1869,1888,1908,1930,1950,1968,1986,2005,2026,2045,2064,2083,2103,2122,2141,2160,2180,2199,2218,2237,2254,2273,2293,2312,2331,2350,2369,2389,2408,2427,2445,2464,2481,2500,2519,2537,2558,2579,2599,2618,2638,2658,2678,2699,2719,2737,2757,2777,2796,2816,2835,2854,2875,2893,2912,2931,2950,2970,2990,3009,3028,3047,3065,3084,3102,3120,3139,3158,3178,3197,3215,3235,3254,3273,3293,3311,3329,3348,3367,3387,3406,3424,3444,3462,3480,3498,3518,3537,3557,3576,3595,3613,3631,3649,3668,3689,3709,3730],{"_path":691,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":692,"content":700,"config":706,"_id":708,"_type":16,"title":709,"_source":18,"_file":710,"_stem":711,"_extension":21},"/en-us/blog/why-are-organizations-moving-to-a-unified-devsecops-platform",{"title":693,"description":694,"ogTitle":693,"ogDescription":694,"noIndex":6,"ogImage":695,"ogUrl":696,"ogSiteName":697,"ogType":698,"canonicalUrls":696,"schema":699},"Why are organizations moving to a unified DevSecOps platform?","Learn about GitLab's comprehensive, unified DevSecOps platform, which integrates tools, enhances security, and leverages AI for efficient software 
development.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097063/Blog/Hero%20Images/Blog/Hero%20Images/securitylifecycle-light_securitylifecycle-light.png_1750097063583.png","https://about.gitlab.com/blog/why-are-organizations-moving-to-a-unified-devsecops-platform","https://about.gitlab.com","article","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Why are organizations moving to a unified DevSecOps platform?\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Itzik Gan Baruch\"}],\n        \"datePublished\": \"2025-06-02\"\n      }",{"title":693,"description":694,"authors":701,"heroImage":695,"date":703,"body":704,"category":14,"tags":705},[702],"Itzik Gan Baruch","2025-06-02","In today’s modern software development landscape, many organizations are migrating to the cloud and adopting DevSecOps processes. However, this transition presents a significant challenge: a proliferation of tools and legacy systems not designed for modern development. To adapt these systems to DevSecOps, organizations must create integrations between multiple tools for task management, CI/CD, security, monitoring, and more. The result? Operational complexity, high maintenance costs, and disrupted collaboration between development and operations teams. 
Additionally, developers experience frustration as they constantly switch between different tools to complete a single development flow – from planning to production.\n\n![The complexity and operational costs of integrating multiple tools into a DevSecOps process](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097077/Blog/Content%20Images/Blog/Content%20Images/image1_aHR0cHM6_1750097077287.jpg)\n\n\u003Ccenter>\u003Ci>How complex it can be to integrate multiple tools into a DevSecOps process\u003C/i>\u003C/center>\n\n\u003Cbr>\u003C/br>\n\nThe good news is that a solution exists: A comprehensive DevSecOps platform offering a unified approach to software development.\n\nThese platforms are built for organizations operating in cloud-based and DevSecOps environments, consolidating all software development stages — from code management, CI/CD processes, task management, and security to AI-driven automation — into a single platform. Centralizing all software development workflows in a unified interface enables development and operations teams to work more efficiently, streamline communication, and minimize operational complexities and disruptions.\n\nFurthermore, the developer experience significantly improves — engineers are much happier working with a product designed specifically for modern development needs.\n\nIn the sections below, we’ll explore how GitLab helps teams overcome common challenges — whether it’s managing projects and tasks, ensuring security and compliance, or adopting AI-powered development tools – all within a single, unified platform.\n\n## Integrated Agile project management\n\nGitLab provides a holistic solution in which project and task management are fully integrated across all stages of the software development lifecycle, such as CI/CD, enabling real-time tracking of development progress. Issues and epics directly link to automation processes, allowing a seamless flow from planning to production deployment. 
This approach enhances transparency across teams, reduces delays, and ensures that all stakeholders have a clear view of the development status in real-time.\n\n![Issues and epics directly link to automation processes, allowing a seamless flow from planning to production deployment.](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097077/Blog/Content%20Images/Blog/Content%20Images/image3_aHR0cHM6_1750097077288.jpg)\n\n## Built-in security\nGitLab strongly emphasizes integrating security capabilities end-to-end (security first). The platform integrates a wide range of automated security scanners, including:\n\n- [Dependency Scanning](https://docs.gitlab.com/user/application_security/dependency_scanning/)\n- [Static Application Security Testing (SAST)](https://docs.gitlab.com/user/application_security/sast/)\n- [Dynamic Application Security Testing (DAST)](https://docs.gitlab.com/user/application_security/dast/)\n- [Secret Detection](https://docs.gitlab.com/user/application_security/secret_detection/)\n- [Container Scanning](https://docs.gitlab.com/user/application_security/container_scanning/)\n\n![Security scanning capabilities integrated into the CI/CD process at various development stages](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097077/Blog/Content%20Images/Blog/Content%20Images/image2_aHR0cHM6_1750097077289.jpg)\n\n\u003Ccenter>\u003Ci>Security scanning capabilities integrated into the CI/CD process at various development stages\u003C/i>\u003C/center>\n\n\u003Cbr>\u003C/br>\n\nThese security checks are built directly into every phase of the software development lifecycle, including the CI/CD pipeline, to provide developers with immediate feedback on potential security issues early in the development cycle.\n\n## Compliance and regulatory requirements\n\nBeyond efficiency and user experience, many organizations — especially those in regulated industries such as financial institutions or large enterprises — must ensure their 
processes comply with strict security and compliance standards. They need the ability to enforce policies for different projects, such as mandating a security scanner every time a CI/CD pipeline runs on specific code branches (e.g., main or protected branches) or requiring specific approvals before merging code into the main branch.\n\nWith GitLab, this becomes easier through [Compliance Frameworks](https://about.gitlab.com/blog/introducing-custom-compliance-frameworks-in-gitlab/), a feature that allows organizations to define and enforce structured policies for selected projects. This ensures compliance with automatic regulatory and security requirements while maintaining a seamless and efficient developer workflow.\n\n## AI-powered development\n\n[GitLab Duo](https://about.gitlab.com/gitlab-duo/) provides AI-driven assistance across all development stages, eliminating the need to switch to external tools. Every AI-powered request is processed within the full context of the project and codebase, enabling smarter and more efficient work.\n\nAI can perform example tasks such as:\n- automatic task description generation\n- smart summarization of issue discussions, saving developers valuable time\n- advanced code review capabilities\n- code improvement and optimization suggestions\n- automated test generation\n- security vulnerability detection and remediation\n- troubleshooting root cause analysis for CI pipeline failures\n- privacy and Data Security\n\nUnderstanding the needs of regulated organizations, particularly in the public and financial sectors, GitLab offers a unique solution for running AI models in a secure environment. 
GitLab Duo Self-Hosted enables organizations to maintain full control over data privacy, security, and the deployment of large language models ([LLMs](https://about.gitlab.com/blog/what-is-a-large-language-model-llm/)) in their own infrastructure, ensuring:\n- data privacy protection\n- compliance with regulatory requirements\n- maximum security\n- AI benefits without external network dependencies or risks\n\n## Summary\n\nOrganizations need a comprehensive DevSecOps platform to streamline processes, enhance security, and accelerate innovation. GitLab delivers precisely that — a single application consolidating all essential development, security, and operational tools with built-in security integration and AI-powered automation.\n\nReady to see GitLab in action? Explore interactive demos of:\n\n- [GitLab Premium and Ultimate with Duo](https://gitlab.navattic.com/gitlab-premium-with-duo) – experience AI-powered development assistance\n\n- [Adding security to the CI/CD pipeline](https://gitlab.navattic.com/gitlab-scans) – see how integrated security scanning protects your software\n\n- [Compliance frameworks](https://gitlab.navattic.com/compliance) – discover how GitLab enforces policies across projects for better governance\n\n> Join the GitLab 18 virtual launch event to learn about the future of the DevSecOps platform, including the role of agentic AI. 
[Register today!](https://about.gitlab.com/eighteen/)",[9,484,680],{"slug":707,"featured":6,"template":683},"why-are-organizations-moving-to-a-unified-devsecops-platform","content:en-us:blog:why-are-organizations-moving-to-a-unified-devsecops-platform.yml","Why Are Organizations Moving To A Unified Devsecops Platform","en-us/blog/why-are-organizations-moving-to-a-unified-devsecops-platform.yml","en-us/blog/why-are-organizations-moving-to-a-unified-devsecops-platform",{"_path":713,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":714,"content":721,"config":729,"_id":732,"_type":16,"title":733,"_source":18,"_file":734,"_stem":735,"_extension":21},"/en-us/blog/overcome-ai-sprawl-with-a-value-stream-management-approach",{"title":715,"description":716,"ogTitle":715,"ogDescription":716,"config":717,"ogImage":718,"ogUrl":719,"ogSiteName":697,"ogType":698,"canonicalUrls":719,"schema":720},"Overcome AI sprawl with a Value Stream Management approach","From The Source: Learn how an AI strategy based on Value Stream Management can stop AI sprawl and supply chain constraints and drive ROI.",{"noIndex":93},"https://res.cloudinary.com/about-gitlab-com/image/upload/v1749665000/Blog/Hero%20Images/display-the-source-article-overcome-ai-sprawl-image-0492-1800x945-fy25.png","https://about.gitlab.com/blog/overcome-ai-sprawl-with-a-value-stream-management-approach","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Overcome AI sprawl with a Value Stream Management approach\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Stephen Walters\"}],\n        \"datePublished\": \"2025-01-06\",\n      }",{"title":715,"description":716,"authors":722,"heroImage":718,"date":724,"body":725,"category":14,"tags":726},[723],"Stephen Walters","2025-01-06","This is a cross-over post about [overcoming AI sprawl with a Value Stream Management 
approach](https://about.gitlab.com/the-source/ai/overcome-ai-sprawl-with-a-value-stream-management-approach/).",[9,727,728],"workflow","performance",{"slug":730,"featured":6,"template":683,"externalUrl":731},"overcome-ai-sprawl-with-a-value-stream-management-approach","https://about.gitlab.com/the-source/ai/overcome-ai-sprawl-with-a-value-stream-management-approach/","content:en-us:blog:overcome-ai-sprawl-with-a-value-stream-management-approach.yml","Overcome Ai Sprawl With A Value Stream Management Approach","en-us/blog/overcome-ai-sprawl-with-a-value-stream-management-approach.yml","en-us/blog/overcome-ai-sprawl-with-a-value-stream-management-approach",{"_path":737,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":738,"content":744,"config":751,"_id":753,"_type":16,"title":754,"_source":18,"_file":755,"_stem":756,"_extension":21},"/en-us/blog/ultimate-guide-to-ci-cd-fundamentals-to-advanced-implementation",{"title":739,"description":740,"ogTitle":739,"ogDescription":740,"noIndex":6,"ogImage":741,"ogUrl":742,"ogSiteName":697,"ogType":698,"canonicalUrls":742,"schema":743},"Ultimate guide to CI/CD: Fundamentals to advanced implementation","Learn how to modernize continuous integration/continuous deployment, including automating the development, delivery, and security of pipelines.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749660151/Blog/Hero%20Images/blog-image-template-1800x945__26_.png","https://about.gitlab.com/blog/ultimate-guide-to-ci-cd-fundamentals-to-advanced-implementation","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Ultimate guide to CI/CD: Fundamentals to advanced implementation\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Sandra Gittlen\"}],\n        \"datePublished\": \"2025-01-06\",\n      }",{"title":739,"description":740,"authors":745,"heroImage":741,"date":724,"body":747,"category":14,"tags":748},[746],"Sandra 
Gittlen","Continuous integration/continuous delivery ([CI/CD](https://about.gitlab.com/topics/ci-cd/)) has revolutionized how software teams create value for their users. Gone are the days of manual deployments and integration headaches — modern development demands automation, reliability, and speed.\n\nAt its core, CI/CD is about creating a seamless pipeline that takes code from a developer's environment all the way to production and incorporates feedback in real time. [CI](https://about.gitlab.com/topics/ci-cd/benefits-continuous-integration/) helps teams catch issues early — before they become costly problems — by ensuring that code changes are frequently merged into a shared repository, automatically tested, and validated. [CD](https://about.gitlab.com/topics/ci-cd/#what-is-continuous-delivery-cd) extends this by automating deployments, making releases predictable and stress-free.\n\nRather than relying on manual processes and complex toolchains for software development, teams can use a robust CI/CD pipeline to build, test, and deploy software. And AI can streamline the process even further, automatically engineering CI/CD pipelines for consistent quality, compliance, and security checks.\n\nThis guide explains modern CI/CD pipelines, from basic principles to best practices to advanced strategies. You'll also discover how leading organizations use CI/CD for impactful results. 
What you learn in this guide will help you scale your DevSecOps environment to develop and deliver software in an [agile](https://about.gitlab.com/topics/ci-cd/continuous-integration-agile/), automated, and efficient manner.\n\nWhat you'll learn:\n- [What is continuous integration?](#what-is-continuous-integration%3F)\n- [What is continuous delivery?](#what-is-continuous-delivery%3F)\n- [How source code management relates to CI/CD](#how-source-code-management-relates-to-cicd)\n- [The benefits of CI/CD in modern software development](#the-benefits-of-cicd-in-modern-software-development)\n  - [Key differences between CI/CD and traditional development](#key-differences-between-cicd-and-traditional-development)\n- [Understanding CI/CD fundamentals](#understanding-cicd-fundamentals)\n  - [What is a CI/CD pipeline?](#what-is-a-cicd-pipeline%3F)\n- [Best practices for CI/CD implementation and management](#best-practices-for-cicd-implementation-and-management)\n  - [CI best practices](#ci-best-practices)\n  - [CD best practices](#cd-best-practices)\n- [How to get started with CI/CD](#how-to-get-started-with-cicd)\n- [Security, compliance, and CI/CD](#security-compliance%2C-and-cicd)\n- [CI/CD and the cloud](#cicd-and-the-cloud)\n- [Advanced CI/CD](#advanced-cicd)\n  - [Reuse and automation in CI/CD](#reuse-and-automation-in-cicd)\n  - [Troubleshooting pipelines with AI](#troubleshooting-pipelines-with-ai)\n- [How to migrate to GitLab CI/CD](#how-to-migrate-to-gitlab-cicd)\n- [Lessons from leading organizations](#lessons-from-leading-organizations)\n- [CI/CD tutorials](#cicd-tutorials)\n\n## What is continuous integration?\n\n[Continuous integration](https://about.gitlab.com/topics/ci-cd/benefits-continuous-integration/) (CI) is the practice of integrating all your code changes into the main branch of a shared source code repository early and often, automatically testing changes when you commit or merge them, and automatically kicking off a build. 
With continuous integration, teams can identify and fix errors and security issues more easily and much earlier in the development process.\n\n## What is continuous delivery?\n[Continuous delivery](https://about.gitlab.com/topics/ci-cd/#what-is-continuous-delivery-cd) (CD) – sometimes called _continuous deployment_ – enables organizations to deploy their applications automatically, allowing more time for developers to focus on monitoring deployment status and assure success. With continuous delivery, DevSecOps teams set the criteria for code releases ahead of time and when those criteria are met and validated, the code is deployed into the production environment. This allows organizations to be more nimble and get new features into the hands of users faster. \n\n## How source code management relates to CI/CD\n\nSource code management ([SCM](https://about.gitlab.com/solutions/source-code-management/)) and CI/CD form the foundation of modern software development practices. SCM systems like [Git](https://about.gitlab.com/blog/what-is-git-the-ultimate-guide-to-gits-role-and-functionality/) provide a centralized way to track changes, manage different versions of code, and facilitate collaboration among team members. When developers work on new features or bug fixes, they create branches from the main codebase, make their changes, and then [merge them through merge requests](https://docs.gitlab.com/ee/user/project/merge_requests/). This branching strategy allows multiple developers to work simultaneously without interfering with each other's code, while maintaining a stable main branch that always contains production-ready code.\n\nCI/CD takes the code managed by SCM systems and automatically builds, tests, and validates it whenever changes are pushed. When a developer submits their code changes, the CI/CD system automatically retrieves the latest code, combines it with the existing codebase, and runs through a series of automated checks. 
These typically include compiling the code, running unit tests, performing static code analysis, and checking code coverage. If any of these steps fail, the team is immediately notified, allowing them to address issues before they impact other developers or make their way to production. This tight integration between source control and continuous integration creates a feedback loop that helps maintain code quality and prevents integration problems from accumulating.\n\n## The benefits of CI/CD in modern software development\n\n[CI/CD brings transformative benefits to modern software development](https://about.gitlab.com/blog/ten-reasons-why-your-business-needs-ci-cd/) by dramatically reducing the time and risk associated with delivering new features and fixes. The continuous feedback loop gives DevSecOps teams confidence their changes are automatically validated against the entire codebase. The result is higher quality software, faster delivery times, and more frequent releases that can quickly respond to user needs and market demands.\n\nPerhaps most importantly, CI/CD fosters a culture of collaboration and transparency within software development teams. When everyone can see the status of builds, tests, and deployments in real time, it becomes easier to identify and resolve bottlenecks in the delivery process. The automation provided by CI/CD also reduces the cognitive load on developers, freeing them to focus on writing code rather than managing manual deployment processes. This leads to improved developer satisfaction and productivity, while also reducing the risk traditionally associated with the entire software release process. Teams can experiment more freely knowing rapid code reviews are part of the process and they can quickly roll back changes if needed, which encourages innovation and continuous improvement.\n\n> Get started with GitLab CI/CD. 
[Sign up for GitLab Ultimate](https://about.gitlab.com/free-trial/devsecops/) and try the AI-powered DevSecOps platform free for 60 days.\n\n### Key differences between CI/CD and traditional development\n\nCI/CD differs from traditional software development in many ways, including:\n\n**Frequent code commits**\n\nDevelopers often work independently and infrequently upload their code to a main codebase, causing merge conflicts and other time-consuming issues. With CI/CD, developers push commits throughout the day, ensuring that conflicts are caught early and the codebase remains up to date.\n\n**Reduced risk**\n\nLengthy testing cycles and extensive pre-release planning are hallmarks of traditional software development. This is done to minimize risk but often hinders the ability to find and fix problems. Risk is managed in CI/CD by applying small, incremental changes that are closely monitored and easily reverted.\n\n**Automated and continuous testing**\n\nIn traditional software development, testing is done once development is complete. However, this causes problems, including delayed delivery and costly bug fixes. CI/CD supports automated testing that occurs continuously throughout development, sparked by each code commit. Developers also receive feedback they can take fast action on.\n\n**Automated, repeatable, and frequent deployments**\n\nWith CI/CD, deployments are automated processes that reduce the typical stress and effort associated with big software rollouts. The same deployment process can be repeated across environments, which saves time and reduces errors and inconsistencies.\n\n## Understanding CI/CD fundamentals\n\nCI/CD serves as a framework for building scalable, maintainable delivery processes, so it's critical for DevSecOps teams to firmly grasp its core concepts. A solid understanding of CI/CD principles enables teams to adapt strategies and practices as technology evolves, rather than being tied to legacy approaches. 
Here are some of the basics.\n\n### What is a CI/CD pipeline?\n\nA [CI/CD pipeline](https://about.gitlab.com/topics/ci-cd/cicd-pipeline/) is a series of steps, such as build, test, and deploy, that automate and streamline the software delivery process. [Each stage serves as a quality gate](https://about.gitlab.com/blog/guide-to-ci-cd-pipelines/), ensuring that only validated code moves forward. Early stages typically handle basic checks like compilation and unit testing, while later stages may include integration testing, performance testing, compliance testing, and staged deployments to various environments.\n\nThe pipeline can be configured to require manual approvals at critical points, such as before deploying to production, while automating routine tasks and providing quick feedback to developers about the health of their changes. This structured approach ensures consistency, reduces human error, and provides a clear audit trail of how code changes move from development to production. Modern pipelines are often implemented as code, allowing them to be version controlled, tested, and maintained just like application code.\n\nThese are other terms associated with CI/CD that are important to know:\n- **Commit:** a code change\n- **Job:** instructions a runner has to execute\n- **Runner:** an agent or server that executes each job individually that can spin up or down as needed\n- **Stages:** a keyword that defines certain job stages, such as \"build\" and \"deploy.\" Jobs of the same stage are executed in parallel. 
Pipelines are configured using a version-controlled YAML file, `.gitlab-ci.yml`, at the root level of a project.\n\n![CI/CD pipeline diagram](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749673928/Blog/Content%20Images/1690824533476.png)\n\n## Best practices for CI/CD implementation and management\n\nHow successful you are with CI/CD depends greatly on the [best practices](https://about.gitlab.com/blog/how-to-keep-up-with-ci-cd-best-practices/) you implement. \n\n#### CI best practices\n\n* Commit early, commit often.\n* Optimize pipeline stages.\n* Make builds fast and simple.\n* Use failures to improve processes.\n* Make sure the test environment mirrors production.\n\n#### CD best practices\n\n* Start where you are – you can always iterate.\n* Understand the best continuous delivery is done with minimal tools.\n* Track what’s happening so issues and merge requests don't get out of hand.\n* Streamline user acceptance testing and staging with automation.\n* Manage the release pipeline through automation.\n* Implement monitoring for visibility and efficiency. \n\n> ### Bookmark this!\n>\n>Watch our [\"Intro to CI/CD\" webinar](https://www.youtube.com/watch?v=sQ7Nw3o0izc)!\n>\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n\u003Ciframe src=\"https://www.youtube.com/embed/sQ7Nw3o0izc?si=3HpNqIClrc2ncr7Y\" title=\"Intro to CI/CD webinar\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\n## How to get started with CI/CD\n\nGetting started with CI/CD begins with identifying a simple but representative project to serve as your pilot. Choose a straightforward application with basic testing requirements, as this allows you to focus on learning the pipeline mechanics rather than dealing with complex deployment scenarios. 
Begin by ensuring your code is in [version control](https://about.gitlab.com/topics/version-control/) and has some [basic automated tests](https://about.gitlab.com/blog/develop-c-unit-testing-with-catch2-junit-and-gitlab-ci/) — even a few unit tests will suffice. The goal is to [create a minimal pipeline](https://about.gitlab.com/blog/how-to-learn-ci-cd-fast/) that you can gradually enhance as your understanding grows.\n\nFor GitLab specifically, the process starts with creating a `.gitlab-ci.yml` file in your project's root directory. This YAML file defines your pipeline stages (basic ones like build, test, and deploy) and jobs. A simple pipeline might look like this: The build stage compiles your code and creates artifacts, the test stage runs your unit tests, and the deploy stage pushes your application to a staging environment. GitLab will automatically detect this file and start running your pipeline whenever changes are pushed to your repository. The platform provides [built-in runners](https://docs.gitlab.com/runner/) to execute your pipeline jobs, though you can also set up your own runners for more control.\n\nAs you become comfortable with the basics, gradually add more sophisticated elements to your pipeline. This might include adding code quality checks, [security scanning](https://docs.gitlab.com/ee/user/application_security/#security-scanning), or automated deployment to production. GitLab's DevSecOps platform includes features like [compliance management](https://about.gitlab.com/blog/meet-regulatory-standards-with-gitlab/), [deployment variables](https://about.gitlab.com/blog/demystifying-ci-cd-variables/), and manual approval gates that you can incorporate as your pipeline matures. Pay attention to pipeline execution time and look for opportunities to run jobs in parallel where possible. Remember to add proper error handling and notifications so team members are promptly alerted of any pipeline failures. 
Start documenting common issues and solutions as you encounter them — this will become invaluable as your team grows.\n\n> ### Want to learn more about getting started with CI/CD? Register for a [free CI/CD course on GitLab University](https://university.gitlab.com/courses/continuous-integration-and-delivery-ci-cd-with-gitlab).\n\n## Security, compliance, and CI/CD\n\nOne of the greatest advantages of CI/CD is the ability to embed security and compliance checks early and often in the software development lifecycle. In GitLab, teams can use the `.gitlab-ci.yml` configuration to automatically trigger security scans at multiple stages, from initial code commit to production deployment. The platform's container scanning, dependency scanning, and security scanning capabilities ([Dynamic Application Security Testing](https://docs.gitlab.com/ee/user/application_security/dast/) and [Advanced SAST](https://about.gitlab.com/blog/gitlab-advanced-sast-is-now-generally-available/)) can be configured to run automatically with each code change, checking for vulnerabilities, compliance violations, and security misconfigurations. The platform's API enables integration with [external security tools](https://about.gitlab.com/blog/integrate-external-security-scanners-into-your-devsecops-workflow/), while the test coverage features ensure security tests meet required thresholds.\n\nGitLab's security test reports provide detailed information about findings, enabling quick remediation of security issues before they reach production. The Security Dashboard provides a centralized view of vulnerabilities across projects, while [security policies can be enforced](https://about.gitlab.com/blog/how-gitlab-supports-the-nsa-and-cisa-cicd-security-guidance/) through merge request approvals and pipeline gates. 
In addition, GitLab provides multiple layers of secrets management to protect sensitive information throughout the CI/CD process, audit logs to track access to secrets, and role-based access control (RBAC) to ensure only authorized users can view or modify sensitive configuration data.\n\nGitLab also supports software bill of materials ([SBOM](https://about.gitlab.com/blog/the-ultimate-guide-to-sboms/)) generation, providing a comprehensive inventory of all software components, dependencies, and licenses in an application and enabling teams to quickly identify and respond to vulnerabilities and comply with regulatory mandates.\n\n## CI/CD and the cloud\n\nGitLab's CI/CD platform provides robust integration with major cloud providers including [Amazon Web Services](https://about.gitlab.com/partners/technology-partners/aws/), [Google Cloud Platform](https://about.gitlab.com/blog/provision-group-runners-with-google-cloud-platform-and-gitlab-ci/), and [Microsoft Azure](https://docs.gitlab.com/ee/install/azure/), enabling teams to automate their cloud deployments directly from their pipelines. Through GitLab's cloud integrations, teams can manage cloud resources, deploy applications, and monitor cloud services all within the GitLab interface. The platform's built-in cloud deployment templates and [Auto DevOps](https://docs.gitlab.com/ee/topics/autodevops/) features significantly reduce the complexity of cloud deployments, allowing teams to focus on application development rather than infrastructure management. For organizations that want to automate their IT   infrastructure using GitOps, GitLab has a [Flux CD integration](https://about.gitlab.com/blog/why-did-we-choose-to-integrate-fluxcd-with-gitlab/).\n\nGitLab's cloud capabilities extend beyond basic deployment automation. 
The platform's [Kubernetes integration](https://about.gitlab.com/blog/kubernetes-overview-operate-cluster-data-on-the-frontend/) enables teams to manage container orchestration across multiple cloud providers, while the [cloud native GitLab installation options](https://about.gitlab.com/topics/ci-cd/cloud-native-continuous-integration/) allow the platform itself to run in cloud environments. Through GitLab's cloud-native features, teams can implement auto-scaling runners that dynamically provision cloud resources for pipeline execution, optimizing costs and performance. The platform's integration with cloud provider security services ensures that security and compliance requirements are met throughout the deployment process.\n\nFor multi-cloud environments, GitLab provides consistent workflows and tooling regardless of the underlying cloud provider. Teams can use GitLab's environment management features to handle different cloud configurations across development, staging, and production environments. The platform's [infrastructure as code](https://docs.gitlab.com/ee/user/infrastructure/iac/) support, particularly its native integration with Terraform, enables teams to version control and automate their cloud infrastructure provisioning. GitLab's monitoring and observability features integrate with cloud provider metrics, providing comprehensive visibility into application and infrastructure health across cloud environments.\n\n## Advanced CI/CD \nCI/CD has evolved far beyond simple build and deploy pipelines. In advanced implementations, CI/CD involves sophisticated orchestration of automated testing, security scanning, infrastructure provisioning, AI, and more. 
Here are a few advanced CI/CD strategies that can help engineering teams scale their pipelines and troubleshoot issues even as architectural complexity grows.\n\n### Reuse and automation in CI/CD\n\nGitLab is transforming how development teams create and manage CI/CD pipelines with two major innovations: the [CI/CD Catalog](https://about.gitlab.com/blog/ci-cd-catalog-goes-ga-no-more-building-pipelines-from-scratch/) and [CI/CD steps](https://about.gitlab.com/blog/introducing-ci-cd-steps-a-programming-language-for-devsecops-automation/), a new programming language for DevSecOps automation currently in experimental phase. The CI/CD Catalog is a centralized platform where developers can discover, reuse, and contribute CI/CD components. Components function as reusable, single-purpose building blocks that simplify pipeline configuration — similar to Lego pieces for CI/CD workflows. Meanwhile, CI/CD steps support complex workflows by allowing developers to compose inputs and outputs for a CI/CD job. With the CI/CD Catalog and CI/CD steps, DevSecOps teams can easily standardize CI/CD and its components, simplifying the process of developing and maintaining CI/CD pipelines.\n\n> Learn more in our [CI/CD Catalog FAQ](https://about.gitlab.com/blog/faq-gitlab-ci-cd-catalog/) and [CI/CD steps documentation](https://docs.gitlab.com/ee/ci/steps/).\n\n### Troubleshooting pipelines with AI\n\nWhile CI/CD pipelines can and do break, troubleshooting the issue quickly can minimize the impact. GitLab Duo Root Cause Analysis, part of a suite of AI-powered features, removes the guesswork by [determining the root cause for a failed CI/CD pipeline](https://about.gitlab.com/blog/quickly-resolve-broken-ci-cd-pipelines-with-ai/). When a pipeline fails, GitLab provides detailed job logs, error messages, and execution traces that show exactly where and why the failure occurred. 
Root Cause Analysis then uses AI to suggest a fix.\nWatch GitLab Duo Root Cause Analysis in action:\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n\u003Ciframe src=\"https://www.youtube.com/embed/sTpSLwX5DIs?si=J6-0Bf6PtYjrHX1K\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\n## How to migrate to GitLab CI/CD\n\nMigrating to the DevSecOps platform and its built-in CI/CD involves a systematic approach of analyzing your existing pipeline configurations, dependencies, and deployment processes to map them to GitLab's equivalent features and syntax. Use these guides to help make the move.\n\n* [How to migrate from Bamboo to GitLab CI/CD](https://about.gitlab.com/blog/migrating-from-bamboo-to-gitlab-cicd/)\n* [Jenkins to GitLab: The ultimate guide to modernizing your CI/CD environment](https://about.gitlab.com/blog/jenkins-gitlab-ultimate-guide-to-modernizing-cicd-environment/)\n* [GitHub to GitLab migration the easy way](https://about.gitlab.com/blog/github-to-gitlab-migration-made-easy/)\n\n## Lessons from leading organizations\n\nThese leading organizations migrated to GitLab and are enjoying the myriad benefits of CI/CD. 
Read their stories.\n\n- [Lockheed Martin](https://about.gitlab.com/customers/lockheed-martin/)\n- [Indeed](https://about.gitlab.com/blog/how-indeed-transformed-its-ci-platform-with-gitlab/)\n- [CARFAX](https://about.gitlab.com/customers/carfax/)\n- [HackerOne](https://about.gitlab.com/customers/hackerone/)\n- [Betstudios](https://about.gitlab.com/blog/betstudios-cto-on-improving-ci-cd-capabilities-with-gitlab-premium/)\n- [Thales and Carrefour](https://about.gitlab.com/blog/how-carrefour-and-thales-are-evolving-their-ci-cd-platforms/)\n\n## CI/CD tutorials\n\nBecome a CI/CD expert with these easy-to-follow tutorials.\n\n* [Basics of CI: How to run jobs sequentially, in parallel, or out of order](https://about.gitlab.com/blog/basics-of-gitlab-ci-updated/)\n* [How to set up your first GitLab CI/CD component](https://about.gitlab.com/blog/tutorial-how-to-set-up-your-first-gitlab-ci-cd-component/)\n* [Building a GitLab CI/CD pipeline for a monorepo the easy way](https://about.gitlab.com/blog/building-a-gitlab-ci-cd-pipeline-for-a-monorepo-the-easy-way/)\n* [Using child pipelines to continuously deploy to five environments](https://about.gitlab.com/blog/using-child-pipelines-to-continuously-deploy-to-five-environments/)\n* [CI/CD automation: Maximize 'deploy freeze' impact across GitLab groups](https://about.gitlab.com/blog/ci-cd-automation-maximize-deploy-freeze-impact-across-gitlab-groups/)\n* [Refactoring a CI/CD template to a CI/CD component](https://about.gitlab.com/blog/refactoring-a-ci-cd-template-to-a-ci-cd-component/)\n* [Annotate container images with build provenance using Cosign in GitLab CI/CD](https://about.gitlab.com/blog/annotate-container-images-with-build-provenance-using-cosign-in-gitlab-ci-cd)\n\n> #### Get started with GitLab CI/CD. 
[Sign up for GitLab Ultimate](https://about.gitlab.com/free-trial/devsecops/) and try the AI-powered DevSecOps platform free for 60 days.",[111,9,484,749,750,680],"tutorial","security",{"slug":752,"featured":93,"template":683},"ultimate-guide-to-ci-cd-fundamentals-to-advanced-implementation","content:en-us:blog:ultimate-guide-to-ci-cd-fundamentals-to-advanced-implementation.yml","Ultimate Guide To Ci Cd Fundamentals To Advanced Implementation","en-us/blog/ultimate-guide-to-ci-cd-fundamentals-to-advanced-implementation.yml","en-us/blog/ultimate-guide-to-ci-cd-fundamentals-to-advanced-implementation",{"_path":758,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":759,"content":765,"config":773,"_id":775,"_type":16,"title":776,"_source":18,"_file":777,"_stem":778,"_extension":21},"/en-us/blog/icymi-key-ai-and-security-insights-from-our-developer-community",{"title":760,"description":761,"ogTitle":760,"ogDescription":761,"noIndex":6,"ogImage":762,"ogUrl":763,"ogSiteName":697,"ogType":698,"canonicalUrls":763,"schema":764},"ICYMI: Key AI and security insights from our developer community","Our latest LinkedIn Live highlights the hottest trends in AI, security, DevSecOps, and more. 
Also get a taste of the GitLab community contributions that are making an impact.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098331/Blog/Hero%20Images/Blog/Hero%20Images/blog-image-template-1800x945%20%286%29_55zMmdJIUpfh5qaPW9dtVA_1750098331584.png","https://about.gitlab.com/blog/icymi-key-ai-and-security-insights-from-our-developer-community","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"ICYMI: Key AI and security insights from our developer community\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Fatima Sarah Khalid\"}],\n        \"datePublished\": \"2024-12-05\"\n      }",{"title":760,"description":761,"authors":766,"heroImage":762,"date":768,"body":769,"category":14,"tags":770},[767],"Fatima Sarah Khalid","2024-12-05","In our [November LinkedIn Live broadcast](https://www.linkedin.com/feed/update/urn:li:activity:7265408726696697857), we brought together field CTOs, developer advocates, and community leaders to discuss industry trends and showcase features making a difference in developer workflows.\n\nHere are 5 key highlights:\n\n### 1. AI adoption trends from the field\nOur field CTOs shared insights on how organizations are embracing AI across their development workflows. 
For instance, Field CTO Cherry Han highlighted how financial organizations are thinking beyond individual developer tools.\n\n\u003Cdiv style=\"padding:56.25% 0 0 0;position:relative;\">\u003Ciframe src=\"https://player.vimeo.com/video/1035388263?badge=0&amp;autopause=0&amp;player_id=0&amp;app_id=58479\" frameborder=\"0\" allow=\"autoplay; fullscreen; picture-in-picture; clipboard-write\" style=\"position:absolute;top:0;left:0;width:100%;height:100%;\" title=\"Ai Adoption Trends from the Field\">\u003C/iframe>\u003C/div>\u003Cscript src=\"https://player.vimeo.com/api/player.js\">\u003C/script>\n\n\u003Cbr>\u003C/br>\nAndrew Hasker, Field CTO for Asia Pacific and Japan, offered valuable perspective on AI adoption.\n\n\u003Cdiv style=\"padding:56.25% 0 0 0;position:relative;\">\u003Ciframe src=\"https://player.vimeo.com/video/1035388277?badge=0&amp;autopause=0&amp;player_id=0&amp;app_id=58479\" frameborder=\"0\" allow=\"autoplay; fullscreen; picture-in-picture; clipboard-write\" style=\"position:absolute;top:0;left:0;width:100%;height:100%;\" title=\"From Field CTOs\">\u003C/iframe>\u003C/div>\u003Cscript src=\"https://player.vimeo.com/api/player.js\">\u003C/script>\n\n### 2. Security coverage that makes a difference\n\nStaff Developer Advocate Fernando Diaz demonstrated how GitLab's security scanners cover the complete application lifecycle, showing how easy it is to implement [comprehensive security scanning](https://about.gitlab.com/solutions/security-compliance/) with just a few lines of code.\n\n\u003Cdiv style=\"padding:56.25% 0 0 0;position:relative;\">\u003Ciframe src=\"https://player.vimeo.com/video/1035388297?badge=0&amp;autopause=0&amp;player_id=0&amp;app_id=58479\" frameborder=\"0\" allow=\"autoplay; fullscreen; picture-in-picture; clipboard-write\" style=\"position:absolute;top:0;left:0;width:100%;height:100%;\" title=\"Security Coverage\">\u003C/iframe>\u003C/div>\u003Cscript src=\"https://player.vimeo.com/api/player.js\">\u003C/script>\n\n### 3. 
AI-powered language migration made simple\nIn an impressive demonstration, Senior Technical Marketing Manager Cesar Saavedra showed how GitLab Duo can assist in migrating applications between programming languages.\n\n\u003Cdiv style=\"padding:56.25% 0 0 0;position:relative;\">\u003Ciframe src=\"https://player.vimeo.com/video/1036170482?badge=0&amp;autopause=0&amp;player_id=0&amp;app_id=58479\" frameborder=\"0\" allow=\"autoplay; fullscreen; picture-in-picture; clipboard-write\" style=\"position:absolute;top:0;left:0;width:100%;height:100%;\" title=\"AI-Powered Language Migration Made Simple\">\u003C/iframe>\u003C/div>\u003Cscript src=\"https://player.vimeo.com/api/player.js\">\u003C/script>\n\n### 4. Making DevSecOps work smarter\n\nDeveloper Advocate Abubakar Siddiq Ango showcased how GitLab's triage features can automate routine tasks.\n\u003Cdiv style=\"padding:56.25% 0 0 0;position:relative;\">\u003Ciframe src=\"https://player.vimeo.com/video/1035388290?badge=0&amp;autopause=0&amp;player_id=0&amp;app_id=58479\" frameborder=\"0\" allow=\"autoplay; fullscreen; picture-in-picture; clipboard-write\" style=\"position:absolute;top:0;left:0;width:100%;height:100%;\" title=\"Making DevOps Work Smarter\">\u003C/iframe>\u003C/div>\u003Cscript src=\"https://player.vimeo.com/api/player.js\">\u003C/script>\n\n### 5. 
Community contributions making an impact\n\nDirector of Contributor Success Nick Veenhof shared how community contributions are shaping GitLab's development:\n\u003Cdiv style=\"padding:56.25% 0 0 0;position:relative;\">\u003Ciframe src=\"https://player.vimeo.com/video/1035395211?badge=0&amp;autopause=0&amp;player_id=0&amp;app_id=58479\" frameborder=\"0\" allow=\"autoplay; fullscreen; picture-in-picture; clipboard-write\" style=\"position:absolute;top:0;left:0;width:100%;height:100%;\" title=\"Community Contributions Making an Impact\">\u003C/iframe>\u003C/div>\u003Cscript src=\"https://player.vimeo.com/api/player.js\">\u003C/script>\n\n## Watch on-demand\n\n[Watch the complete broadcast recording](https://www.linkedin.com/feed/update/urn:li:activity:7265408726696697857) for step-by-step demonstrations and insights from our experts. Also, be sure to [follow GitLab on LinkedIn](https://www.linkedin.com/company/gitlab-com) to stay up to date on our monthly broadcasts and get insights into our platform, DevSecOps, and software development.\n",[771,750,269,749,772],"AI/ML","webcast",{"slug":774,"featured":6,"template":683},"icymi-key-ai-and-security-insights-from-our-developer-community","content:en-us:blog:icymi-key-ai-and-security-insights-from-our-developer-community.yml","Icymi Key Ai And Security Insights From Our Developer Community","en-us/blog/icymi-key-ai-and-security-insights-from-our-developer-community.yml","en-us/blog/icymi-key-ai-and-security-insights-from-our-developer-community",{"_path":780,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":781,"content":787,"config":794,"_id":796,"_type":16,"title":797,"_source":18,"_file":798,"_stem":799,"_extension":21},"/en-us/blog/develop-c-unit-testing-with-catch2-junit-and-gitlab-ci",{"title":782,"description":783,"ogTitle":782,"ogDescription":783,"noIndex":6,"ogImage":784,"ogUrl":785,"ogSiteName":697,"ogType":698,"canonicalUrls":785,"schema":786},"Develop C++ unit testing with Catch2, JUnit, and GitLab 
CI","Learn how to set up, write, and automate C++ unit tests using Catch2 with GitLab CI/CD. See examples from a working air quality app project and AI-powered help from GitLab Duo.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749659684/Blog/Hero%20Images/AdobeStock_479904468__1_.jpg","https://about.gitlab.com/blog/develop-c-unit-testing-with-catch2-junit-and-gitlab-ci","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Develop C++ unit testing with Catch2, JUnit, and GitLab CI\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Fatima Sarah Khalid\"}],\n        \"datePublished\": \"2024-07-02\"\n      }",{"title":782,"description":783,"authors":788,"heroImage":784,"date":789,"body":790,"category":14,"tags":791},[767],"2024-07-02","Continuous integration (CI) and automated testing are important DevSecOps workflows for software developers to detect bugs early, improve code quality, and streamline their development processes. \n\nIn this tutorial, you'll learn how to set up unit testing on a `C++` project with [Catch2](https://github.com/catchorg/Catch2) and GitLab CI for continuous integration. You'll also see how the AI-powered features of [GitLab Duo](https://about.gitlab.com/gitlab-duo/) can help. We’ll use [an air quality monitoring application](https://gitlab.com/gitlab-da/use-cases/ai/ai-applications/air-quality-app) as our reference project.\n\n## Prerequisites\n\n- Ensure you have [CMake](https://cmake.org/ \"CMake\") installed on your machine. \n- A modern `C++` compiler such as GCC or Clang is required. \n- An API key from [OpenWeatherMap](https://openweathermap.org/api) - requires signing up for a free account (1,000 calls per day are included for free). 
\n\n## Set up the application for testing\n\nThe reference project we’ll be using for demonstrating testing in this blog post is an air quality monitoring application that fetches air quality data from the OpenWeatherMap API based on U.S. zip codes provided by the user.\n\nHere are the steps to set up the application for testing:\n\n1. Fork [the reference project](https://gitlab.com/gitlab-da/use-cases/ai/ai-applications/air-quality-app) and clone the fork to your local environment.\n\n2. Generate an API key from  [OpenWeatherMap](https://openweathermap.org/) and export it into the environment. \n\n```shell\nexport API_KEY=\"YOURAPIKEY_HERE\"\n```\n\n3. Alternatively, you can add the key into your `.env` configuration, and source it with `source ~/.env`, or use a different mechanism to populate the environment.\n\n4. Compile and build the project code with the following instructions:\n\n```cpp\ncmake -S . -B build\ncmake --build build\n```\n\n5. Run the application using the executable and passing in a U.S. zip code (90210 as an example): \n\n```cpp\n./build/air_quality_app 90210\n```\n\nHere’s an example of what running the program will look like in your terminal:  \n\n```bash\n❯ ./build/air_quality_app 90210\nAir Quality Index (AQI) for Zip Code 90210: 2 (Fair)\n```\n\n## Install Catch2\n\nNow that the application is set up and working, let's start working on adding testing using Catch2. Catch2 is a modern, `C++-native` testing framework for unit tests. \n\nYou can also ask GitLab Duo Chat within your IDE for an introduction to getting started with Catch2 as a `C++` testing framework. GitLab Duo Chat will provide getting started steps as well as an example test: \n\n![GitLab Duo Chat starting steps and example test](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749676997/Blog/Content%20Images/1.duo-chat-installing-catch2.png)\n\n1. 
First navigate to your project’s root directory and create an externals folder using the `mkdir` command.\n\n```shell\nmkdir externals\n```\n\n2. There are several ways to install Catch2 via [its CMake integration](https://github.com/catchorg/Catch2/blob/devel/docs/cmake-integration.md#top). We will use the option of installing it as a submodule and including it as part of the source code to simplify dependency management. To add Catch2 to your project in the `externals` folder: \n\n```shell\ngit submodule add https://github.com/catchorg/Catch2.git externals/Catch2\ngit submodule update --init --recursive\n```\n\n3. Update `CMakeLists.txt` to include Catch2’s directory as a subdirectory. This allows CMake to find and build Catch2 as a part of our project. \n\n```cpp\n# Assuming Catch2 in externals/Catch2\nadd_subdirectory(externals/Catch2)\n```\n\n4. Create a `tests.cpp` file in your project root to write our tests to: \n\n```shell\ntouch tests.cpp\n```\n\n5. Update `CMakeLists.txt` to link against Catch2. When defining your test executable in CMake, link it against Catch2:\n\n```cpp\n# Add tests executable and link it to Catch2\nadd_executable(tests tests.cpp)\ntarget_link_libraries(tests PRIVATE Catch2::Catch2WithMain)\n```\n\n## Structure the project for testing\n\nBefore we start writing our tests, we should separate our application logic into separate files in order to maintain and test our code more efficiently. At the end of this section we should have:\n\n```\nmain.cpp containing only the main() function and application setup\nincludes/functions.cpp containing all functional code such as API calls and data processing: \nincludes/functions.h containing the declarations for the functions defined in functions.cpp.  It needs to define the preprocessor macro guards, and include all necessary headers. \n```\n\nApply the following changes to the files: \n\n1. 
`main.cpp`\n\n```cpp\n#include \u003Ciostream>\n#include \"functions.h\"\n\nint main(int argc, char* argv[]) {\n   if (argc \u003C 2) {\n       std::cerr \u003C\u003C \"Usage: \" \u003C\u003C argv[0] \u003C\u003C \" \u003CZip Code>\" \u003C\u003C std::endl;\n       return 1;\n   }\n\n   std::string zipCode = argv[1];\n   std::string apiKey = getApiKey();\n   if (apiKey.empty()) {\n       std::cerr \u003C\u003C \"API key not found.\" \u003C\u003C std::endl;\n       return 1;\n   }\n\n   auto [lat, lon] = geocodeZipcode(zipCode, apiKey);\n   if (lat == 0 && lon == 0) {\n       std::cerr \u003C\u003C \"Failed to geocode zipcode.\" \u003C\u003C std::endl;\n       return 1;\n   }\n\n   std::string response = fetchAirQuality(lat, lon, apiKey);\n   std::string airQualityInfo = parseAirQualityResponse(response);\n\n   std::cout \u003C\u003C \"Air Quality Index for Zip Code \" \u003C\u003C zipCode \u003C\u003C \": \" \u003C\u003C airQualityInfo \u003C\u003C std::endl;\n\n   return 0;\n}\n```\n\n2. Create a `functions.h:` in the `includes` folder: \n\n```cpp\n#ifndef FUNCTIONS_H\n#define FUNCTIONS_H\n\n#include \u003Cstring>\n#include \u003Cutility>\n#include \u003Cvector>\n\n// Declare the function prototype\nstd::string httpRequest(const std::string& url);\nbool loadEnvFile(const std::string& filename);\nstd::string getApiKey();\nstd::pair\u003Cdouble, double> geocodeZipcode(const std::string& zipCode, const std::string& apiKey);\nstd::string fetchAirQuality(double lat, double lon, const std::string& apiKey);\nstd::string parseAirQualityResponse(const std::string& response);\n\n#endif\n```\n\n3. 
Create a `functions.cpp` in the `includes` folder: \n\n```cpp\n#include \"functions.h\"\n#include \u003Cfstream>\n#include \u003Celnormous/HTTPRequest.hpp>\n#include \u003Cnlohmann/json.hpp>\n#include \u003Ciostream>\n#include \u003Ccstdlib> // For getenv\n\nstd::string httpRequest(const std::string& url) {\n   try {\n       http::Request request{url};\n       const auto response = request.send(\"GET\");\n       return std::string{response.body.begin(), response.body.end()};\n   } catch (const std::exception& e) {\n       std::cerr \u003C\u003C \"Request failed, error: \" \u003C\u003C e.what() \u003C\u003C std::endl;\n       return \"\";\n   }\n}\nstd::string getApiKey() {\n   const char* envApiKey = std::getenv(\"API_KEY\");\n   if (envApiKey) {\n       return std::string(envApiKey);\n   }\n   // If the environment variable is not set, fallback to the config file\n   std::ifstream configFile(\"config.txt\");\n   std::string line;\n   if (getline(configFile, line)) {\n       return line.substr(line.find('=') + 1);\n   }\n   return \"\";\n}\n\nstd::pair\u003Cdouble, double> geocodeZipcode(const std::string& zipCode, const std::string& apiKey) {\n   std::string url = \"http://api.openweathermap.org/geo/1.0/zip?zip=\" + zipCode + \",US&appid=\" + apiKey;\n   std::string response = httpRequest(url);\n   try {\n       auto json = nlohmann::json::parse(response);\n       if (json.contains(\"lat\") && json.contains(\"lon\")) {\n           double lat = json[\"lat\"];\n           double lon = json[\"lon\"];\n           return {lat, lon};\n       } else {\n           std::cerr \u003C\u003C \"Geocode response missing 'lat' or 'lon' fields: \" \u003C\u003C response \u003C\u003C std::endl;\n       }\n   } catch (const nlohmann::json::parse_error& e) {\n       std::cerr \u003C\u003C \"Failed to parse geocode response: \" \u003C\u003C e.what() \u003C\u003C \" - Response: \" \u003C\u003C response \u003C\u003C std::endl;\n   }\n   return {0, 0};\n}\n\nstd::string 
fetchAirQuality(double lat, double lon, const std::string& apiKey) {\n   std::string url = \"http://api.openweathermap.org/data/2.5/air_pollution?lat=\" + std::to_string(lat) + \"&lon=\" + std::to_string(lon) + \"&appid=\" + apiKey;\n   std::string response = httpRequest(url);\n   return response;\n}\n\nstd::string parseAirQualityResponse(const std::string& response) {\n   try {\n       auto json = nlohmann::json::parse(response);\n       if (json.contains(\"list\") && !json[\"list\"].empty() && json[\"list\"][0].contains(\"main\")) {\n           int aqi = json[\"list\"][0][\"main\"][\"aqi\"];\n           std::string aqiCategory;\n           switch (aqi) {\n               case 1:\n                   aqiCategory = \"Good\";\n                   break;\n               case 2:\n                   aqiCategory = \"Fair\";\n                   break;\n               case 3:\n                   aqiCategory = \"Moderate\";\n                   break;\n               case 4:\n                   aqiCategory = \"Poor\";\n                   break;\n               case 5:\n                   aqiCategory = \"Very Poor\";\n                   break;\n               default:\n                   aqiCategory = \"Unknown\";\n                   break;\n           }\n           return std::to_string(aqi) + \" (\" + aqiCategory + \")\";\n       } else {\n           return \"No AQI data available\";\n       }\n   } catch (const std::exception& e) {\n       std::cerr \u003C\u003C \"Failed to parse JSON response: \" \u003C\u003C e.what() \u003C\u003C std::endl;\n       return \"Error parsing AQI data\";\n   }\n}\n\n```\n\n4. 
Now that we have separated the source files, we also need to update our `CMakeLists.txt` to include `functions.cpp` in the `add_executable()` calls:\n\n```cpp\ncmake_minimum_required(VERSION 3.14)\nproject(air-quality-app)\n\n# Set the C++ standard for the project\nset(CMAKE_CXX_STANDARD 17)\nset(CMAKE_CXX_STANDARD_REQUIRED ON)\nset(CMAKE_CXX_EXTENSIONS OFF)\n\ninclude_directories(${CMAKE_SOURCE_DIR}/includes)\n\n# Define the main program executable\nadd_executable(air_quality_app main.cpp includes/functions.cpp)\n\n# Assuming Catch2 in externals/Catch2\nadd_subdirectory(externals/Catch2)\n\n# Add tests executable and link it to Catch2\nadd_executable(tests tests.cpp includes/functions.cpp)\ntarget_link_libraries(tests PRIVATE Catch2::Catch2WithMain)\n```\n\nTo verify that the changes are working, regenerate the CMake configuration and rebuild the source code with the following commands. The build will take longer now that we're compiling Catch2 files. \n\n```shell\nrm -rf build # delete existing build files\ncmake -S . -B build \ncmake --build build  \n```\n\nYou should be able to run the application without any errors.\n\n```shell\n./build/air_quality_app 90210\n```\n\n## Write tests in Catch2  \n\nCatch2 tests are made up of [macros and assertions](https://github.com/catchorg/Catch2/blob/devel/docs/assertions.md). Macros in Catch2 are used to define test cases and sections within those test cases. They help in organizing and structuring the tests. Assertions are used to verify that the code behaves as expected. If an assertion fails, the test case will fail, and Catch2 will report the failure.\n\nLet’s review a basic test scenario for an addition function to understand. Note: This test is read-only, as an example. 
\n\n```cpp\nint add(int a, int b) {\n   return a + b;\n}\n\nTEST_CASE(\"Addition works correctly\", \"[math]\") {\n   REQUIRE(add(1, 1) == 2);  // Test passes if 1+1 equals 2\n   REQUIRE(add(2, 2) != 5);  // Test passes if 2+2 does not equal 5\n}\n```\n\n- Each test begins with the `TEST_CASE` macro, which defines a test case container. The macro accepts two parameters: a string describing the test case and optionally a second string for tagging the test for easy filtering.\n- Tests are also composed of assertions, which are statements that check if conditions are true. Catch2 provides macros for assertion that include `REQUIRE`, which aborts the current test if the assertion fails, and `CHECK`, which logs the failure but continues with the current test.\n\n### Prepare to write tests with Catch2\n\nTo test the API retrieval functions in our air quality application, we’ll be using mock API requests. Mock API testing is a technique used to test how your application will interact with an external API without making any real API calls. Instead of sending requests to a live API server, we can simulate the responses using predefined data. Mock requests allow us to control the input data and specify exactly what the API would return for different requests, making sure that our tests aren't affected by changes in the real API responses or unexpected data. This also makes it easier for us to simulate and catch different failures.\n\nIn our `tests.cpp` file, let’s define the following function to run mock API requests.   
\n\n```cpp\n#include \"includes/functions.h\"\n#include \u003Ccatch2/catch_test_macros.hpp>\n#include \u003Cstring>\n\n// Mock HTTP request function that simulates API responses\nstd::string mockHttpRequest(const std::string& url) {\n   if (url.find(\"geo\") != std::string::npos) {\n       // Mock response for geocoding\n       return R\"({\"lat\": 40.7128, \"lon\": -74.0060})\"; \n   } else if (url.find(\"air_pollution\") != std::string::npos) {\n       // Mock response for air quality\n       return R\"({\"list\": [{\"main\": {\"aqi\": 2}}]})\";\n   }\n   // Default mock response for unmatched endpoints\n   return \"{}\";\n}\n// Overriding the actual httpRequest function with the mockHttpRequest for testing\nstd::string httpRequest(const std::string& url) {\n   return mockHttpRequest(url);\n}\n```\n\n- This function simulates HTTP requests and returns predefined JSON responses based on the URL given as input. \n- It also checks the URL to determine which type of data is being requested based on the functionality of the application (geocoding, air pollution, or forecast data). If the URL doesn’t match the expected endpoint, it returns an empty JSON object. \n\nDon't compile the code just yet, as you'll see a linker error. Since we're overriding the original `httpRequest` function with our mock function for testing, we'll need a preprocessor macro to enable conditional compilation - indicating which `httpRequest` function should run when we're compiling tests. \n\n#### Define a preprocessor macro for testing  \n\nBecause we’ve overridden `httpRequest` in our `tests.cpp`, we need to exclude that code from `functions.cpp` when we’re testing. When building tests, we may need to ensure that certain parts of our code behave differently or are excluded. 
We can do this by defining a preprocessor macro `TESTING` which enables conditional compilation, allowing us to selectively include or exclude code when compiling the test target:  \n\nWe define the `TESTING` macro in our `CMakeLists.txt` at the end:  \n\n```cpp\n# Define TESTING macro for this target\ntarget_compile_definitions(tests PRIVATE TESTING)\n```\n\nAnd add the macro wrapper in  `functions.cpp` around the original `httpRequest` function:  \n\n```cpp\n#ifndef TESTING  // Exclude this part when TESTING is defined\nstd::string httpRequest(const std::string& url) {\n   try {\n       http::Request request{url};\n       const auto response = request.send(\"GET\");\n       return std::string{response.body.begin(), response.body.end()};\n   } catch (const std::exception& e) {\n       std::cerr \u003C\u003C \"Request failed, error: \" \u003C\u003C e.what() \u003C\u003C std::endl;\n       return \"\";\n   }\n}\n#endif\n```\n\nRegenerate the CMake configuration and rebuild the source code to verify it works.\n\n```shell\ncmake --build build  \n```\n\n### Write the first tests \n\nNow, let’s write some tests for our air quality application.\n\n#### Test 1: Verify API key retrieval \n\nThis test ensures that the `getApiKey` function retrieves the API key correctly from the environment variable or the configuration file. Add the test case to our `tests.cpp`:\n\n```cpp\n\nTEST_CASE(\"API Key Retrieval\", \"[api]\") {\n   // Set the API_KEY environment variable for testing\n   setenv(\"API_KEY\", \"test_key\", 1);\n   // Test if the key is retrieved correctly\n   REQUIRE(getApiKey() == \"test_key\");\n}\n```\n\nYou can verify that this test passes by rebuilding the code and running the tests:\n\n```shell\ncmake --build build\n./build/tests\n```\n\n#### Test 2: Geocode the zip code\n\nThis test ensures that the `geocodeZipcode` function returns the correct latitude and longitude for a given zip code using the mock API response function we set up earlier. 
The  `geocodeZipcode` function is supposed to hit an API that returns geographic coordinates based on a zip code. \n\nIn `tests.cpp`, add this test case for the zip code 90210: \n\n```cpp\nTEST_CASE(\"Geocode Zip code\", \"[geocode]\") {\n   std::string apiKey = \"test_key\";\n   std::pair\u003Cdouble, double> coordinates = geocodeZipcode(\"90210\", apiKey);\n   // Check latitude\n   REQUIRE(coordinates.first == 40.7128);\n   // Check longitude \n   REQUIRE(coordinates.second == -74.0060);\n}\n```\n\nThe purpose of this test is to verify that the function `geocodeZipcode` can correctly parse the latitude and longitude from the API response. By hardcoding the expected response, we ensure that the test environment is controlled and predictable.\n\n #### Test 3: Air quality API test\n\nThis test ensures that the `fetchAirQuality` function correctly fetches air quality data using the mock API response function we set up earlier. It verifies that the function constructs the API request properly, sends it, and accurately parses the air quality index (AQI) from the mock JSON response. This validation helps ensure that the overall process of fetching and interpreting air quality data works as intended.\n\n```cpp\nTEST_CASE(\"Fetch Air Quality\", \"[airquality]\") {\n   std::string apiKey = \"test_key\";\n   double lat = 40.7128;\n   double lon = -74.0060;\n   std::string response = fetchAirQuality(lat, lon, apiKey);\n   // Check the response\n   REQUIRE(response == R\"({\"list\": [{\"main\": {\"aqi\": 2}}]})\");\n}\n```\n\n## Build and run the tests\n\nTo  build and compile our application, we'll use the same CMake commands as before:\n\n```cpp\ncmake -S . 
-B build\ncmake --build build\n\n```\n\nAfter building, we can run our tests by executing the test binary:  \n\n```cpp\n./build/tests\n\n```\n\nRunning this command will execute all defined tests, and you will see output indicating whether each test has passed or failed.\n\n![Output showing pass/fail of tests](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749676998/Blog/Content%20Images/2.running-catch2-tests.png)\n\n## Set up GitLab CI/CD\n\nTo automate the testing process each time we push some new code to our repository, let’s set up [GitLab CI/CD](https://about.gitlab.com/topics/ci-cd/). Create a new `.gitlab-ci.yml` configuration file in the root directory. \n\n```yaml\nimage: gcc:latest\n\nvariables:\n GIT_SUBMODULE_STRATEGY: recursive\n\nstages:\n - build\n - test\n\nbefore_script:\n - apt-get update && apt-get install -y cmake\n\ncompile:\n stage: build\n script:\n   - cmake -S . -B build\n   - cmake --build build\n artifacts:\n   paths:\n     - build/\n\ntest:\n stage: test\n script:\n   - ./build/tests --reporter junit -o test-results.xml\n artifacts:\n   reports:\n     junit: test-results.xml\n```\n\nThis CI/CD configuration will compile both the main application and the test suite, then run the tests, generating a JUnit XML report which GitLab uses to display the test results.  \n\n- In `before_script`, we added an installation for `cmake`. The `GIT_SUBMODULE_STRATEGY: recursive` variable tells GitLab CI to initialize and update our submodules (Catch2). \n- In the `test` stage, `--reporter junit -o test-results.xml` specifies that the test results should be treated as a JUnit report which allows GitLab CI to display results in the UI. This is super helpful when you have several tests in your application.  
\n\nWe also need to [add an environment variable](https://docs.gitlab.com/ee/ci/variables/#define-a-cicd-variable-in-the-ui) with the `API_KEY` in project settings on GitLab.\n\nDon’t forget to add all new files to Git, and commit and push the changes in a new MR:\n\n```shell\ngit checkout -b tests-catch2-cicd\n\ngit add includes/functions.{h,cpp} tests.cpp .gitlab-ci.yml \ngit add CMakeLists.txt main.cpp \n\ngit commit -vm \"Add Catch2 tests and CI/CD configuration\"\ngit push \n```\n\n## View the test report\n\nAfter pushing our code changes, we can review the results of our tests in the GitLab UI in the Pipeline view in the `Tests` tab:\n\n![GitLab pipeline view shows test results](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749676998/Blog/Content%20Images/2.0-passed-tests-UI.png)\n\n## Simulate a test failure\n\nTo demonstrate how our UI will handle test failures, we can intentionally introduce a bug into our code and observe the resulting behavior. \n\nLet's modify our `parseAirQualityResponse` function to introduce an error. We can change the AQI category for an AQI value of 2 from \"Fair\" to \"Poor.\" This change will cause the related test to fail, allowing us to see the test failure in the GitLab UI.\n\nIn `functions.cpp`, find the `parseAirQualityResponse` function and modify the switch statement for case `2` to set the `Poor` value instead of `Fair`:\n\n```cpp\n               // Intentional bug:\n               case 2:\n                   aqiCategory = \"Poor\";\n                   break;\n```\n\nIn tests.cpp, add a new test case that directly checks the output of the `parseAirQualityResponse` function. This test ensures that the `parseAirQualityResponse` function correctly parses and categorizes the air quality data from the mock API response. 
This function takes a JSON response, extracts the AQI value, and translates it into a human-readable category.\n\n```cpp\n\nTEST_CASE(\"Parse Air Quality Response\", \"[airquality]\") {\n   std::string mockResponse = R\"({\"list\": [{\"main\": {\"aqi\": 2}}]})\";\n   std::string result = parseAirQualityResponse(mockResponse);\n   // This should fail due to the intentional bug\n   REQUIRE(result == \"2 (Fair)\");\n}\n\n```\n\nCommit the changes, and push them into the MR. Open the MR in your browser. \n\nBy introducing an intentional bug in this function, we can see how a test failure is reported in GitLab's pipelines UI. We must add, commit, and push the changes to our repository to view the test failure in the pipeline. \n\n![Simulated test failure](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749676998/Blog/Content%20Images/2.1-failed-test-simulation.png)\n\n![Details of the simulated failed test](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749676998/Blog/Content%20Images/2.2-failed-test-simulation-details.png)\n\nOnce we've verified this simulated test failure, we can use `git revert` to roll back that commit. \n\n```shell\ngit revert HEAD\n```\n\n## Add and test a new feature\n\nLet’s put what you've learned together by creating a new feature in the air quality application and then writing a test for that feature using Catch2. The new feature will fetch the current weather forecast for the provided zip code.\n\nFirst, we'll define a `Weather` struct and add the function prototype in our `functions.h` file (before the `#endif`):\n\n```cpp\n\nstruct Weather {\n   std::string main;\n   std::string description;\n   double temperature;\n};\n\nWeather getCurrentWeather(const std::string& apiKey, double lat, double lon);\n```\n\nThen, we implement the `getCurrentWeather` function in `functions.cpp`. This function calls the OpenWeatherMap API to retrieve the current weather and parses the JSON response. 
This code was generated using [GitLab Duo](https://about.gitlab.com/gitlab-duo/). If you start typing `Weather getCurrentWeather(const std::string& apiKey, double lat, double lon) {` to complete the function, GitLab Duo will provide the function contents for you, line by line. \n\n![GitLab Duo completing the function contents](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749676998/Blog/Content%20Images/3.get-current-weather-function-completion.png)\n\nHere's what your `getCurrentWeather()` function can look like: \n\n```cpp\n\nWeather getCurrentWeather(const std::string& apiKey, double lat, double lon) {\n   std::string url = \"http://api.openweathermap.org/data/2.5/weather?lat=\" + std::to_string(lat) + \"&lon=\" + std::to_string(lon) + \"&appid=\" + apiKey;\n   std::string response = httpRequest(url);\n   auto json = nlohmann::json::parse(response);\n   Weather weather;\n   if (!json.is_null()) {\n       weather.main = json[\"weather\"][0][\"main\"];\n       weather.description = json[\"weather\"][0][\"description\"];\n       weather.temperature = json[\"main\"][\"temp\"];\n   }\n   return weather;\n}\n```\n\nAnd, finally, we update our `main.cpp` file in the main function to output the current forecast (and converting Kelvin to Celsius for the output):  \n\n```cpp\n   Weather currentWeather = getCurrentWeather(apiKey, lat, lon);\n   if (currentWeather.main.empty()) {\n       std::cerr \u003C\u003C \"Failed to fetch current weather.\" \u003C\u003C std::endl;\n       return 1;\n   }\n\n   std::cout \u003C\u003C \"Current Weather: \" \u003C\u003C currentWeather.main \u003C\u003C \", \" \u003C\u003C currentWeather.description\n       \u003C\u003C \", temperature \" \u003C\u003C currentWeather.temperature - 273.15 \u003C\u003C \" °C\" \u003C\u003C std::endl;\n```\n\nWe can confirm that our new feature is working by building and running the application:  \n\n```shell\ncmake --build build\n./build/air_quality_app \n```\n\nAnd we should see the following 
output or similar in case the weather is different on the day the code is run :)\n\n```\nAir Quality Index for Zip Code 90210: 2 (Poor)\nCurrent Weather: Clouds, broken clouds, temperature 23.2 °C\n```\n\nWith all new functionality, there should be testing! We can also write a test to check whether the application is fetching and parsing a weather forecast correctly. This test checks that the function returns a list containing the correct number of forecast entries and that each entry has accurate data regarding time and temperature.\n\n```cpp\nTEST_CASE(\"Current Weather functionality\", \"[api]\") {\n   auto weather = getCurrentWeather(\"dummyApiKey\", 40.7128, -74.0060);\n   // Ensure main weather description is not empty\n   REQUIRE_FALSE(weather.main.empty());\n   // Validate that temperature is a reasonable value\n   REQUIRE(weather.temperature > 0); \n}\n```\n\nWe’ll also have to update our `mockHTTPRequest` function in `tests.cpp` to account for this new test. Modify the if-condition with a new else-if branch checking for the `weather` string in the URL:  \n\n```cpp\n// Mock HTTP request function that simulates API responses\nstd::string mockHttpRequest(const std::string &url)\n{\n   if (url.find(\"geo\") != std::string::npos)\n   {\n       // Mock response for geocoding\n       return R\"({\"lat\": 40.7128, \"lon\": -74.0060})\";\n   }\n   else if (url.find(\"air_pollution\") != std::string::npos)\n   {\n       // Mock response for air quality\n       return R\"({\"list\": [{\"main\": {\"aqi\": 2}}]})\";\n   }\n   else if (url.find(\"weather\") != std::string::npos)\n   {\n       // Mock response for current weather\n       return R\"({\n          \"weather\": [{\"main\": \"Clear\", \"description\": \"clear sky\"}],\n          \"main\": {\"temp\": 298.55}\n      })\";\n   }\n   return \"{}\";\n}\n```\n\nAnd verify that our tests are working by rebuilding and running our tests:  \n\n```shell\ncmake --build build \n./build/tests\n```\n\nAll tests should 
pass, including the new one for Current Weather Functionality. \n\n## Optimize tests.cpp with sections\n\nTo better organize our tests as the project grows and categorize each functionality, we can use Catch2’s `SECTION` macro. The `SECTION` macro allows you to define logically separate test scenarios within a single test case, providing a clean way to test different behaviors or conditions without requiring multiple separate test cases or multiple files. This approach keeps related tests bundled together and also improves test maintainability by allowing shared setup code to be executed repeatedly for each section.\n\nSince some of our functionality is preprocessing data to retrieve information, let’s section our tests as such:\n- preprocessing steps: \n\t- API key validation\n\t- geocoding validation\n-  API data retrieval:\n\t- air pollution retrieval \n\t- forecast retrieval\n\nHere’s what our `tests.cpp` will look like if organized by sections: \n\n```cpp\n#include \"functions.h\"\n#include \u003Ccatch2/catch_test_macros.hpp>\n#include \u003Cstring>\n\n// Mock HTTP request function that simulates API responses\nstd::string mockHttpRequest(const std::string &url)\n{\n   if (url.find(\"geo\") != std::string::npos)\n   {\n       // Mock response for geocoding\n       return R\"({\"lat\": 40.7128, \"lon\": -74.0060})\";\n   }\n   else if (url.find(\"air_pollution\") != std::string::npos)\n   {\n       // Mock response for air quality\n       return R\"({\"list\": [{\"main\": {\"aqi\": 2}}]})\";\n   }\n   else if (url.find(\"weather\") != std::string::npos)\n   {\n       // Mock response for current weather\n       return R\"({\n          \"weather\": [{\"main\": \"Clear\", \"description\": \"clear sky\"}],\n          \"main\": {\"temp\": 298.55}\n      })\";\n   }\n   return \"{}\";\n}\n\n// Overriding the actual httpRequest function with the mockHttpRequest for testing\nstd::string httpRequest(const std::string &url)\n{\n   return mockHttpRequest(url);\n}\n\n// 
Preprocessing Steps\nTEST_CASE(\"Preprocessing Steps\", \"[preprocessing]\") {\n   SECTION(\"API Key Retrieval\") {\n       // Set the API_KEY environment variable for testing\n       setenv(\"API_KEY\", \"test_key\", 1);\n       // Test if the key is retrieved correctly\n       REQUIRE_FALSE(getApiKey().empty());\n   }\n\n   SECTION(\"Geocode Functionality\") {\n       std::string apiKey = \"test_key\";\n       std::pair\u003Cdouble, double> coordinates = geocodeZipcode(\"90210\", apiKey);\n       // Check latitude\n       REQUIRE(coordinates.first == 40.7128);\n       // Check longitude \n       REQUIRE(coordinates.second == -74.0060);\n   }\n}\n\n// API Data Retrieval\nTEST_CASE(\"API Data Retrieval\", \"[data_retrieval]\") {\n   SECTION(\"Air Quality Functionality\") {\n       std::string apiKey = \"test_key\";\n       double lat = 40.7128;\n       double lon = -74.0060;\n       std::string response = fetchAirQuality(lat, lon, apiKey);\n       // Check the response\n       REQUIRE(response == R\"({\"list\": [{\"main\": {\"aqi\": 2}}]})\");\n   }\n\n   SECTION(\"Current Weather Functionality\") {\n       auto weather = getCurrentWeather(\"dummyApiKey\", 40.7128, -74.0060);\n       // Ensure main weather description is not empty\n       REQUIRE_FALSE(weather.main.empty());\n       // Validate that temperature is a reasonable value\n       REQUIRE(weather.temperature > 0);\n   }\n}\n```\n\nRebuild the code and run the tests again to verify.\n\n```shell\ncmake --build build \n./build/tests\n```\n\n## Next steps\n\nIn this post, we covered how to integrate unit testing into a `C++` project using Catch2 testing framework and GitLab CI/CD and set up basic tests for our reference air quality application project.\n\nTo explore these concepts further, you can check out the [Catch2 documentation](https://github.com/catchorg/Catch2) and [GitLab's Unit test report examples documentation](https://docs.gitlab.com/ee/ci/testing/unit_test_report_examples.html). 
\n\nFor an advanced async exercise, you could build upon this project by using GitLab Duo to implement a feature that retrieves and analyzes historical air quality data and add code quality checks into the CI/CD pipeline. Happy coding! \n",[749,792,793,771,9],"testing","CI",{"slug":795,"featured":93,"template":683},"develop-c-unit-testing-with-catch2-junit-and-gitlab-ci","content:en-us:blog:develop-c-unit-testing-with-catch2-junit-and-gitlab-ci.yml","Develop C Unit Testing With Catch2 Junit And Gitlab Ci","en-us/blog/develop-c-unit-testing-with-catch2-junit-and-gitlab-ci.yml","en-us/blog/develop-c-unit-testing-with-catch2-junit-and-gitlab-ci",{"_path":801,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":802,"content":809,"config":816,"_id":819,"_type":16,"title":820,"_source":18,"_file":821,"_stem":822,"_extension":21},"/en-us/blog/gitlab-17-1-release",{"title":803,"description":804,"ogTitle":803,"ogDescription":804,"config":805,"ogImage":806,"ogUrl":807,"ogSiteName":697,"ogType":698,"canonicalUrls":807,"schema":808},"GitLab 17.1 Release","GitLab 17.1 released with Model registry available in beta and multiple GitLab Duo Code Suggestions in VS Code.",{"noIndex":93},"https://res.cloudinary.com/about-gitlab-com/image/upload/v1749669047/Blog/Hero%20Images/product-gl17-blog-release-cover-17-1-0093-1800x945-fy25.png","https://about.gitlab.com/blog/gitlab-17-1-release","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"GitLab 17.1 Release\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Gabriel Engel\"}],\n        \"datePublished\": \"2024-06-20\",\n      }",{"title":803,"description":804,"authors":810,"heroImage":806,"date":812,"body":813,"category":14,"tags":814},[811],"Gabriel Engel","2024-06-20","Click here for the [GitLab 17.1 release 
post](https://about.gitlab.com/releases/2024/06/20/gitlab-17-1-released/).",[815,680,484],"releases",{"slug":817,"featured":93,"template":683,"externalUrl":818},"gitlab-17-1-release","https://about.gitlab.com/releases/2024/06/20/gitlab-17-1-released/","content:en-us:blog:gitlab-17-1-release.yml","Gitlab 17 1 Release","en-us/blog/gitlab-17-1-release.yml","en-us/blog/gitlab-17-1-release",{"_path":824,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":825,"content":831,"config":838,"_id":840,"_type":16,"title":841,"_source":18,"_file":842,"_stem":843,"_extension":21},"/en-us/blog/new-scheduled-reports-generation-tool-simplifies-value-stream-management",{"title":826,"description":827,"ogTitle":826,"ogDescription":827,"noIndex":6,"ogImage":828,"ogUrl":829,"ogSiteName":697,"ogType":698,"canonicalUrls":829,"schema":830},"New Scheduled Reports Generation tool simplifies value stream management","Proactively receive the most recent metrics from the GitLab Value Streams Dashboard, streamlining the reporting process. This walkthrough shows you how.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749669134/Blog/Hero%20Images/blog-image-template-1800x945__17_.png","https://about.gitlab.com/blog/new-scheduled-reports-generation-tool-simplifies-value-stream-management","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"New Scheduled Reports Generation tool simplifies value stream management\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Haim Snir\"}],\n        \"datePublished\": \"2024-06-20\",\n      }",{"title":826,"description":827,"authors":832,"heroImage":828,"date":812,"body":834,"category":14,"tags":835},[833],"Haim Snir","Optimizing processes and performance is crucial for staying competitive in the fast-paced world of software development. 
[GitLab Value Stream Management (VSM)](https://www.youtube.com/watch?v=8pLEucNUlWI) is a powerful solution that helps organizations achieve this by providing a holistic view of the entire software delivery lifecycle. VSM enables teams to measure, manage, and improve their workflows, ensuring that every step adds value and minimizes waste. GitLab VSM also includes [AI Impact Analytics](https://about.gitlab.com/blog/developing-gitlab-duo-ai-impact-analytics-dashboard-measures-the-roi-of-ai/), which helps managers quantify the impact of [GitLab Duo](https://about.gitlab.com/gitlab-duo/), our AI-powered suite of features to power DevSecOps workflows, on productivity, providing deeper insights into how AI enhances developer efficiency. Now, we are announcing the next step in this VSM journey: Scheduled Reports Generation, available now.\n\nWith the Scheduled Reports Generation tool, value stream management becomes easier and more effective. Scheduled Reports Generation is designed to streamline the reporting process, providing you with the most recent [metrics from the Value Streams Dashboard](https://docs.gitlab.com/ee/user/analytics/value_streams_dashboard.html#dashboard-metrics-and-drill-down-reports), delivered on a scheduled basis.\n\nThe Value Streams Dashboard tracks key metrics throughout the software development lifecycle, assesses the impact of process improvements, and drills down into roadblocks. It helps to compare best practices across teams in turn improving workflow and delivering customer value faster.\n\n> Learn more with our [Value Streams Dashboard tutorial](https://about.gitlab.com/blog/getting-started-with-value-streams-dashboard/).\n\n## Why scheduled VSM Reports are important\n\nScheduled Reports Generation provides software managers a powerful partner in their quest for continuous improvement. This tool offers the ability to automate the creation and distribution of detailed value stream reports across the software delivery lifecycle. 
Here’s why this is valuable:\n\n1. **Consistent monitoring:** Having automated reports ensures that software managers receive regular updates on critical metrics without manual intervention. This consistency helps in maintaining a continuous feedback loop.\n\n2. **Data-driven decision-making:** With up-to-date and accurate data at their fingertips, managers can make better and faster decisions, driving better results.\n\n3. **Time savings:** Automating report generation frees up valuable time for managers, allowing them to focus on strategic initiatives rather than routine data collection and analysis.\n\n### Inside the Scheduled Reports Generation tool\n\nHere is how the VSM tool works:\n\n1. The VSM reporting tool is a [CI/CD Catalog component](https://about.gitlab.com/blog/ci-cd-catalog-goes-ga-no-more-building-pipelines-from-scratch/) that allows you to periodically schedule reports.\n\n2. These reports collect metrics from projects or groups via the public GitLab GraphQL API and are built using [GitLab Flavored Markdown](https://docs.gitlab.com/ee/user/markdown.html).\n\n3. 
As the final step, the tool opens an issue in the designated project, complete with a markdown comparison metrics table, as shown in the example below.\n\n![Scheduled reports generation - issue generation](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749677009/Blog/Content%20Images/Screenshot_2024-06-20_at_18.38.05.png)\n\n> To learn more and for additional examples, please visit the [Scheduled Reports Generation's README file](https://gitlab.com/components/vsd-reports-generator#example-for-monthly-executive-value-streams-report).\n\n### Get to know the Value Streams Dashboard\nWatch this intro video to get familiar with Value Streams Dashboard.\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/8pLEucNUlWI?si=aIdrvREPVBwfC4wM\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\n## Additional resources\n- [Getting started with the new GitLab Value Streams Dashboard](https://about.gitlab.com/blog/getting-started-with-value-streams-dashboard/)\n- [Developing GitLab Duo: AI Impact analytics dashboard measures the ROI of AI](https://about.gitlab.com/blog/developing-gitlab-duo-ai-impact-analytics-dashboard-measures-the-roi-of-ai/)",[836,484,837],"features","news",{"slug":839,"featured":6,"template":683},"new-scheduled-reports-generation-tool-simplifies-value-stream-management","content:en-us:blog:new-scheduled-reports-generation-tool-simplifies-value-stream-management.yml","New Scheduled Reports Generation Tool Simplifies Value Stream 
Management","en-us/blog/new-scheduled-reports-generation-tool-simplifies-value-stream-management.yml","en-us/blog/new-scheduled-reports-generation-tool-simplifies-value-stream-management",{"_path":845,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":846,"content":852,"config":858,"_id":860,"_type":16,"title":861,"_source":18,"_file":862,"_stem":863,"_extension":21},"/en-us/blog/combine-gitlab-webhooks-and-twilio-for-sms-alerts-on-devsecops-platform",{"title":847,"description":848,"ogTitle":847,"ogDescription":848,"noIndex":6,"ogImage":849,"ogUrl":850,"ogSiteName":697,"ogType":698,"canonicalUrls":850,"schema":851},"Combine GitLab webhooks and Twilio for SMS alerts on DevSecOps platform","Configure GitLab webhooks with SMS alerts to instantly get feedback on new and existing issues within a project and enable teams to react quickly to project- and group-level changes.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1750099013/Blog/Hero%20Images/Blog/Hero%20Images/blog-image-template-1800x945%20%2814%29_6VTUA8mUhOZNDaRVNPeKwl_1750099012960.png","https://about.gitlab.com/blog/combine-gitlab-webhooks-and-twilio-for-sms-alerts-on-devsecops-platform","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Combine GitLab webhooks and Twilio for SMS alerts on DevSecOps platform\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Ted Gieschen\"}],\n        \"datePublished\": \"2024-06-10\",\n      }",{"title":847,"description":848,"authors":853,"heroImage":849,"date":855,"body":856,"category":14,"tags":857},[854],"Ted Gieschen","2024-06-10","We all strive to create the most robust and secure DevSecOps environments where everyone can collaborate to deliver amazing products for our customers. But no matter how robust and secure we design our environments we cannot exclude the possibility that something might go wrong. 
When an issue does occur we want to make sure we can remediate it quickly. To do that it's not only important to document the details of the issue but also get the right people notified immediately. In this article, we will set up GitLab [webhooks](https://docs.gitlab.com/ee/user/project/integrations/webhooks.html) together with [Twilio's functionality](https://www.twilio.com/en-us) to [send SMS alerts](https://www.twilio.com/docs/messaging) to the right people, getting them up to date so they can mitigate problems quickly.\n\n## Prerequisites\n\n1. A GitLab account: Webhooks aren't restricted by tier, which means this feature can be used with a [Free, Premium or Ultimate license](https://about.gitlab.com/pricing/) for either [GitLab's SaaS or self-managed offering](https://docs.gitlab.com/ee/subscriptions/choosing_subscription.html). If you don't have an account yet, you can create one on [our sign-up page]( https://gitlab.com/users/sign_up).\n\n2. A Twilio account: To handle the incoming webhook and send an SMS, you will need a Twilio account. If you don't already have one, you can create one on [Twilio's sign-up page](https://www.twilio.com/try-twilio).\n\n3. (Optional) An SMS-capable phone to test the functionality: We will be testing the functionality at the end of this article. If you want to follow along, you will need access to a phone that can receive SMS texts.\n\n4. (Optional) A basic understanding of Node.js: We will be handling the webhooks using a serverless function provided by Twilio Functions. This will be written in [Node.js](https://nodejs.org/en/about). 
Although you can simply copy-paste the functionality, it would be beneficial to understand the basics of Node.js so you can expand functionality in the future.\n\n## Building automated SMS notifications\n\nNow, let's get hands-on with building real-time SMS notifications.\n\nAt a high level, the workflow looks as follows:\n\n![SMS workflow](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750099023/Blog/Content%20Images/Blog/Content%20Images/image6_aHR0cHM6_1750099023261.png)\n\n1. An event is triggered within GitLab. This event is then picked up by GitLab's webhook functionality.\n2. The information of the event is then sent as a webhook to a [Twilio Function](https://www.twilio.com/docs/serverless/functions-assets/functions).\n3. Twilio Functions processes the event data sent by GitLab and creates the SMS body with relevant information.\n4. When complete, Twilio Functions triggers [Twilio Programmable Messaging](https://www.twilio.com/docs/messaging) with the SMS body and recipient information.\n5. Twilio Programmable Messaging then sends the SMS with the generated body to the recipient.\n\n### Set up Twilio SMS\n\nWe need to set up our Twilio environment to be able to send SMS. To do this, log in to your Twilio account. 
If you don't have one just follow the link provided in the prerequisites section above.\n\nOnce logged in you will see the Twilio Console, which will look something like this:\n\n![Twilio console](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750099023/Blog/Content%20Images/Blog/Content%20Images/image5_aHR0cHM6_1750099023261.png)\n\nFrom here, we will head to the left sidebar menu and select __United States (US1) > Phone Numbers > Manage > Active numbers__ and then click the \"Buy a number\" button.\n\n![Buy a number screen](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750099023/Blog/Content%20Images/Blog/Content%20Images/image9_aHR0cHM6_1750099023263.png)\n\nYou can select a phone number, which will be the number that notifications are sent from. There are some [guidelines](https://www.twilio.com/docs/messaging/guides/sending-international-sms-guide) specific to which countries you can send SMS based on the Twilio phone number you purchase, so please keep that in mind. In this example, I will be using my personal U.S. phone number for this article as the recipient phone number, so, in this case, I will purchase a U.S. Twilio number. Just make sure your phone number has the SMS capability. Once selected, simply click the \"Buy \u003Cphone number>\"  button.\n\n![twilio webhooks - image 4](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750099023/Blog/Content%20Images/Blog/Content%20Images/image3_aHR0cHM6_1750099023265.png)\n\nNext, we just need to make sure Twilio can send SMS to our recipient phone number by allowing Twilio Programmable Messaging to send SMS to the country our recipient phone number is associated with. To do so, head to __[United States (US1) > Messaging > Settings > Geo permissions__ and make sure that the country associated with the recipient's phone number is selected (for example, as I am using my U.S. 
phone number as the recipient phone number in this blog, I will select United States).\n\n![twilio webhooks - image 5](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750099023/Blog/Content%20Images/Blog/Content%20Images/image11_aHR0cHM6_1750099023267.png)\n\nClick \"Save geo permissions.\" With that we're all set up to send SMS.\n\nNext, let's handle the processing of the webhook and the creation of our SMS alerts with Twilio Functions.\n\n### Set up Twilio Functions\n\nTo process the webhook we will be sending to Twilio, we need to define a Twilio Function. To do this, select **United States (US1) > Functions and Assets > Functions (Classic) > List** and click \"Create a Function.\" Select the \"Hello SMS\" option in the pop-up and click \"Create.\"\n\n![Create a Twilio function](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750099023/Blog/Content%20Images/Blog/Content%20Images/image4_aHR0cHM6_1750099023269.png)\n\nNow, let's go ahead and configure our Twilio Function.\n\n1. Extend the path for example `/handle-event-webhook`. In my case this would result in the following path: `https://daff-mac-7354.twil.io/handle-event-webhook`.\n\n2. Disable the option `Check for valid Twilio signature`.\n\n3. Adjust the code to the following, making sure to update the values for `\u003Cyour personal phone number>` and `\u003Cyour Twilio Phone number>`:\n\n``` javascript\nexports.handler = function (context, event, callback) {\n  const twilioClient = context.getTwilioClient();\n\n  twilioClient.messages\n    .create({\n      body: `Hi there! There was an update to issue (${event[\"object_attributes\"][\"id\"]}) with title \"${event[\"object_attributes\"][\"title\"]}\" in project ${event[\"repository\"][\"name\"]}. 
It was just ${event[\"object_attributes\"][\"action\"]}.`,\n      to: \"\u003Cyour personal phone number>\",\n      from: \"\u003Cyour Twilio Phone number>\",\n    })\n    .then((message) => {\n      console.log(\"SMS successfully sent\");\n      console.log(message.sid);\n      return callback(null, `Success! Message SID: ${message.sid}`);\n    })\n    .catch((error) => {\n      console.error(error);\n      return callback(error);\n    });\n};\n\n```\n\nIt should end up looking like the following:\n\n  ![Configuration for Twilio function](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750099023/Blog/Content%20Images/Blog/Content%20Images/image2_aHR0cHM6_1750099023271.jpg)\n\nNow, whenever our endpoint is hit, it should trigger an SMS with a custom message indicating a change to an existing issue which will represent an example of the various [webhook events](https://docs.gitlab.com/ee/user/project/integrations/webhook_events.html) we can configure.\n\nNext, let's set our webhooks within GitLab to trigger this endpoint whenever a change to an issue is made.\n\n### Set up GitLab webhooks\n\nLog in to your GitLab instance and go to the project you would like to configure event webhooks in.\n\nOnce in the Project, go to **Settings > Webhooks** and click on \"Add new webhook.\"\n\n![Screen to add a new webhook](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750099023/Blog/Content%20Images/Blog/Content%20Images/image1_aHR0cHM6_1750099023273.png)\n\nYou will only need to configure the following fields:\n\n1. URL: This should be the endpoint we defined in the previous section. In the previous example that would be `https://daff-mac-7354.twil.io/handle-event-webhook`.\n\n2. 
Trigger: In our case, we will be reacting to [issues events](https://docs.gitlab.com/ee/user/project/integrations/webhook_events.html#issue-events), so check \"Issues events.\"\n\n![Configuring URL and trigger fields](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750099023/Blog/Content%20Images/Blog/Content%20Images/image7_aHR0cHM6_1750099023274.png)\n\nWe're all set to test our setup!\n\n### Testing\n\nWhile in the project that was just configured to react to issues events, head to \"Plan > Issues\" and click on \"New issue.\"\n\n![New issue screen](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750099023/Blog/Content%20Images/Blog/Content%20Images/image8_aHR0cHM6_1750099023276.png)\n\nAdd a title and click on \"Create Issue.\"\n\n  ![Create issue screen](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750099023/Blog/Content%20Images/Blog/Content%20Images/image10_aHR0cHM6_1750099023278.png)\n\nIf everything is configured correctly, you should get an SMS looking something like:\n\n`Sent from your Twilio trial account - Hi there! There was an update to issue (146735617) with title \"GitLab webhook example\" in project Webhooks Example. It was just opened.`\n\n## Expanding the use case\n\nWe've leveraged Twilio's SMS functionality in combination with GitLab webhooks to instantly get feedback on new and existing issues within our project, allowing us to react quickly to any changes that might occur. This simple use case showed how one person could instantly get informed about a single type of event. However, often we want to inform more people about various events or be able to react to more than just one type of event (like issue creation and updates).\n\nThis functionality can be expanded by:\n\n1. Sending SMS alerts to multiple people: This can be achieved by extending the Twilio Function to loop through a given array of phone numbers. 
[Twilio's Messaging Service](https://www.twilio.com/docs/messaging/services) can be leveraged to potentially simplify the process of sending SMS to various phone numbers.\n\n2. Handling different event types: Select more types of webhook events in the Project settings to react to other things like [comments](https://docs.gitlab.com/ee/user/project/integrations/webhook_events.html#comment-events), [deployments](https://docs.gitlab.com/ee/user/project/integrations/webhook_events.html#deployment-events), or [releases](https://docs.gitlab.com/ee/user/project/integrations/webhook_events.html#release-events).\n\n3. Configure on a group level: In this example, we’ve only configured webhooks on a project level. However, if it is relevant to react to events across projects on a group level, this can also be configured, removing the need to change webhook settings for each project.\n\n4. Self-host message generation functionality: Leverage [Twilio Server Side SDKs](https://www.twilio.com/docs/libraries) instead of Twilio Functions to host the code yourself. 
This could benefit you if you have restrictions on where you can host code as well as allow you to more easily connect with the rest of your code base likecfetching information from your database to get phone numbers for relevant people.\n\n> Start [a free 30-day trial of GitLab Ultimate](https://gitlab.com/-/trial_registrations/new?glm_source=about.gitlab.com/blog&glm_content=default-saas-trial) today to test-drive more DevSecOps features.",[836,749,680,750,484],{"slug":859,"featured":93,"template":683},"combine-gitlab-webhooks-and-twilio-for-sms-alerts-on-devsecops-platform","content:en-us:blog:combine-gitlab-webhooks-and-twilio-for-sms-alerts-on-devsecops-platform.yml","Combine Gitlab Webhooks And Twilio For Sms Alerts On Devsecops Platform","en-us/blog/combine-gitlab-webhooks-and-twilio-for-sms-alerts-on-devsecops-platform.yml","en-us/blog/combine-gitlab-webhooks-and-twilio-for-sms-alerts-on-devsecops-platform",{"_path":865,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":866,"content":873,"config":878,"_id":881,"_type":16,"title":882,"_source":18,"_file":883,"_stem":884,"_extension":21},"/en-us/blog/gitlab-17-0-release",{"title":867,"description":868,"ogTitle":867,"ogDescription":868,"config":869,"ogImage":870,"ogUrl":871,"ogSiteName":697,"ogType":698,"canonicalUrls":871,"schema":872},"GitLab 17.0 Release","GitLab 17.0 released with generally available CI/CD Catalog and AI Impact analytics dashboard.",{"noIndex":93},"https://res.cloudinary.com/about-gitlab-com/image/upload/v1749665762/Blog/Hero%20Images/blog-gl17-release-hero-17-0-93-1800x945-fy25__1_.png","https://about.gitlab.com/blog/gitlab-17-0-release","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"GitLab 17.0 Release\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Gabriel Engel\"}],\n        \"datePublished\": \"2024-05-16\",\n      
}",{"title":867,"description":868,"authors":874,"heroImage":870,"date":875,"body":876,"category":14,"tags":877},[811],"2024-05-16","This is the [GitLab 17 release post](https://about.gitlab.com/releases/2024/05/16/gitlab-17-0-released/).",[815],{"slug":879,"featured":93,"template":683,"externalUrl":880},"gitlab-17-0-release","https://about.gitlab.com/releases/2024/05/16/gitlab-17-0-released/","content:en-us:blog:gitlab-17-0-release.yml","Gitlab 17 0 Release","en-us/blog/gitlab-17-0-release.yml","en-us/blog/gitlab-17-0-release",{"_path":886,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":887,"content":894,"config":902,"_id":905,"_type":16,"title":906,"_source":18,"_file":907,"_stem":908,"_extension":21},"/en-us/blog/gitlab-patch-release-16-11-1-16-10-4-16-9-6",{"title":888,"description":889,"ogTitle":888,"ogDescription":889,"config":890,"ogImage":891,"ogUrl":892,"ogSiteName":697,"ogType":698,"canonicalUrls":892,"schema":893},"GitLab Patch Release: 16.11.1, 16.10.4, 16.9.6","Learn more about GitLab Patch Release: 16.11.1, 16.10.4, 16.9.6 for GitLab Community Edition (CE) and Enterprise Edition (EE).",{"noIndex":93},"https://res.cloudinary.com/about-gitlab-com/image/upload/v1749662877/Blog/Hero%20Images/security-cover-new.png","https://about.gitlab.com/blog/gitlab-patch-release-16-11-1-16-10-4-16-9-6","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"GitLab Patch Release: 16.11.1, 16.10.4, 16.9.6\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Costel Maxim\"}],\n        \"datePublished\": \"2024-04-24\",\n      }",{"title":888,"description":889,"authors":895,"heroImage":891,"date":897,"body":898,"category":14,"tags":899},[896],"Costel Maxim","2024-04-24","This is the post for [GitLab Patch Release: 16.11.1, 16.10.4, 16.9.6](https://about.gitlab.com/releases/2024/04/24/patch-release-gitlab-16-11-1-released/).",[900,815,901],"patch releases","security 
releases",{"slug":903,"featured":6,"template":683,"externalUrl":904},"gitlab-patch-release-16-11-1-16-10-4-16-9-6","https://about.gitlab.com/releases/2024/04/24/patch-release-gitlab-16-11-1-released/","content:en-us:blog:gitlab-patch-release-16-11-1-16-10-4-16-9-6.yml","Gitlab Patch Release 16 11 1 16 10 4 16 9 6","en-us/blog/gitlab-patch-release-16-11-1-16-10-4-16-9-6.yml","en-us/blog/gitlab-patch-release-16-11-1-16-10-4-16-9-6",{"_path":910,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":911,"content":918,"config":924,"_id":927,"_type":16,"title":928,"_source":18,"_file":929,"_stem":930,"_extension":21},"/en-us/blog/gitlab-16-11-release",{"title":912,"description":913,"ogTitle":912,"ogDescription":913,"config":914,"ogImage":915,"ogUrl":916,"ogSiteName":697,"ogType":698,"canonicalUrls":916,"schema":917},"GitLab 16.11 Release","GitLab 16.11 released with GitLab Duo Chat general availability",{"noIndex":93},"https://res.cloudinary.com/about-gitlab-com/image/upload/v1750099004/Blog/Hero%20Images/Blog/Hero%20Images/blog-image-template-1800x945%20%284%29_1od9f5DGEA0ntKLVnJbA2p_1750099004258.png","https://about.gitlab.com/blog/gitlab-16-11-release","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"GitLab 16.11 Release\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Loryn Bortins\"}],\n        \"datePublished\": \"2024-04-18\",\n      }",{"title":912,"description":913,"authors":919,"heroImage":915,"date":921,"body":922,"category":14,"tags":923},[920],"Loryn Bortins","2024-04-18","This is the [16.11 release post](https://about.gitlab.com/releases/2024/04/18/gitlab-16-11-released/).",[815,484],{"slug":925,"featured":6,"template":683,"externalUrl":926},"gitlab-16-11-release","https://about.gitlab.com/releases/2024/04/18/gitlab-16-11-released/","content:en-us:blog:gitlab-16-11-release.yml","Gitlab 16 11 
Release","en-us/blog/gitlab-16-11-release.yml","en-us/blog/gitlab-16-11-release",{"_path":932,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":933,"content":939,"config":944,"_id":946,"_type":16,"title":947,"_source":18,"_file":948,"_stem":949,"_extension":21},"/en-us/blog/interactive-take-a-guided-tour-of-the-devsecops-workflow",{"title":934,"description":935,"ogTitle":934,"ogDescription":935,"noIndex":6,"ogImage":936,"ogUrl":937,"ogSiteName":697,"ogType":698,"canonicalUrls":937,"schema":938},"Interactive: Take a guided tour of the DevSecOps workflow","Explore GitLab's recommended best practices for DevSecOps with a detailed visual depiction of the main steps in the development lifecycle.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749668969/Blog/Hero%20Images/blog-image-template-1800x945__1800_x_945_px_.png","https://about.gitlab.com/blog/interactive-take-a-guided-tour-of-the-devsecops-workflow","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Interactive: Take a guided tour of the DevSecOps workflow\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Itzik Gan Baruch\"}],\n        \"datePublished\": \"2024-04-16\",\n      }",{"title":934,"description":935,"authors":940,"heroImage":936,"date":941,"body":942,"category":14,"tags":943},[702],"2024-04-16","When engaging in discussions with industry professionals and stakeholders, they quickly grasp the core principles of DevSecOps, which emphasize speed, security, and quality. However, there's often a curiosity about the specific strategies required to achieve optimal speed without compromising security and quality. 
We created this interactive infographic to showcase GitLab's best practices for [DevSecOps](https://about.gitlab.com/topics/devsecops/) through a detailed visual depiction of the main steps in the development lifecycle.\n\nWalk through every step of the DevSecOps process, including creation of [issues](https://docs.gitlab.com/ee/user/project/issues/), development and pushing of code, [security testing](https://about.gitlab.com/stages-devops-lifecycle/secure/), and deployment to production. Each step features a deep dive with additional resources such as demos, blog posts, and documentation.\n\n## Get started with the interactive tour\n\nClick on the image below to access the guided tour, and use the navigation buttons or keyword arrows to easily make your way through the flow.\n\n[![GitLab workflow description](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749676928/Blog/Content%20Images/infographic.png)](https://tech-marketing.gitlab.io/static-demos/gitlab-infographic.html)\n\n\u003Cp>\u003C/p>\n\n> > Learn how [GitLab Duo](https://about.gitlab.com/gitlab-duo/), our suite of AI-powered features, further enhances the DevSecOps workflow.\n",[9,727,111],{"slug":945,"featured":93,"template":683},"interactive-take-a-guided-tour-of-the-devsecops-workflow","content:en-us:blog:interactive-take-a-guided-tour-of-the-devsecops-workflow.yml","Interactive Take A Guided Tour Of The Devsecops Workflow","en-us/blog/interactive-take-a-guided-tour-of-the-devsecops-workflow.yml","en-us/blog/interactive-take-a-guided-tour-of-the-devsecops-workflow",{"_path":951,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":952,"content":958,"config":964,"_id":966,"_type":16,"title":967,"_source":18,"_file":968,"_stem":969,"_extension":21},"/en-us/blog/top-10-gitlab-workflow-hacks-you-need-to-know",{"title":953,"description":954,"ogTitle":953,"ogDescription":954,"noIndex":6,"ogImage":955,"ogUrl":956,"ogSiteName":697,"ogType":698,"canonicalUrls":956,"schema":957},"Top 10 GitLab 
workflow hacks you need to know","A GitLab product manager shares her favorite tricks to navigate quickly and efficiently around the GitLab DevSecOps Platform and to boost team collaboration.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1750099361/Blog/Hero%20Images/Blog/Hero%20Images/lightvisibility_lightvisibility.png_1750099361252.png","https://about.gitlab.com/blog/top-10-gitlab-workflow-hacks-you-need-to-know","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Top 10 GitLab workflow hacks you need to know\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Amanda Rueda\"}],\n        \"datePublished\": \"2024-04-09\",\n      }",{"title":953,"description":954,"authors":959,"heroImage":955,"date":961,"body":962,"category":14,"tags":963},[960],"Amanda Rueda","2024-04-09","In the world of software development, efficiency isn't just about moving fast – it's about smart navigation. As a GitLab product manager, I truly understand the value of efficiency when working within the DevSecOps platform. These are my top 10 favorite GitLab features and they might be the workflow hacks you never knew you needed.\n\nLet's dive into these hidden gems to unlock a new level of productivity and collaboration within your team.\n\n## 1. Resolve comments\n\nNot just for merge requests! Resolving comments on issues can significantly reduce noise and streamline task management. 
It's particularly handy for managing feedback efficiently.\n\n> **Why do I love it?** Not only does resolving comments reduce the noise on an issue, but it’s also a great way to manage tasks.\n>\n> **Use case.** Resolving comments is a great tool for issues where you are collecting feedback – respond to the feedback and provide a link, resolve the comment, and move on to the next one.\n>\n> __[How-to documentation](https://docs.gitlab.com/ee/user/discussions/#resolve-a-thread)__\n\n![example of resolve comments - image 1](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750099376/Blog/Content%20Images/Blog/Content%20Images/image5_aHR0cHM6_1750099376147.gif)\n\n\u003Cp>\u003C/p>\n\n## 2. Internal comments\n\nSpeak directly to your team without an external audience. Keep discussions private within an issue or merge request with comments visible only to your team members. It's the perfect balance between transparency and privacy.\n\n> **Why do I love it?** It balances privacy with transparency, while keeping the broader discussion open for the community.\n>\n> **Use case.** When coordinating a product launch, your marketing team can use internal comments to discuss and refine messaging and strategy. This keeps your discussions centralized and easily accessible to the team while in draft mode.\n>\n> **[How-to documentation](https://docs.gitlab.com/ee/user/discussions/#add-an-internal-note)**\n\n![internal comments example](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750099376/Blog/Content%20Images/Blog/Content%20Images/image2_aHR0cHM6_1750099376148.png)\n\n\u003Cp>\u003C/p>\n\n## 3. 
And/or in filters\n\nWhen searching records on a listing page, using and/or filters can help you slice through the noise and find exactly what you're looking for quickly and efficiently.\n\n> **Why do I love it?** Perfect for finding exactly what you need, powering efficient and streamlined workflows.\n>\n>**Use case.** Search for feature issues related to a specific initiative that are assigned to specific groups.\n>\n> __[How-to documentation](https://docs.gitlab.com/ee/user/project/issues/managing_issues.html#filter-with-the-or-operator)__\n\n![and/or filter example](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750099376/Blog/Content%20Images/Blog/Content%20Images/and_or__1__aHR0cHM6_1750099376152.gif)\n\n\u003Cp>\u003C/p>\n\n## 4. Auto expand URLs\n\nAppending '+' or '+s' to the end of a GitLab URL transforms it into an informative snippet, allowing you to share progress without forcing your teammates to leave the page.\n\n> **Why do I love it?** It's like having x-ray vision for URLs – see the important stuff without even clicking!\n>\n> **Use case.** Sharing progress in comments? Just add '+s' to the link, and boom – everyone's instantly on the same page.\n>\n> __[How-to documentation](https://docs.gitlab.com/ee/user/markdown.html#show-the-issue-merge-request-or-epic-title-in-the-reference)__\n\n![auto expand URLs example](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750099376/Blog/Content%20Images/Blog/Content%20Images/image7_aHR0cHM6_1750099376154.gif)\n\n\u003Cp>\u003C/p>\n\n## 5. 
Quick actions\n\nWith simple text commands, quick actions let you perform tasks like assigning users, adding labels, and more, directly from the description or comment box, saving you clicks and time.\n\n> **Why do I love it?** Saves clicks and time.\n>\n> **Use case.** When creating a new issue I use quick actions to automatically add labels, a milestone, and connect to the epic upon saving the record.\n>\n> __[How-to documentation](https://docs.gitlab.com/ee/user/project/quick_actions.html)__\n\n![quick actions example](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750099376/Blog/Content%20Images/Blog/Content%20Images/image6_aHR0cHM6_1750099376156.gif)\n\n\u003Cp>\u003C/p>\n\n## 6. Bulk edit\n\nApply labels, change assignees, or update milestones for multiple issues at once. This feature turns potentially tedious updates into a breeze, allowing for quick adjustments across numerous issues.\n\n> **Why do I love it?** Because it turns tedious updates into quick updates!\n>\n> **Use case.** Need to tag the whole sprint's issues as Review needed? Just filter, select all, and add that label in bulk – easy peasy.\n>\n> __[How-to documentation](https://docs.gitlab.com/ee/user/project/issues/managing_issues.html#bulk-edit-issues-from-a-project)__\n\n![bulk edit example](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750099376/Blog/Content%20Images/Blog/Content%20Images/image3_aHR0cHM6_1750099376157.gif)\n\n\u003Cp>\u003C/p>\n\n## 7. Epic swimlanes\n\nGroup issues under epics on your board to visually track and discuss progress. 
It's a powerful way to contextualize work during reviews or standups.\n\n> **Why do I love it?** Easily understand the context of work as you’re walking the board.\n>\n> **Use case.** Group by epic during standup reviews to easily piece together work with its parent initiative.\n>\n> __[How-to documentation](https://docs.gitlab.com/ee/user/project/issue_board.html#group-issues-in-swimlanes)__\n\n![epic swimlanes example](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750099376/Blog/Content%20Images/Blog/Content%20Images/image4_aHR0cHM6_1750099376158.gif)\n\n\u003Cp>\u003C/p>\n\n## 8. Wiki diagrams\n\nIllustrate ideas and workflows directly in your wiki pages with easy-to-create diagrams. This feature supports visual learning and simplifies complex concepts.\n\n> **Why do I love it?** It’s incredibly user-friendly and flexible.\n>\n> **Use case.** When outlining a new feature workflow, draw it directly in the wiki page, making it crystal clear for everyone on the team.\n>\n> __[How-to documentation](https://docs.gitlab.com/ee/administration/integration/diagrams_net.html)__\n\n![wiki diagrams example](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750099376/Blog/Content%20Images/Blog/Content%20Images/image1_aHR0cHM6_1750099376159.gif)\n\n\u003Cp>\u003C/p>\n\n## 9. Table creation\n\nForget about wrestling with markdown for table creation. The rich text editor lets you effortlessly insert and format tables, making documentation cleaner and more structured.\n\n> **Why do I love it?** It turns the table creation ordeal into a breeze, making updates clean and structured with just a few clicks.\n>\n> **Use case.** Compiling a sprint retro? 
Quickly insert a table to organize feedback, action items, and owners, making the review process smoother for everyone.\n>\n> __[How-to documentation](https://docs.gitlab.com/ee/user/rich_text_editor.html#tables)__\n\n![table creation example](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750099376/Blog/Content%20Images/Blog/Content%20Images/image8_aHR0cHM6_1750099376160.gif)\n\n\u003Cp>\u003C/p>\n\n## 10. Video and GIF embeds\n\nEnhance your issues and epic descriptions or comments with embedded GIFs and YouTube videos, adding a dynamic layer to your communication.\n\n> **Why do I love it?** Sometimes a GIF or video speaks better than words.\n>\n> **Use case.** Trying to explain a UI bug? Embed a YouTube video for a quick walkthrough of the proposed feature enhancement.\n\n![video and gif embed example](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750099376/Blog/Content%20Images/Blog/Content%20Images/gif__1__aHR0cHM6_1750099376161.gif)\n\n\u003Cp>\u003C/p>\n\n## Explore these features\n\nThese features represent just the tip of the iceberg in GitLab's comprehensive toolkit designed to boost efficiency and foster better collaboration. While they may be underutilized, their impact on your workflow could be substantial. I encourage you to explore these features further and integrate them into your daily routines.\n\n> Are you excited to power your DevSecOps workflow using GitLab? 
[Try GitLab Ultimate for free for 30 days](https://gitlab.com/-/trial_registrations/new).\n",[749,484,836,727],{"slug":965,"featured":6,"template":683},"top-10-gitlab-workflow-hacks-you-need-to-know","content:en-us:blog:top-10-gitlab-workflow-hacks-you-need-to-know.yml","Top 10 Gitlab Workflow Hacks You Need To Know","en-us/blog/top-10-gitlab-workflow-hacks-you-need-to-know.yml","en-us/blog/top-10-gitlab-workflow-hacks-you-need-to-know",{"_path":971,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":972,"content":978,"config":984,"_id":986,"_type":16,"title":987,"_source":18,"_file":988,"_stem":989,"_extension":21},"/en-us/blog/how-to-successfully-deliver-your-software-development-roadmap",{"title":973,"description":974,"ogTitle":973,"ogDescription":974,"noIndex":6,"ogImage":975,"ogUrl":976,"ogSiteName":697,"ogType":698,"canonicalUrls":976,"schema":977},"How to successfully deliver your software development roadmap","Here are three common blockers and how to overcome them to fully realize the ROI of a DevSecOps platform investment.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749669103/Blog/Hero%20Images/AdobeStock_243118595.jpg","https://about.gitlab.com/blog/how-to-successfully-deliver-your-software-development-roadmap","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How to successfully deliver your software development roadmap\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"David DeSanto, Chief Product Officer, GitLab\"}],\n        \"datePublished\": \"2024-04-04\",\n      }",{"title":973,"description":974,"authors":979,"heroImage":975,"date":981,"body":982,"category":14,"tags":983},[980],"David DeSanto, Chief Product Officer, GitLab","2024-04-04","2024 is shaping up to be the year of DevSecOps, where more organizations realize the full potential of blending development, security, and operations through the adoption of a comprehensive platform. 
This is when teams will move beyond using just source code management (SCM) and tap into all the [AI-powered features](https://about.gitlab.com/topics/devops/the-role-of-ai-in-devops/) available across the software development lifecycle (SDLC), delivering better, more secure software faster. But first organizations have to knock down the blockers that can get in the way of successful [DevSecOps](https://about.gitlab.com/topics/devsecops/) adoption.\n\nIn talking to customers at organizations of all sizes, I've heard three main blockers:\n* The potential of AI is believable, but right now it seems limited to code creation and that has limited impact as there is more to the SDLC.\n* A platform seems like a great idea, but forcing my development, security, and operations team to give up their preferred tools all at once will undoubtedly cause a revolt. Yet, without everyone on the same platform, the investment is hard to justify.\n* Regulations and compliance makes it difficult to leverage a DevSecOps SaaS solution, and being on a multi-tenant solution is a non-starter for us as we are in a highly regulated industry. However, the overhead of self-hosting a DevSecOps platform is becoming untenable at our scale.\n\nWhile legitimate concerns, these blockers can be eliminated by combining DevSecOps practices and a platform approach. \"Making sure that we spend our money wisely is very, very important. GitLab allowed us to reduce our costs and centralize our work in one place. It’s been money well spent,\" says Andy Chow, Technology Chief of Staff at global fintech company Airwallex.\n\nLet's dig deeper into each blocker and see how it is resolved with a DevSecOps platform.\n\n> Discover the future of AI-driven software development with our GitLab 17 virtual launch event. 
[Watch today!](https://about.gitlab.com/seventeen/)\n\n### AI is not limited: It is having real impact across the SDLC\n\nWe know that AI is already improving the developer experience but there is so much more that AI can do across the entire SDLC. With AI, organizations can unburden development, security, and operations teams from tedious tasks by taking advantage of the efficiencies that AI provides. For instance, users can access summaries of comments in merge requests, have tests generated, refactor sections of code, and perform other time-saving actions.\n\nThat's why with [GitLab Duo](https://about.gitlab.com/gitlab-duo/), our suite of AI-powered workflows, we focus on more than just code creation — after all, [code creation only accounts for 25% of a developer's time](https://about.gitlab.com/blog/gitlab-global-devsecops-ai-report/). There is so much more that happens in the SDLC where AI can add efficiency. For instance, development, security, and operations teams that use AI-powered capabilities, such as Vulnerability Remediation and Root Cause Analysis, share that they can find and resolve vulnerabilities earlier and identify CI/CD pipeline failures faster and in a more collaborative manner.\n\n### Forget one-size-fits all, migrate your way\n\nRealizing the benefit of a DevSecOps platform is not a one-size-fits-all. You can customize your deployment to fit your organization's needs and where you are in your digital transformation journey — choosing one team at a time to adopt the platform or a full cutover. I have advice, though: Commit to using more than just source code management. A DevSecOps platform is a robust solution that includes enterprise agile planning, CI/CD, security and compliance, value stream analytics, and more. 
Also, make sure that as you deploy your platform, your users agree to get familiar with its range of capabilities — and not still maintain [a complex toolchain](https://about.gitlab.com/blog/battling-toolchain-technical-debt/).\n\nThe way to extract the most ROI and satisfaction from your migration is to show users how to get the functionality they had in their other tools from within the DevSecOps platform. To that end, we've increased our resources to support you. From [in-depth tutorials](https://about.gitlab.com/blog/tags/tutorial/) to [clear reference architectures](https://docs.gitlab.com/ee/administration/reference_architectures/), we have a vast library of content (including videos) for you to draw upon to help your users acclimate to and thrive in the DevSecOps environment. \n\nWe've also made it easier to onboarding teams, with capabilities like [remote development environments](https://about.gitlab.com/blog/quick-start-guide-for-gitlab-workspaces/), enabling organizations to reduce adoption friction. In fact, as more teams within your organization adopt GitLab, consider expanding access for other critical functions that contribute to delivering software value such as Finance, Legal, and Marketing teams. The power of a DevSecOps platform is giving everyone visibility into the SDLC which drives better collaboration, improves planning, reduces security risk, improves team velocity, and leads to faster time-to-value. This means your teams are happier and so are the users of the applications you build, secure, and deploy using GitLab.\n\n> Read how the U.S. Navy's Black Pearl [sped up onboarding using GitLab](https://about.gitlab.com/blog/u-s-navy-black-pearl-lessons-in-championing-devsecops/).\n\n**Note:** GitLab doesn't have to be introduced to the organization by the development team. 
For instance, if security teams want vulnerabilities identified and mitigated earlier in the lifecycle or increased compliance via security scanning, they can recommend that developers use the DevSecOps platform. [Read how U.K. retailer Dunelm made this happen](https://about.gitlab.com/customers/dunelm/).\n\n### Multi-tenancy is just one option; single-tenancy can address regulatory requirements\n\nKeeping software up-to-date and secure while maintaining compliance with strict regulations can make self-hosting a challenge. In June 2023, we launched [GitLab Dedicated](https://about.gitlab.com/dedicated/), our single-tenant SaaS solution, into general availability to address the needs of organizations in highly regulated industries like finance and healthcare, and in highly regulated geographies such as the European Union. GitLab Dedicated provides the secure environment organizations need for regulatory compliance, including control over data residency and isolation, while removing the overhead of self-hosting.\n\nDedicated customers are upgraded automatically every month, which means they have all of the benefits without the administration overhead. Furthermore, GitLab Dedicated comes with [GitLab Ultimate](https://about.gitlab.com/pricing/ultimate/), enabling organizations to ship secure software faster with built-in compliance visibility and controls as well as advanced security scanning capabilities.\n\n> Learn the [origins of GitLab Dedicated](https://about.gitlab.com/blog/building-gitlab-with-gitlabcom-how-gitlab-inspired-dedicated/) and how it has grown into the solution highly regulated organizations need.\n\n### Try GitLab today\n\nAs you proceed with your software development roadmap for 2024 (and beyond), consider what an AI-powered DevSecOps platform could do for your organization. 
Also keep an eye on [our Direction page](https://about.gitlab.com/direction/#fy25-rd-investment-themes) to learn about what’s coming next and [our monthly release posts](https://about.gitlab.com/blog/categories/devsecops-platform/) to learn about the latest and greatest available.\n\n> Start your trial of [GitLab Duo Pro](https://about.gitlab.com/gitlab-duo/#free-trial) or [GitLab Ultimate](https://gitlab.com/-/trials/new) for free today.\n",[771,9,484,750],{"slug":985,"featured":93,"template":683},"how-to-successfully-deliver-your-software-development-roadmap","content:en-us:blog:how-to-successfully-deliver-your-software-development-roadmap.yml","How To Successfully Deliver Your Software Development Roadmap","en-us/blog/how-to-successfully-deliver-your-software-development-roadmap.yml","en-us/blog/how-to-successfully-deliver-your-software-development-roadmap",{"_path":991,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":992,"content":999,"config":1005,"_id":1008,"_type":16,"title":1009,"_source":18,"_file":1010,"_stem":1011,"_extension":21},"/en-us/blog/gitlab-16-10-release",{"title":993,"description":994,"ogTitle":993,"ogDescription":994,"config":995,"ogImage":996,"ogUrl":997,"ogSiteName":697,"ogType":698,"canonicalUrls":997,"schema":998},"GitLab 16.10 Release","GitLab 16.10 released with semantic versioning in the CI/CD catalog",{"noIndex":93},"https://res.cloudinary.com/about-gitlab-com/image/upload/v1749668956/Blog/Hero%20Images/16.10_cover_image_-_Blog-1800x800.png","https://about.gitlab.com/blog/gitlab-16-10-release","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"GitLab 16.10 Release\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Torsten Linz\"}],\n        \"datePublished\": \"2024-03-21\",\n      }",{"title":993,"description":994,"authors":1000,"heroImage":996,"date":1002,"body":1003,"category":14,"tags":1004},[1001],"Torsten Linz","2024-03-21","This is the 
[release post for GitLab 16.10](https://about.gitlab.com/releases/2024/03/21/gitlab-16-10-released/).",[815],{"slug":1006,"featured":6,"template":683,"externalUrl":1007},"gitlab-16-10-release","https://about.gitlab.com/releases/2024/03/21/gitlab-16-10-released/","content:en-us:blog:gitlab-16-10-release.yml","Gitlab 16 10 Release","en-us/blog/gitlab-16-10-release.yml","en-us/blog/gitlab-16-10-release",{"_path":1013,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":1014,"content":1021,"config":1027,"_id":1030,"_type":16,"title":1031,"_source":18,"_file":1032,"_stem":1033,"_extension":21},"/en-us/blog/gitlab-16-9-release",{"title":1015,"description":1016,"ogTitle":1015,"ogDescription":1016,"config":1017,"ogImage":1018,"ogUrl":1019,"ogSiteName":697,"ogType":698,"canonicalUrls":1019,"schema":1020},"GitLab 16.9 Release","16.9 features GitLab Duo Chat with wider Beta access, usability improvements to the CI/CD variables page, more options for auto-canceling pipelines, and more!",{"noIndex":93},"https://res.cloudinary.com/about-gitlab-com/image/upload/v1749668947/Blog/Hero%20Images/16.9_cover_image_-_Blog-1800x800.png","https://about.gitlab.com/blog/gitlab-16-9-release","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"GitLab 16.9 Release\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Tim Rizzi\"}],\n        \"datePublished\": \"2024-02-15\",\n      }",{"title":1015,"description":1016,"authors":1022,"heroImage":1018,"date":1024,"body":1025,"category":14,"tags":1026},[1023],"Tim Rizzi","2024-02-15","[This is the 16.9 release post.](https://about.gitlab.com/releases/2024/02/15/gitlab-16-9-released/)",[815,484],{"slug":1028,"featured":93,"template":683,"externalUrl":1029},"gitlab-16-9-release","https://about.gitlab.com/releases/2024/02/15/gitlab-16-9-released/","content:en-us:blog:gitlab-16-9-release.yml","Gitlab 16 9 
Release","en-us/blog/gitlab-16-9-release.yml","en-us/blog/gitlab-16-9-release",{"_path":1035,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":1036,"content":1042,"config":1049,"_id":1051,"_type":16,"title":1052,"_source":18,"_file":1053,"_stem":1054,"_extension":21},"/en-us/blog/how-to-tailor-gitlab-access-with-custom-roles",{"title":1037,"description":1038,"ogTitle":1037,"ogDescription":1038,"noIndex":6,"ogImage":1039,"ogUrl":1040,"ogSiteName":697,"ogType":698,"canonicalUrls":1040,"schema":1041},"How to tailor GitLab access with custom roles","Find out the current capabilities of custom roles and what's to come, including initial grouping of permissions and templating from default roles.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098975/Blog/Hero%20Images/Blog/Hero%20Images/AdobeStock_729993502_1Xe0pzHPX4C3b1Ycs2q7RP_1750098974565.jpg","https://about.gitlab.com/blog/how-to-tailor-gitlab-access-with-custom-roles","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How to tailor GitLab access with custom roles\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Joe Randazzo\"},{\"@type\":\"Person\",\"name\":\"Hannah Sutor\"}],\n        \"datePublished\": \"2024-02-13\",\n      }",{"title":1037,"description":1038,"authors":1043,"heroImage":1039,"date":1046,"body":1047,"category":14,"tags":1048},[1044,1045],"Joe Randazzo","Hannah Sutor","2024-02-13","At GitLab, we knew we had a big problem to solve. Our existing, default user roles were becoming roadblocks for our customers. The default roles, such as Guest, Reporter, Developer, Maintainer, and Owner, offer a predefined set of permissions that cannot be customized. 
Customers were forced to fit their specific needs into the existing roles, leading to either overly permissive access, which is a security risk, or under-privileged access, which required administrator overhead to temporarily elevate privileges of a user in order to perform a task, and remember to move them back down to their normal role afterwards.\n\nIn 15.9, we released our [first iteration for customizable roles](https://about.gitlab.com/blog/expanding-guest-capabilities-in-gitlab-ultimate/) within GitLab. It allowed customers to do one simple thing: Give the Guest user the ability to view code, without consuming a seat. Our hope was to give our customers the ability to add more privilege to the Guest role, if they so desired, while retaining the benefit of free Guest users with an Ultimate subscription.\n\nOur MVC was released almost a year ago now, so we wanted to provide an update on the progress we’ve made with customizable roles and an idea of where we are headed.\n\n![Custom roles - image 1](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098994/Blog/Content%20Images/Blog/Content%20Images/create_role_output__2__aHR0cHM6_1750098994380.gif)\n\n![Custom roles - image 2](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098994/Blog/Content%20Images/Blog/Content%20Images/image2_aHR0cHM6_1750098994380.gif)\n\n## Looking at the next iteration of custom roles\n\nAs we build toward the next iteration of [custom roles](https://docs.gitlab.com/ee/user/custom_roles.html) and permissions, we have gathered a lot of feedback from the MVC. Two common themes that have been uncovered are:\n- reducing privilege of Developer, Maintainer, and Owner roles\n- a wide range of access permutations\n\nHere's how we plan to address these challenges.\n\n### Consistent CRUD model\n\nIf you have designed role-based access control (RBAC) in Google Cloud Platform (GCP) or Kubernetes, you may have appreciated the predictable permission verbs on resource access. 
As we continue to build out the next groupings of permissions for custom roles, the permissions will follow a consistent Create, Read, Update, and Delete (CRUD) model so you can predictably design your resource access within your organization.\n\nIf we examine the table below, “Manage” would be given to select few in the department or organization, whereas \"Write\" and \"View\" would be a common contributor to that resource.\n\n| Permission    | Description     |\n| ---------- | ---------- |\n| Manage       | Full CRUD operations on resources. Plus configuring the settings of the resource. *Assumes Write/View/Delete* |\n| Write       | Add or update the resource. *Assumes View*     |\n| View       | View the resource      |\n| Delete      | Delete the resource. *Assumes View*      |\n\n\u003Cp>\u003C/p>\n\nBelow is a concrete example of permissions related to registries. While this table is coarse-grained as this groups all registry types together at first, this can become fine-grained over time by pulling out each registry type as requested.\n\n\u003Cp>\u003C/p>\n\n| Permission    | Description     |\n| ---------- | ---------- |\n| Manage       | CRUD operations on objects, including Registries, Proxy, Cleanup Policies, along with managing the settings      |\n| Write       | Ability to push a container, package, or terraform module to registry    |\n| View       | Ability to view, retrieve, and pull registry objects and metadata on repositories and images      |\n| Delete      | Ability to delete registry objects and metadata      |\n\n### Remove default role dependency\n\nDuring the custom role creation process, starting with a base default role can be a quick way to add permissions, but it’s limiting when reducing only one or two permissions from Maintainer or Owner. 
The next iteration will allow you to build your own custom role without the predefined permissions of default roles allowing for maximum flexibility.\n\n### Build your own role\n\nBuilding a custom role in a system should account for the number of permutations while isolating access for those in strict environments. As we group these resources, we are factoring in that there are a wide range of themes including project management, development, security, and operations.\n\nBelow is a sample of [groupings](https://gitlab.com/jrandazzo/build-your-own-permissions-survey) with a permission selection that could apply to a developer. These resource groups may become finer over time based on requests.\n\n![custom roles - image 3](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098994/Blog/Content%20Images/Blog/Content%20Images/image3_aHR0cHM6_1750098994382.png)\n\n### Building a role from a template\n\nYou may have experienced building permission sets as a starting point to simplify the assignment of user access. As you build out a custom role, you could start with a template that copies predefined permissions from a default role or specific user types such as a Project Manager.\n\n## How to contribute\n\nWe value your feedback and there are multiple ways to contribute:\n- We created a “build your own role” survey to understand how an organization would create a least privilege user in GitLab. Here is a [survey link](https://forms.gle/ucx9CNqqUbVVyAse9) to validate our initial assumptions on permission groupings.\n- Would you like to submit ideas or share feedback based on custom roles? Here is the [feedback issue](https://gitlab.com/gitlab-org/gitlab/-/issues/439638).\n\n_Disclaimer: This blog contains information related to upcoming products, features, and functionality. It is important to note that the information in this blog post is for informational purposes only. Please do not rely on this information for purchasing or planning purposes. 
As with all projects, the items mentioned in this blog and linked pages are subject to change or delay. The development, release, and timing of any products, features, or functionality remain at the sole discretion of GitLab._\n",[680,750,9,836],{"slug":1050,"featured":93,"template":683},"how-to-tailor-gitlab-access-with-custom-roles","content:en-us:blog:how-to-tailor-gitlab-access-with-custom-roles.yml","How To Tailor Gitlab Access With Custom Roles","en-us/blog/how-to-tailor-gitlab-access-with-custom-roles.yml","en-us/blog/how-to-tailor-gitlab-access-with-custom-roles",{"_path":1056,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":1057,"content":1063,"config":1070,"_id":1072,"_type":16,"title":1073,"_source":18,"_file":1074,"_stem":1075,"_extension":21},"/en-us/blog/ci-cd-automation-maximize-deploy-freeze-impact-across-gitlab-groups",{"title":1058,"description":1059,"ogTitle":1058,"ogDescription":1059,"noIndex":6,"ogImage":1060,"ogUrl":1061,"ogSiteName":697,"ogType":698,"canonicalUrls":1061,"schema":1062},"CI/CD automation: Maximize 'deploy freeze' impact across GitLab groups","Learn the benefits of managing deploy freezes at the group level and follow step-by-step guidance on implementation.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749667913/Blog/Hero%20Images/clocks.jpg","https://about.gitlab.com/blog/ci-cd-automation-maximize-deploy-freeze-impact-across-gitlab-groups","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"CI/CD automation: Maximize 'deploy freeze' impact across GitLab groups\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Christian Nnachi\"}],\n        \"datePublished\": \"2024-02-08\",\n      }",{"title":1058,"description":1059,"authors":1064,"heroImage":1060,"date":1066,"body":1067,"category":14,"tags":1068},[1065],"Christian Nnachi","2024-02-08","In the dynamic landscape of continuous integration and continuous deployment 
([CI/CD](https://about.gitlab.com/topics/ci-cd/)), maintaining system stability during critical periods such as holidays, product launches, or maintenance windows can be challenging. Introducing new code during peak activity times raises the risk of issues affecting user experience. To strike a balance between innovation and stability, organizations may require a group-level deploy freeze — a strategic pause in deploying new code changes across groups to certain branches or environments.\n\n**Given that GitLab can be used for both continuous integration and continuous deployment efforts, GitLab's [Deploy Freeze](https://docs.gitlab.com/ee/user/project/releases/index.html#prevent-unintentional-releases-by-setting-a-deploy-freeze)** capability aims to address this exact need.\n\nScoped at the project level, deploy freezes can prevent unintended production releases during a period of time you specify by setting a deploy freeze period. Deploy freezes help reduce uncertainty and risk when continuously deploying changes for a single project.\n\nMost teams, however, do not have a single project that represents all of their production environment. Given that deploy freezes are set at the project level, managing and enforcing deploy freezes across many projects can be an arduous and error-prone task, leading to unpredictability and disruption. The need for an automated cross-project solution to ensure stability is obvious.\n\n## What is a group deploy freeze?\n\nThe [Group Deploy Freeze project](https://gitlab.com/cnnachi-demo/freezeperiods) takes the concept of individual project deploy freezes to the next level. It enables you to enforce the same deployment restrictions across one or many projects within a GitLab group from the GitLab UI.\n\nWhether you're managing a large suite of microservices or a collection of related projects, a group-managed deploy freeze solution provides a centralized mechanism to maintain stability.\n\n### Benefits of group deploy freeze\n\n**1. 
Centralized control**\n\nAdherence to your deployment strategy by allowing you to manage deploy freezes for multiple projects from a single location. This simplifies the process and reduces human errors.\n\n**2. Group-wide synchronization**\n\nEnforcing deploy freezes across an entire GitLab group ensures that all projects receive the same schedule at the same time. This maintains uniformity across your projects.\n\n**3. Streamlined collaboration**\n\nVisibility of changes to your development and operations teams can align their efforts effectively.\n\n## How to use GitLab Group Deploy Freeze\n\nWith [Group Deploy Freeze](https://gitlab.com/demos/solutions/group-deploy-freeze), GitLab CI becomes a general-purpose automation tool for ops-related changes, like setting deploy freezes on many projects.\n\nIn the following steps, you will successfully set up the Group Deploy Freeze feature. Remember to test thoroughly and consider any specific nuances of your team's deployment process.\n\n### Prerequisites\n\n- **GitLab account -** You need an active GitLab account with the necessary permissions to access and manage the projects within the target GitLab group.\n- **GitLab Personal Access Token (PAT) -** Generate a GitLab PAT with the permissions to read and write to the projects within the target GitLab group via the GitLab API. This token will be used by the Python script to authenticate API requests.\n- **Python environment -** Ensure that you have a Python environment set up on your machine or the environment where you plan to run the Python script. The script is written in Python, so you need a compatible Python interpreter.\n- **Python libraries -** Install the required Python libraries used by the script. These include requests, envparse, and python-gitlab. You can use pip to install these libraries.\n- **GitLab Group details -** Identify the GitLab group for which you want to manage deploy freezes. 
You'll need the group's slug (path) to specify which group the script will operate on.\n- **Time zone selection -** Decide on the time zone in which you want to schedule the deploy freezes. The time zone selection ensures that freeze periods are accurately timed based on your organization's preferred time zone.\n\n### Getting started\n\nTo use GitLab CI to author and automate the process of batch updating deploy freezes for all projects, fork the [Deploy Freeze project](https://gitlab.com/cnnachi-demo/freezeperiods), which will then create a CI/CD pipeline that iterates through your projects and applies the desired deploy freeze schedule. You can customize this project to fit your organization's workflow.\n\nThe provided project contains a `.gitlab-ci.yml` file and a Python script designed to automate the management of deploy freezes for multiple projects within a GitLab group. It uses the GitLab API and various Python libraries to create and delete deploy freeze periods, and is designed to be run as part of a CI/CD pipeline to ensure code stability during deployments within a GitLab group.\n\n### Commit and push changes\n\nCommit and push the changes to your repository to trigger the CI/CD pipeline.\n\n### Pipeline execution\n\n- In the [Group Deploy Freeze project](https://gitlab.com/demos/solutions/group-deploy-freeze) on the GitLab UI, go to Pipelines.\n- Select the \"Run Pipeline\" option on the top right corner of the page.\n- You should see the variables defined in the `.gitlab-ci.yml` file like:\n![Set variables](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749676891/Blog/Content%20Images/Screenshot-2023-09-06-at-12-08-48-PM.png)\n- Define the values of the variables `FREEZE_START`, `FREEZE_END`, `CRON_TIME_ZONE` and `GROUP_SLUG`, then run the pipeline. 
You can define multiple freeze periods by skipping to the next line within the `FREEZE_START` and `FREEZE_END` variables.\n- Once the pipeline is successful, the freeze period should be populated in all projects within the defined groups.\n\n## Monitor and verify\n\n- Verify that these deploy freeze periods are being created and managed as intended.\n- Check your GitLab group's projects for deploy freezes during the specified periods.\n![Monitor and verify](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749676891/Blog/Content%20Images/Screenshot-2023-09-12-at-2-08-24-PM.png)\n\n## Customization and iteration\n\n- If needed, iterate on the configuration, script, or pipeline based on your organization's requirements.\n- Make adjustments to freeze periods, time zones, project details, or other settings as needed.\n\nYou can optimize the group deploy feature by following the [Deploy freeze](https://docs.gitlab.com/ee/user/project/releases/index.html#prevent-unintentional-releases-by-setting-a-deploy-freeze) documentation, which outlines the steps to set up a `.freezedeployment` job that can conditionally block deployment jobs upon the presence of the `CI_DEPLOY_FREEZE` variable. By including the `.freezedeployment` template and extending it in your project's `.gitlab-ci.yml file`, you can prevent deployments during freeze periods, ensuring code stability. Manual deployment intervention is possible once the freeze period ends, allowing for controlled and predictable deployment processes across the group's projects.\n\n## Results\n\nBy extending deploy freezes to the group level, teams can easily streamline and enhance their deployment strategies to ensure consistency in preventing unintended production release during a period of time specified by you, whether it is a large company event or holiday. 
With the power of GitLab's API, CI/CD pipelines, and the flexibility of Python scripting, Group Deploy Freeze is your ally in maintaining code stability and predictability across diverse projects.\n\n> Get started with group deploy freezes today by visiting the [Group Deploy Freeze project](https://gitlab.com/cnnachi-demo/freezeperiods).",[111,680,1069,749],"production",{"slug":1071,"featured":6,"template":683},"ci-cd-automation-maximize-deploy-freeze-impact-across-gitlab-groups","content:en-us:blog:ci-cd-automation-maximize-deploy-freeze-impact-across-gitlab-groups.yml","Ci Cd Automation Maximize Deploy Freeze Impact Across Gitlab Groups","en-us/blog/ci-cd-automation-maximize-deploy-freeze-impact-across-gitlab-groups.yml","en-us/blog/ci-cd-automation-maximize-deploy-freeze-impact-across-gitlab-groups",{"_path":1077,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":1078,"content":1084,"config":1090,"_id":1092,"_type":16,"title":1093,"_source":18,"_file":1094,"_stem":1095,"_extension":21},"/en-us/blog/jenkins-to-gitlab-migration-made-easy",{"title":1079,"description":1080,"ogTitle":1079,"ogDescription":1080,"noIndex":6,"ogImage":1081,"ogUrl":1082,"ogSiteName":697,"ogType":698,"canonicalUrls":1082,"schema":1083},"Jenkins-to-GitLab migration made easy","Learn why and how to migrate from Jenkins to GitLab with ease by following this step-by-step guide.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749663019/Blog/Hero%20Images/AdobeStock_519147119.jpg","https://about.gitlab.com/blog/jenkins-to-gitlab-migration-made-easy","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Jenkins-to-GitLab migration made easy\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Fernando Diaz\"}],\n        \"datePublished\": \"2024-02-01\",\n      }",{"title":1079,"description":1080,"authors":1085,"heroImage":1081,"date":1087,"body":1088,"category":14,"tags":1089},[1086],"Fernando 
Diaz","2024-02-01","GitLab is the most comprehensive AI-powered DevSecOps platform. This means that GitLab provides everything needed to plan, develop, and deliver secure software faster, all within one tool.\n\nPlatforms take away the pains and struggles of integrating various tools (DIY DevOps) to enable the software development lifecycle (SDLC). Since Jenkins is not a platform, additional tools are required to complete the SDLC. This DIY DevOps approach introduces toolchain complexity, which creates the following drawbacks:\n\n- Custom support is required for the integration and orchestration of tools\n- Difficulty maintaining/upgrading/securing separate tools\n- Inefficiency in measuring organizational transformation\n- Poor developer experience\n- Additional management/time/budget costs\n- Loss of productivity\n- Context switching and collaboration inefficiencies\n\n\u003Ccenter>\n\u003Cfigure>\n   \u003Cimg src=\"https://res.cloudinary.com/about-gitlab-com/image/upload/v1752175993/Blog/ikr97sr9jclddeqdg7ew.png\" alt=\"Import project selection\">\n   \u003Cfigcaption>DIY DevOps vs. DevSecOps Platform\u003C/figcaption>\n\u003C/figure>\n\u003C/center>\n\u003Cp>\u003C/p>\n\nFor these reasons, many Jenkins teams are considering migrating to a DevSecOps platform. If you are looking for a more powerful, reliable, and secure solution, GitLab is your best option! GitLab is free to get started with and offers different subscription tiers based on the needs of your organization. 
To learn more about our offerings and features, check out our [pricing page](https://about.gitlab.com/pricing/).\n\nIn this blog, you will learn:\n- How to plan for a migration\n- How to migrate repositories from other source code management (SCM) tools to GitLab\n- How to migrate CI/CD pipelines from Jenkins to GitLab\n- Additional migration considerations\n\n### Planning for a migration\n\nBefore starting a migration from another tool to GitLab CI/CD, you should begin by developing a migration plan. A migration plan is an important technical step for setting expectations. CI/CD tools differ in approach, structure, and technical specifics, meaning that migrations are not just 1:1 mappings of data. A migration plan will provide the following benefits:\n- Sets and communicates a clear vision of what your migration goals are, which helps your users understand why the effort is worth it. The value is clear when the work is done, but people need to be aware while it’s in progress too.\n- Provides sponsorship and alignment from the relevant leadership teams helps with the point above.\n- Spends time educating users on what’s different.\n- Finds ways to sequence or delay parts of the migration and prevent non-migrated (or partially migrated) states for too long.\n- Documents advantages of the improvements that GitLab CI/CD offers, and updates your implementation as part of the transition.\n\nA migration plan will allow you to put a process in place where you can slowly migrate to GitLab with minimal disruption. This may include running both Jenkins and GitLab, while certain projects are moved to GitLab and offloaded from Jenkins.\n\n### Defining a change management process\n\nThe migration plan should define an effective change management process. 
Developers, IT Operations, Cloud Administrators, Security, and Quality Engineers may not have experience with GitLab and they may not know why you or your leadership have decided to move in this direction.\n\nThe people this is impacting need to know:\n- __Why__ the change is being made\n- __What__ the future state looks like\n- __How__ the company intends to get there from here\n- __Where__ to go for more information or help \n\nTo this end, you should consider the following steps to manage change across these functional roles: \n- __Analyze the current state__: Document the current state of processes. Gather metrics as a baseline. Identify what's working and not working with CI/CD by interviewing key team members. Document the challenges you uncover both quantitatively and qualitatively. You’re going to have to sell the vision and reason for the change, so the more clearly you can define the problem set, the easier it will be to gain buy-in from across the business. \n- __Establish a vision__: Now that you have current pain points outlined quantitatively with baseline metrics and qualitatively (in the words of your team members), communicate a vision of the future state. Explain why it's important (tie this to business success metrics). Provide live and recorded demonstrations of what good looks like and compare it to the current state. Reinforce this message through multiple channels and media — chat groups, all-hands meetings, email notifications, banner notifications on GitLab, etc.\n- __Educate the workforce__: Invest in [GitLab CI/CD Training](https://about.gitlab.com/services/education/gitlab-ci/) delivered by a GitLab expert. Measure knowledge acquisition and retention using [GitLab Certifications](https://levelup.gitlab.com/pages/certifications). 
\n- __Communicate roadmap and resources__: Communicate to your team members the intended timeline, available resources to help them transition, and community resources like chat groups, Q&A boards, or GitLab Influencer office hours so they can ask questions and get help. Bonus points for building a reward system to incentivize teams to transition early and share their experience with their peer application groups!\n\nIf you have these elements in place as you begin this transition, you will have a framework for success. \n\n### Establishing migration goals\nBefore performing a migration, you should have a good understanding of your goals and how to meet them. For example, some questions you should have answers to are as follows:\n- What is your timeline to migrate?\n- How is your Jenkins server currently configured?\n- How many projects must be migrated?\n- What is the complexity of your pipeline?\n- Does it require external dependencies, multiple pipeline triggers, parallel builds, etc.?\n- How/Where do you deploy your code?\n- What is the release/review process for deploying code?\n- Is it integrated into Jenkins, or a separate workflow triggered by Jenkins?\n- Which build artifacts or binaries are required for pipeline success?\n- Which plugins are used by jobs in Jenkins today?\n- Which software is installed on the Jenkins agents?\n- What SCM solution are you currently using?\n- Are there any shared libraries in use within your Jenkins jobs?\n- Which authentication method is used for Jenkins (Basic auth, LDAP/AD, SSO)?\n- Are there other projects that you need to access from your pipeline?\n- Are there credentials in Jenkins used to access outside services?\n\nBy answering these questions you’ll know how to proceed with the migration, how long it will take, and where to start. 
Once you have built a plan and are confident of the expectations and possible pitfalls, you can begin the migration process.\n\n### Prerequisites for migration\nOnce you have created a migration plan and addressed all the expectations of the migration, you can begin to set up GitLab. Some of the prerequisites suggested for migration are as follows:\n- Get familiar with GitLab. Read about the [key GitLab CI/CD features](https://docs.gitlab.com/ee/ci/index.html).\n- Follow tutorials to create your first [GitLab pipeline](https://docs.gitlab.com/ee/ci/quick_start/index.html) and [more complex pipelines](https://docs.gitlab.com/ee/ci/quick_start/tutorial.html) that build, test, and deploy a static site.\n- Review the [.gitlab-ci.yml keyword reference](https://docs.gitlab.com/ee/ci/yaml/index.html).\n- Set up and configure GitLab.\n- Test your GitLab instance.\n\nOnce you understand GitLab and an instance has been configured, you can work through your migration plan and begin to move projects from Jenkins over to GitLab. Make sure your GitLab instance has been properly set up using GitLab best practices and [reference architectures](https://docs.gitlab.com/ee/administration/reference_architectures/).\n\n### Migrating repositories to GitLab\nOne of the main drawbacks of Jenkins is that it does not provide an SCM solution. If you are using Jenkins, your code must be stored in a separate SCM solution which Jenkins must have access to. Because GitLab has built-in SCM, migrating away from Jenkins also allows you to migrate from the SCM solution you were leveraging, bringing forth an additional reduction in costs.\n\nGitLab provides tools to allow you to easily move your repository and its metadata into GitLab. 
The following importers are included to assist in migrating your projects to GitLab:\n\n- [GitHub](https://docs.gitlab.com/ee/user/project/import/github.html)\n- [Another GitLab instance](https://docs.gitlab.com/ee/user/project/settings/import_export.html)\n- [Bitbucket Cloud](https://docs.gitlab.com/ee/user/project/import/bitbucket.html)\n- [Bitbucket Server](https://docs.gitlab.com/ee/user/project/import/bitbucket_server.html)\n- [FogBugz](https://docs.gitlab.com/ee/user/project/import/fogbugz.html)\n- [Gitea](https://docs.gitlab.com/ee/user/project/import/gitea.html)\n- [Jira (Issues only)](https://docs.gitlab.com/ee/user/project/import/jira.html)\n- [Repo by manifest file](https://docs.gitlab.com/ee/user/project/import/manifest.html)\n- [Repo by URL](https://docs.gitlab.com/ee/user/project/import/repo_by_url.html)\n\n\u003Ccenter>\n\u003Cfigure>\n   \u003Cimg src=\"https://res.cloudinary.com/about-gitlab-com/image/upload/v1752176002/Blog/ie2xrexhbcoq6m8rnhit.png\" alt=\"GitHub to GitLab Repo Exporter\">\n   \u003Cfigcaption>GitHub to GitLab Repo Exporter\u003C/figcaption>\n\u003C/figure>\n\u003C/center>\n\u003Cp>\u003C/p>\n\nEach importer imports different data from a project. Read the [import and migrate projects documentation](https://docs.gitlab.com/ee/user/project/import/) to learn more about the provided importers to see what data is migrated to GitLab. 
Additionally, you can [automate group and project import](https://docs.gitlab.com/ee/user/project/import/#automate-group-and-project-import) and build a custom solution to further suit the needs of your organization:\n\n- [Professional Services](https://about.gitlab.com/services/)\n- [Migration Utilities](https://gitlab.com/gitlab-org/professional-services-automation/tools/migration/congregate/-/blob/master/docs/using-congregate.md#quick-start)\n- [Frequently Asked Migration Questions](https://gitlab.com/gitlab-org/professional-services-automation/tools/migration/congregate/-/blob/master/customer/famq.md)\n\n### How to migrate a repository\nMigrating a repository to GitLab is easy using our built-in importers. In this example, I’ll show how to copy a repo from GitHub to GitLab along with [its resources](https://docs.gitlab.com/ee/user/project/import/github.html#imported-data) (Issues, Pull Requests, Milestones, etc.). In order to migrate a repository from another GitHub to GitLab, you can follow the steps below:\n\n1. On the left sidebar, at the top, select **Create new (+)**.\n2. Select **New project/repository** under the In GitLab section.\n3. Select **Import project**.\n\n\u003Ccenter>\n\u003Cfigure>\n   \u003Cimg src=\"https://res.cloudinary.com/about-gitlab-com/image/upload/v1752176017/Blog/boowmmaqhbredxa3g92s.png\" alt=\"Import project selection\">\n   \u003Cfigcaption>Import project selection\u003C/figcaption>\n\u003C/figure>\n\u003C/center>\n\u003Cp>\u003C/p>\n\n4. Click the **GitHub** button.\n    - If using GitLab self-managed, then you must [enable the GitHub importer](https://docs.gitlab.com/ee/administration/settings/import_and_export_settings.html#configure-allowed-import-sources).\n    - Note that other importers can be initiated in the same way.\n5. 
Now you can either:\n    - Authorize with GitHub OAuth: Select **Authorize with GitHub**.\n    - Or, use a GitHub personal access token:\n       - Go to [https://github.com/settings/tokens/new](https://github.com/settings/tokens/new).\n       - In the **Note** field, enter a token description.\n       - Select the repo scope.\n       - Optionally to import collaborators, select the              **read:org** scope.\n       - Click the **Generate token** button.\n       - On the GitLab import page, in the Personal Access Token field, paste the GitHub personal access token.\n6. Click the **Authenticate** button.\n7. Select the items you wish to migrate.\n8. Select the projects you wish to migrate and to where.\n9. Press the **Import** button.\n\nNow you should have the imported project in your workspace. For additional guidance on migrating from GitHub to GitLab you can watch this video:\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/0Id5oMl1Kqs?si=TQ5HI9aMwtzJMiMi\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\nOnce you have completed the repository migration, you can set your Jenkins pipeline to leverage the Jenkinsfile within GitLab. 
This can be done by setting the repository URL to your newly imported project via the Jenkins pipeline configuration menu:\n\n\u003Ccenter>\n\u003Cfigure>\n   \u003Cimg src=\"https://res.cloudinary.com/about-gitlab-com/image/upload/v1752176020/Blog/mu475liw66abcxbu2g6g.png\" alt=\"Jenkins Pipeline SCM settings\">\n   \u003Cfigcaption>Jenkins Pipeline SCM settings\u003C/figcaption>\n\u003C/figure>\n\u003C/center>\n\u003Cp>\u003C/p>\n\nThis is useful for the initial repo migration phase and allows you to use both Jenkins and GitLab in parallel, preventing service disruptions while you work on migrating the CI/CD functionality.\n\nAdditionally, you can leverage the [GitLab Jenkins plugin](https://plugins.jenkins.io/gitlab-plugin/) to assist with migration. This plugin allows GitLab to trigger and obtain the status of Jenkins builds.\n\n### Migrating CI/CD pipelines\nOnce you have migrated your repositories to GitLab, you can proceed to migrate your Jenkins pipelines to GitLab. This process can be fairly straightforward, but requires an understanding of both Jenkins and GitLab concepts and syntax.\n\nJenkins provides two different types of syntax for defining pipelines, Declarative and Scripted. In this guide we will be covering migrating from Declarative pipelines since they are the most commonly used.\n\n### Step-by-step pipeline migration\nIn this tutorial we will analyze a Jenkinsfile (Groovy) alongside a GitLab CI/CD configuration file (YAML) that builds, tests, and deploys a microservice written in Golang. We will then proceed to enable the pipeline within GitLab and see its results. 
The pipeline will:\n\n- Use the golang container image with the **alpine** tag\n- Run a job for building the Golang code into an executable binary\n   - Stores the built executable as an artifact\n- Run a job to run unit tests\n- Run a job to deploy to staging\n   - Only executes if the commit targets the **staging** branch\n   - Starts after the **test** stage succeeds\n   - Uses the built executable artifact from the earlier job\n\nBelow you can see Jenkins and GitLab pipeline definitions along with descriptive comments. You can see the pipeline in action in the [Meow Migration project](https://gitlab.com/gitlab-de/projects/blogs/meow-migration).\n\nLet's take a look at a Jenkinsfile written in Groovy:\n\n```  \n// The top-level of the declarative\n// pipeline.\npipeline {\n\n  // Defines the default agent to use\n  // when it is not explicitly defined\n  // in a job.\n    agent any\n\n  // Defines the stages that will run\n  // in numerical order. Each stage\n  // only runs one job.\n    stages {\n\n    // Defines the name of the stage\n        stage('build') {\n      // Defines the container image to\n      // use for this job, overwriting\n      // the default 'agent any'.\n      // The Jenkins Docker plugin\n      // must be configured for this\n      // to run.\n            agent { docker 'golang:alpine' }\n\n      // Defines the sequence of steps\n      // to execute when the stage is\n      // run.\n            steps {\n                sh 'go build -o bin/meow-micro'\n                sh 'chmod +x bin/meow-micro'\n            }\n\n      // The steps to run after the\n      // stage completes.\n            post {\n              always {\n\n        // Stores the stage artifacts\n        // generated for use in another\n        // job.\n                archiveArtifacts artifacts: 'bin/meow-micro'\n                onlyIfSuccessful: true\n              }\n            }\n        }\n\n    stage('test') {\n            agent { docker 'golang:alpine' }\n            
steps {\n                sh 'go test .'\n            }\n        }\n\n        stage('deploy') {\n      // Defines conditions which must\n      // be met in order for the job to\n      // execute. In this case the\n      // deploy job will only run on the \n      // staging branch.\n            when {\n              branch 'staging'\n            }\n            steps {\n                echo 'Deploying meow-micro to staging'\n        // Uses the artifact stored in\n        // the build stage.\n                sh './bin/meow-micro'\n            }\n        }\n    }\n}\n```\n\nNow, let's see how to create the same functionality in GitLab:\n\n```\n# Defines the default image to use\n# when it is not explicitly defined in\n# a job.\ndefault:\n  image: alpine:latest\n\n# Defines the order to run the stages.\n# Each stage can have multiple jobs.\nstages:\n  - build\n  - test\n  - deploy\n\n# Defines the name of the job\ncreate-binary:\n # Defines the stage the job will run in\n  stage: build\n # Defines the container image to use\n # for this job, overwriting default.\n  image: golang:alpine\n # Defines the sequence of steps to\n # execute when the job is run.\n  script:\n    - go build -o bin/meow-micro\n    - chmod +x bin/meow-micro\n # Stores the job artifacts generated\n # for use in another job.\n  artifacts:\n    paths:\n      - bin/meow-micro\n    expire_in: 1 week\n\nunit-tests:\n  stage: test\n  image: golang:alpine\n  script:\n    - go test .\n # Defines commands to run after the\n # job.\n  after_script:\n    - echo \"Tests Complete\"\n\nstaging-deploy:\n  stage: deploy\n # Defines commands to run before the\n # actual job.\n  before_script:\n    - apk update\n  script:\n    - echo \"Deploying meow-micro to staging environment\"\n    - ./bin/meow-micro\n # Defines conditions which must be met\n # in order for this job to execute. 
In\n # this case the staging-deploy job will \n # only run on the staging branch.\n  rules:\n    - if: $CI_COMMIT_BRANCH == 'staging'\n # Allows the artifact stored in the\n # build job to be used in this job.\n  artifacts:\n    paths:\n      - bin/meow-micro\n```\n\nAs you may have observed, there are many similarities between both Jenkins and GitLab in terms of syntax, making pipeline migration straightforward. While the above provides a basic example, be sure to read the comprehensive list of [feature and concept comparisons](https://docs.gitlab.com/ee/ci/migration/jenkins.html#comparison-of-features-and-concepts) between both tools.\n\nNow that we have an understanding of how to map Jenkins to GitLab we can start creating a pipeline with the same functionality in GitLab. In order to perform the migration of CI/CD, you can go through the following steps:\n\n##### 1. Open the repository you migrated to GitLab in the section above.\n- On the left sidebar, at the top, select **Search or go to…**.\n- Locate your project.\n\n##### 2. Open the [Pipeline Editor](https://docs.gitlab.com/ee/ci/pipeline_editor/).\n- On the left sidebar, Select **Build > Pipeline editor**.\n\u003Ccenter>\n\u003Cfigure>\n   \u003Cimg src=\"https://res.cloudinary.com/about-gitlab-com/image/upload/v1752176026/Blog/ecp4jh7epho2oxuegaor.png\" alt=\"Pipeline editor menu\">\n   \u003Cfigcaption>Pipeline editor menu\u003C/figcaption>\n\u003C/figure>\n\u003C/center>\n\u003Cp>\u003C/p>\n\n- Click the **Configure pipeline** button.\n\n\u003Ccenter>\n\u003Cfigure>\n   \u003Cimg src=\"https://res.cloudinary.com/about-gitlab-com/image/upload/v1752176029/Blog/nypfh01zhwgvzqc0xz3v.png\" alt=\"Configure pipeline selection\">\n   \u003Cfigcaption>Configure pipeline selection\u003C/figcaption>\n\u003C/figure>\n\u003C/center>\n\u003Cp>\u003C/p>\n\n##### 3. Populate the [.gitlab-ci.yml](https://docs.gitlab.com/ee/ci/yaml/).\n- Add the GitLab CI pipeline code. 
\n\u003Ccenter>\n\u003Cfigure>\n   \u003Cimg src=\"https://res.cloudinary.com/about-gitlab-com/image/upload/v1752176031/Blog/nxi6uxxispyyoiiyvxyg.png\" alt=\"Pipeline editor input\">\n   \u003Cfigcaption>Pipeline editor input\u003C/figcaption>\n\u003C/figure>\n\u003C/center>\n\u003Cp>\u003C/p>\n\n- Verify that the syntax is correct.\n\n\u003Ccenter>\n\u003Cfigure>\n   \u003Cimg src=\"https://res.cloudinary.com/about-gitlab-com/image/upload/v1752176037/Blog/x3d4utfsnymye0lvphtf.png\" alt=\"Pipeline syntax validation\">\n   \u003Cfigcaption>Pipeline syntax validation\u003C/figcaption>\n\u003C/figure>\n\u003C/center>\n\u003Cp>\u003C/p>\n\n- Visualize the pipeline.\n\n\u003Ccenter>\n\u003Cfigure>\n   \u003Cimg src=\"https://res.cloudinary.com/about-gitlab-com/image/upload/v1752176043/Blog/hipzofpyywjxf62edzfv.png\" alt=\"Pipeline visualization\">\n   \u003Cfigcaption>Pipeline visualization\u003C/figcaption>\n\u003C/figure>\n\u003C/center>\n\u003Cp>\u003C/p>\n\n##### 4. Commit the file to the main branch.\n- Add a commit message.\n- Make sure the branch is set to main.\n- Click the Commit changes button.\n\n\u003Ccenter>\n\u003Cfigure>\n   \u003Cimg src=\"https://res.cloudinary.com/about-gitlab-com/image/upload/v1752176048/Blog/nn8bl7rdysabccoycfrk.png\" alt=\"Commit changes dialog\">\n   \u003Cfigcaption>Commit changes dialog\u003C/figcaption>\n\u003C/figure>\n\u003C/center>\n\u003Cp>\u003C/p>\n\nOnce the file has been merged, the defined pipeline will kick off. You can go back to your project and [view the pipeline](https://docs.gitlab.com/ee/ci/pipelines/#view-pipelines) in action by selecting it under your project’s **Build > Pipelines** page. 
Since it was run on the **main** branch, you will see only the **create-binary** and **unit-tests** jobs; the **staging-deploy** job only runs on the staging branch.\n\n\u003Ccenter>\n\u003Cfigure>\n   \u003Cimg src=\"https://res.cloudinary.com/about-gitlab-com/image/upload/v1752176051/Blog/wfb4k8nkzpg28kpf2pzz.png\" alt=\"Pipeline running on main branch\">\n   \u003Cfigcaption>Pipeline running on main branch\u003C/figcaption>\n\u003C/figure>\n\u003C/center>\n\u003Cp>\u003C/p>\n\nIf we create a staging branch, we can see that the following pipeline is initiated.\n\n\u003Ccenter>\n\u003Cfigure>\n   \u003Cimg src=\"https://res.cloudinary.com/about-gitlab-com/image/upload/v1752176053/Blog/e2jxedpolaniotgixpby.png\" alt=\"Pipeline running on staging branch\">\n   \u003Cfigcaption>Pipeline running on staging branch\u003C/figcaption>\n\u003C/figure>\n\u003C/center>\n\u003Cp>\u003C/p>\n\nWhen clicking on a job we can see its output:   \n\n\u003Ccenter>\n\u003Cfigure>\n   \u003Cimg src=\"https://res.cloudinary.com/about-gitlab-com/image/upload/v1752176056/Blog/fywzwbzkwcvc9zzakilh.png\" alt=\"create-binary job output\">\n   \u003Cfigcaption>create-binary job output\u003C/figcaption>\n\u003C/figure>\n\u003C/center>\n\u003Cp>\u003C/p>\n\n\u003Ccenter>\n\u003Cfigure>\n   \u003Cimg src=\"https://res.cloudinary.com/about-gitlab-com/image/upload/v1752176061/Blog/ekmpd8ecanwwiena9xi9.png\" alt=\"unit-tests job output\">\n   \u003Cfigcaption>unit-tests job output\u003C/figcaption>\n\u003C/figure>\n\u003C/center>\n\u003Cp>\u003C/p>\n\n\u003Ccenter>\n\u003Cfigure>\n   \u003Cimg src=\"https://res.cloudinary.com/about-gitlab-com/image/upload/v1752176065/Blog/h7nqxszy50xdmnvhalfq.png\" alt=\"staging-deploy job output\">\n   \u003Cfigcaption>staging-deploy job output\u003C/figcaption>\n\u003C/figure>\n\u003C/center>\n\u003Cp>\u003C/p>\n\nYou can see how the artifact is stored in the create-binary job and used in the staging-deploy job. 
And that's how easy it is to migrate a pipeline from Jenkins to GitLab!\n\n### Additional considerations when migrating\nSome helpful considerations we’ve found to make the deployment process more straightforward are as follows:\n\n- Don't try to replicate tasks into GitLab jobs 1:1. Take some inventory and time to understand what the current pipeline is doing, and which problem it is solving.\n\n- Some Jenkins jobs may be too complex to move over to GitLab right away. For this reason, it may be beneficial to use the [GitLab Jenkins plugin](https://plugins.jenkins.io/gitlab-plugin/) to initiate Jenkins pipelines and view their results directly from GitLab. This allows you to slowly migrate certain actions to GitLab until the whole pipeline can be moved.\n\n- Implement [security scanners and code quality](https://docs.gitlab.com/ee/user/application_security/) using built-in templates provided by GitLab from the start. This will allow you to shift security left, reducing the potential for a breach.\n\n- Don't overcomplicate the CI/CD config and try to use every feature advantage at once. Modularize code and implement it in small iterations.\n\n- Implement monitoring and governance from the start.\n\n- Understand that the GitLab Runner (Go) might behave differently than the Jenkins agent (Java). CPU usage and memory consumption might differ — make sure to compare over time.\n\n- Consider investing in auto-scaling mechanisms, and shut down unneeded resources on the weekend, or outside of working hours.\n\n- Modernize application development by containerizing your jobs. Jenkins jobs are not executed on a container today but on a Jenkins agent running as a VM.\n\nWhile this list is not exhaustive, it does provide a good start on some considerations to take note of. If you need additional help, GitLab provides [professional services](https://about.gitlab.com/get-help/) to support your migration journey.\n\n### Learn more\nThanks for reading! 
I hope this guide has helped you get a clear understanding of why and how to migrate from Jenkins to GitLab. Not convinced? [Give GitLab a try with our free trial](https://about.gitlab.com/free-trial/), and see the value of a DevSecOps platform!\n\nHere are a few resources where you can learn more about GitLab, the benefits of using a DevSecOps platform, and migrating from Jenkins:\n\n- [Migrating from Jenkins](https://docs.gitlab.com/ee/ci/migration/jenkins.html)\n- [Planning a migration](https://docs.gitlab.com/ee/ci/migration/plan_a_migration.html)\n- [GitLab Project Importers](https://docs.gitlab.com/ee/user/project/import/)\n- [Tutorial: GitHub to GitLab migration the easy way](https://about.gitlab.com/blog/github-to-gitlab-migration-made-easy/)\n- [Video: GitHub to GitLab migration the easy way](https://youtu.be/0Id5oMl1Kqs?feature=shared)\n- [Jenkins to GitLab: The ultimate guide to modernizing your CI/CD environment](https://about.gitlab.com/blog/jenkins-gitlab-ultimate-guide-to-modernizing-cicd-environment/)\n",[111,9],{"slug":1091,"featured":93,"template":683},"jenkins-to-gitlab-migration-made-easy","content:en-us:blog:jenkins-to-gitlab-migration-made-easy.yml","Jenkins To Gitlab Migration Made Easy","en-us/blog/jenkins-to-gitlab-migration-made-easy.yml","en-us/blog/jenkins-to-gitlab-migration-made-easy",{"_path":1097,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":1098,"content":1104,"config":1109,"_id":1111,"_type":16,"title":1112,"_source":18,"_file":1113,"_stem":1114,"_extension":21},"/en-us/blog/windows-2022-support-for-gitlab-saas-runners",{"title":1099,"description":1100,"ogTitle":1099,"ogDescription":1100,"noIndex":6,"ogImage":1101,"ogUrl":1102,"ogSiteName":697,"ogType":698,"canonicalUrls":1102,"schema":1103},"Windows 2022 support for GitLab SaaS runners now available","Along with this announcement comes the deprecation of Windows 2019 and the existing tags on Windows runners with GitLab 
17.0.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098940/Blog/Hero%20Images/Blog/Hero%20Images/AdobeStock_674148848_4qGCRe0NRFou2mFHkNhv7m_1750098939992.jpg","https://about.gitlab.com/blog/windows-2022-support-for-gitlab-saas-runners","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Windows 2022 support for GitLab SaaS runners now available\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Gabriel Engel\"}],\n        \"datePublished\": \"2024-01-22\"\n      }",{"title":1099,"description":1100,"authors":1105,"heroImage":1101,"date":1106,"body":1107,"category":14,"tags":1108},[811],"2024-01-22","We are now supporting Windows 2022 on our SaaS runners on Windows, which are currently in Beta. This is an important step in our plan to mature SaaS runners to general availability.\n\nWith this update, we are also announcing the deprecation of Windows 2019 and the existing tags on Windows runners with GitLab 17.0, as we aim to simplify the tags across our portfolio of SaaS runners. 
The tags change follows our announcement of [removing tags from our small SaaS runner on Linux](https://about.gitlab.com/blog/removing-tags-from-small-saas-runner-on-linux/).\n\n### Changes at a glance\n\n- Windows 2022 is available now.\n- We are changing the tags to `saas-windows-medium-amd64`.\n- With GitLab 17.0, we will deprecate Windows 2019 and with it the tags `shared-windows` and `windows-1809`.\n\n### How to migrate to Windows 2022\n\nTo migrate to using Windows 2022, update the tag in your `.gitlab-ci.yml` file to `saas-windows-medium-amd64` as such:\n\n```yaml\nwindows-2022:\n  stage: test\n  tags:\n  - saas-windows-medium-amd64\n  script:\n    - echo \"I'm running Windows 2022\"\n```\n\nThe job execution will look like this:\n\n![windows 2022 - migrate](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098959/Blog/Content%20Images/Blog/Content%20Images/image2_aHR0cHM6_1750098959552.png)\n\nWith this release, we only support and update Windows 2022 for SaaS runners on Windows. 
Users are not able to select a different image version.\n\nYou can see all updates to our pre-installed software components that ship with our Windows 2022 image under [Supported Windows versions](https://docs.gitlab.com/ee/ci/runners/saas/windows_saas_runner.html#supported-windows-versions).\n\n### Action required until GitLab 17.0\n\nWith GitLab 17.0, jobs configured with any of the deprecated tags `shared-windows` or `windows-1809` will be stuck.\n\nAn example job configuration that will be affected:\n\n```yaml\ntest-invalid-tag:\n  stage: test\n  tags:\n  - shared-windows\n  - windows-1809\n  script:\n    - echo \"I'm affected and will be stuck after 17.0\"\n```\n\nThe stuck job execution will look like this:\n\n![windows 2022 support - stuck job](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098960/Blog/Content%20Images/Blog/Content%20Images/image1_aHR0cHM6_1750098959552.png)\n\nTo ensure a smooth transition and avoid job disruptions, update the tag in your `.gitlab-ci.yml` file to `saas-windows-medium-amd64` in the next three months.\n\n## References:\n- [What are SaaS runners?](https://docs.gitlab.com/ee/ci/runners/)\n- [SaaS runners on Windows documentation](https://docs.gitlab.com/ee/ci/runners/saas/windows_saas_runner.html)\n- [Tags - '.gitlab-ci.yml' Keyword Reference](https://docs.gitlab.com/ee/ci/yaml/#tags)",[484,680,837],{"slug":1110,"featured":6,"template":683},"windows-2022-support-for-gitlab-saas-runners","content:en-us:blog:windows-2022-support-for-gitlab-saas-runners.yml","Windows 2022 Support For Gitlab Saas 
Runners","en-us/blog/windows-2022-support-for-gitlab-saas-runners.yml","en-us/blog/windows-2022-support-for-gitlab-saas-runners",{"_path":1116,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":1117,"content":1123,"config":1129,"_id":1131,"_type":16,"title":1132,"_source":18,"_file":1133,"_stem":1134,"_extension":21},"/en-us/blog/inside-dora-performers-score-in-gitlab-value-streams-dashboard",{"title":1118,"description":1119,"ogTitle":1118,"ogDescription":1119,"noIndex":6,"ogImage":1120,"ogUrl":1121,"ogSiteName":697,"ogType":698,"canonicalUrls":1121,"schema":1122},"Inside DORA Performers score in GitLab Value Streams Dashboard ","Learn how four key metrics drive DevOps maturity, helping teams optimize workflows and achieve DevOps excellence.\n","https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098908/Blog/Hero%20Images/Blog/Hero%20Images/AdobeStock_644947854_248JIrEOCaGJdfJdiSjYde_1750098907747.jpg","https://about.gitlab.com/blog/inside-dora-performers-score-in-gitlab-value-streams-dashboard","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Inside DORA Performers score in GitLab Value Streams Dashboard \",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Haim Snir\"}],\n        \"datePublished\": \"2024-01-18\"\n      }",{"title":1118,"description":1119,"authors":1124,"heroImage":1120,"date":1125,"body":1126,"category":14,"tags":1127},[833],"2024-01-18","The DevOps Research and Assessment ([DORA](https://docs.gitlab.com/ee/user/analytics/dora_metrics.html)) metrics are industry-standard measurements to help better understand the capabilities that drive software delivery and operations performance. 
GitLab recently added a DORA Performers score panel to the Value Streams Dashboard in the GitLab DevSecOps Platform to visualize the status of the organization's DevOps performance across different projects.\n\nThis new visualization displays a breakdown of the DORA performance levels, designating a score level for each project under a group. Executives can use this visualization to easily identify the highs and lows in DORA scores and understand their organization's DevOps health top to bottom.\n\n> [Try the Value Streams Dashboard today.](https://about.gitlab.com/blog/getting-started-with-value-streams-dashboard/)\n\n## What are DORA metrics?\n\nDuring the past nine years, the DORA team gathered insights from over 36,000 professionals around the globe on how to measure the performance of a software development team. They identified four metrics as key indicators to measure software teams' development effectiveness and efficiency:\n\n- [Deployment frequency](https://docs.gitlab.com/ee/user/analytics/dora_metrics.html#deployment-frequency) and [Lead time for changes](https://docs.gitlab.com/ee/user/analytics/dora_metrics.html#lead-time-for-changes) measure team velocity.\n- [Change failure rate](https://docs.gitlab.com/ee/user/analytics/dora_metrics.html#change-failure-rate) and [Time to restore service](https://docs.gitlab.com/ee/user/analytics/dora_metrics.html#time-to-restore-service) measure stability.\n\nBy analyzing these metrics, teams are able to find areas for improvement, optimize their workflows, and ultimately drive positive business results.\n\nDORA uses these metrics to identify high-performing, medium-performing, and low-performing teams.  
These performance levels provide a framework for organizations to assess their DevOps maturity and effectiveness.\n\n![DORA performers](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098929/Blog/Content%20Images/Blog/Content%20Images/image1_aHR0cHM6_1750098929143.png)\n\nHigh performance indicates that the team is operating at excellent speed and stability in their software delivery, reaching the peak of DevOps maturity.\n\nMedium and low performance levels suggest opportunities for improvement in different aspects of the software development and delivery process.\n\nLet's take a closer look at the DORA definition for each performance level.\n\n![Chart of performance metrics](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098929/Blog/Content%20Images/Blog/Content%20Images/image2_aHR0cHM6_1750098929144.png)\n\u003Csup>\u003Csub>_Source: [DORA Accelerate State of DevOps report](https://cloud.google.com/blog/products/devops-sre/dora-2022-accelerate-state-of-devops-report-now-out)_\u003C/sub>\u003C/sup>\u003Cp>\u003C/p>\n\n## GitLab definitions for the DORA score performance levels\n\nDORA metrics are available out of the box in the GitLab DevSecOps platform. To enable the score calculation to operate \"out of the box\" with GitLab, we adjust the scoring rules so they work with the platform's unified data model. 
Read more in the [score definition documentation](https://docs.gitlab.com/ee/user/analytics/value_streams_dashboard.html#dora-performers-score-panel).\n\nThe goal is for organizations to strive for high performance in these metrics, as a high score often correlates with better business outcomes, such as increased efficiency, faster time-to-market, and higher software quality.\n\n## DORA metrics in GitLab\n\nIn addition to the Value Streams dashboard, the DORA metrics are available also on the [CI/CD analytics charts](https://docs.gitlab.com/ee/user/analytics/ci_cd_analytics.html), which show the history of DORA metrics over time, and on [Insights reports](https://docs.gitlab.com/ee/user/project/insights/index.html#dora-query-parameters) where you can create custom charts.\n\nWatch our DORA overview video:\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n \u003Ciframe src=\"https://www.youtube.com/embed/jYQSH4EY6_U?si=sE9rf_X58BGD2uK9\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\n## Get started today\nYou can get started with the Value Streams Dashboard by [following the instructions](https://about.gitlab.com/blog/getting-started-with-value-streams-dashboard/) in this documentation. 
Then, to help us improve the value of the Value Streams Dashboard, please share feedback about your experience in this [brief survey](https://gitlab.fra1.qualtrics.com/jfe/form/SV_50guMGNU2HhLeT4).\n",[1128,9,484,680,728],"DevOps",{"slug":1130,"featured":6,"template":683},"inside-dora-performers-score-in-gitlab-value-streams-dashboard","content:en-us:blog:inside-dora-performers-score-in-gitlab-value-streams-dashboard.yml","Inside Dora Performers Score In Gitlab Value Streams Dashboard","en-us/blog/inside-dora-performers-score-in-gitlab-value-streams-dashboard.yml","en-us/blog/inside-dora-performers-score-in-gitlab-value-streams-dashboard",{"_path":1136,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":1137,"content":1143,"config":1150,"_id":1152,"_type":16,"title":1153,"_source":18,"_file":1154,"_stem":1155,"_extension":21},"/en-us/blog/gitlab-is-now-available-as-an-aws-codestar-connections-provider",{"title":1138,"description":1139,"ogTitle":1138,"ogDescription":1139,"noIndex":6,"ogImage":1140,"ogUrl":1141,"ogSiteName":697,"ogType":698,"canonicalUrls":1141,"schema":1142},"GitLab is now available as an AWS CodeStar Connections provider","AWS released native CodePipeline integration for GitLab projects and repos, helping to ensure a best-in-class experience when using GitLab and AWS together.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098884/Blog/Hero%20Images/Blog/Hero%20Images/AdobeStock_397632156_3Ldy1urjMStQCl4qnOBvE0_1750098884409.jpg","https://about.gitlab.com/blog/gitlab-is-now-available-as-an-aws-codestar-connections-provider","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"GitLab is now available as an AWS CodeStar Connections provider\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Darwin Sanoy\"}],\n        \"datePublished\": \"2024-01-11\"\n      
}",{"title":1138,"description":1139,"authors":1144,"heroImage":1140,"date":1146,"body":1147,"category":14,"tags":1148},[1145],"Darwin Sanoy","2024-01-11","The GitLab DevSecOps Platform now integrates natively with many AWS services through AWS CodeStar Connections and AWS CodePipeline. This long-awaited integration was recently completed by the AWS CodeSuite service team for GitLab.com SaaS, GitLab Self-Managed, and GitLab Dedicated. AWS CodeStar Connections is a utility layer, which means other AWS services can enable native GitLab integration with less work.\n\nOnce created, CodeStar Connections objects can be used directly to integrate with many AWS services such as:\n- AWS CodePipeline,\n- Amazon CodeWhisperer Customization Capability,\n- AWS Service Catalog\n- AWS Glue\n\nWhen a CodeStar Connection is used to configure a GitLab CodePipeline configuration it can further support:\n- AWS CodeBuild\n- Amazon SageMaker MLOps Projects\n- AWS CodeDeploy\n\nGitLab and AWS have been working at ever deeper levels of technical and business integration to ensure that our co-customers have a best-in-class experience when using GitLab and AWS together.\n\n![AWS CodeStar integration](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098901/Blog/Content%20Images/Blog/Content%20Images/image1_aHR0cHM6_1750098900704.png)\n\nCheck out the complete list of AWS Services that are now directly accessible in the [GitLab AWS Integration Index documentation](https://docs.gitlab.com/ee/solutions/cloud/aws/gitlab_aws_integration.html).\n\n![CodeStar - New Technology and Solutions for using GitLab and AWS Together ](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098901/Blog/Content%20Images/Blog/Content%20Images/AWS_re_Invent_2023__New_Technology_and_Solutions_for_using_GitLab_and_AWS_Together__4__aHR0cHM6_1750098900705.png)\n\n## Resources\n\n- GitLab [AWS Integration Index 
documentation](https://docs.gitlab.com/ee/solutions/cloud/aws/gitlab_aws_integration.html) is a one-stop location for these new integrations as well as existing integrations\n- AWS documentation for [setting up CodeStar Connections with GitLab.com SaaS](https://docs.aws.amazon.com/codepipeline/latest/userguide/connections-gitlab-managed.html)\n- AWS documentation for [setting up CodeStar Connections with self-managed GitLab](https://docs.aws.amazon.com/codepipeline/latest/userguide/connections-gitlab-managed.html)\n - AWS documentation for [configuring AWS CodePipeline integration](https://docs.gitlab.com/ee/user/project/integrations/aws_codepipeline.html)\n- [AWS announcement for GitLab CodePipeline Integration for GitLab SaaS](https://aws.amazon.com/about-aws/whats-new/2023/08/aws-codepipeline-supports-gitlab/) and [AWS announcement for GitLab Self-Managed](https://aws.amazon.com/about-aws/whats-new/2023/12/codepipeline-gitlab-self-managed/)\n\n![codestar-amazonpartnerlogo](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098901/Blog/Content%20Images/Blog/Content%20Images/image3_aHR0cHM6_1750098900705.png)\n",[1149,111,284,233],"AWS",{"slug":1151,"featured":6,"template":683},"gitlab-is-now-available-as-an-aws-codestar-connections-provider","content:en-us:blog:gitlab-is-now-available-as-an-aws-codestar-connections-provider.yml","Gitlab Is Now Available As An Aws Codestar Connections 
Provider","en-us/blog/gitlab-is-now-available-as-an-aws-codestar-connections-provider.yml","en-us/blog/gitlab-is-now-available-as-an-aws-codestar-connections-provider",{"_path":1157,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":1158,"content":1164,"config":1170,"_id":1172,"_type":16,"title":1173,"_source":18,"_file":1174,"_stem":1175,"_extension":21},"/en-us/blog/managing-gitlab-resources-with-pulumi",{"title":1159,"description":1160,"ogTitle":1159,"ogDescription":1160,"noIndex":6,"ogImage":1161,"ogUrl":1162,"ogSiteName":697,"ogType":698,"canonicalUrls":1162,"schema":1163},"Managing GitLab resources with Pulumi","Learn how Pulumi's infrastructure-as-code tool helps streamline the automation of GitLab CI/CD workflows.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749683430/Blog/Hero%20Images/AdobeStock_293854129__1_.jpg","https://about.gitlab.com/blog/managing-gitlab-resources-with-pulumi","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Managing GitLab resources with Pulumi\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Josh Kodroff, Pulumi\"}],\n        \"datePublished\": \"2024-01-10\"\n      }",{"title":1159,"description":1160,"authors":1165,"heroImage":1161,"date":1167,"body":1168,"category":14,"tags":1169},[1166],"Josh Kodroff, Pulumi","2024-01-10","In the ever-evolving landscape of DevOps, platform engineers are increasingly seeking efficient and flexible tools to manage their GitLab resources, particularly for orchestrating continuous integration/continuous delivery (CI/CD) pipelines. [Pulumi](https://pulumi.com?utm_source=GitLab&utm_medium=Referral&utm_campaign=Managing-GitLab-Resources) offers a unique approach to infrastructure as code (IaC) by allowing engineers to use familiar programming languages such as TypeScript, Python, Go, and others. This approach streamlines the automation of GitLab CI/CD workflows. 
Pulumi's declarative syntax, combined with its ability to treat infrastructure as software, facilitates version control, collaboration, and reproducibility, aligning seamlessly with the GitLab philosophy.\n\nLet's explore the power of using Pulumi and GitLab.\n\n## What is Pulumi?\n\nPulumi is an IaC tool that allows you to manage resources in more than 150 supported cloud or SaaS products (including AWS and GitLab, which we will be demonstrating in this post). You can express your infrastructure with Pulumi using popular general-purpose programming languages like TypeScript, Python, and Go.\n\nPulumi is declarative (just like other popular IaC tools you may be familiar with), which means that you only need to describe the desired end state of your resources and Pulumi will figure out the order of create, read, update, and delete (CRUD) operations to get from your current state to your desired state.\n\nIt might seem strange at first to use a general-purpose programming language to express your infrastructure's desired state if you're used to tools like CloudFormation or Terraform, but there are considerable advantages to Pulumi's approach, including the following:\n- **Familiar tooling.** You don't need any special tooling to use Pulumi. Code completion will work as expected in your favorite editor or IDE without any additional plugins. You can share Pulumi code using familiar packaging tools like npm, PyPI, etc.\n- **Familiar syntax.** Unlike with DSL-based IaC tools, you don't need to learn special ways of indexing an array element, or creating loops or conditionals - you can just use the normal syntax of a language you already know.\n\nThe Pulumi product has an open source component, which includes the Pulumi command line and its ecosystem of providers, which provide the integration between Pulumi and the cloud and SaaS providers it supports. 
Pulumi also offers a free (for individual use) and paid (for teams and organizations) SaaS service called Pulumi Cloud, which provides state file and secrets management, among many other useful features. It’s a widely-supported open-source IaC tool.\n\n## Initializing the project\n\nTo complete this example you'll need:\n\n1. [A Pulumi Cloud account](https://app.pulumi.com?utm_source=GitLab&utm_medium=Referral&utm_campaign=Managing-GitLab-Resources). Pulumi Cloud is free for individual use forever and we'll never ask for your credit card. Pulumi Cloud will manage your Pulumi state file and handle any secrets encryption/decryption. Because it's free for individual use (no credit card required), we strongly recommend that you use Pulumi Cloud as your backend when learning how to use Pulumi.\n2. A GitLab account, group, and a GitLab token set to the `GITLAB_TOKEN` environment variable.\n3. An AWS account and credentials with permissions to deploy identity and access management (IAM) resources. For details on how to configure AWS credentials on your system for use with Pulumi, see [AWS Classic: Installation and Configuration](https://www.pulumi.com/registry/packages/aws/installation-configuration/?utm_source=GitLab&utm_medium=Referral&utm_campaign=Managing-GitLab-Resources).\n\nThis example will use two providers from the [Pulumi Registry](https://www.pulumi.com/registry/?utm_source=GitLab&utm_medium=Referral&utm_campaign=Managing-GitLab-Resources):\n\n1. The [GitLab Provider](https://www.pulumi.com/registry/packages/gitlab/?utm_source=GitLab&utm_medium=Referral&utm_campaign=Managing-GitLab-Resources) will be used to manage resources like Projects, ProjectFiles (to initialize our project repository), ProjectHooks (for the integration with Pulumi Cloud), and ProjectVariables (to hold configuration for our CI/CD pipelines).\n2. 
The [AWS Classic Provider](https://www.pulumi.com/registry/packages/aws/?utm_source=GitLab&utm_medium=Referral&utm_campaign=Managing-GitLab-Resources) will be used to manage AWS resources to create OpenID Connect (OIDC) connectivity between AWS and GitLab.\n\nYou can initialize your Pulumi project by changing into a new, empty directory, running the following command, and accepting all the default values for any subsequent prompts:\n\n```bash\npulumi new typescript\n```\n\nThis will bootstrap an empty Pulumi program. Now you can import the provider SDKs for the providers you'll need:\n\n```bash\nnpm i @pulumi/aws @pulumi/gitlab\n```\n\nYour `index.ts` file is the entry point into your Pulumi program (just as you would expect in any other Node.js program) and will be the file to which you will add your resources. Add the following imports to the top of `index.ts`:\n\n```typescript\nimport * as gitlab from \"@pulumi/gitlab\";\nimport * as aws from \"@pulumi/aws\";\n```\n\nNow you are ready to add some resources!\n\n## Adding your first resources\n\nFirst, let's define a variable that will hold the audience claim in our OIDC JWT token. Add the following code to `index.ts`:\n\n```typescript\nconst audience = \"gitlab.com\";\n```\n\nThe above code assumes you're using the GitLab SaaS (\u003Chttps://gitlab.com>). If you are using a private GitLab install, your value should be the domain of your GitLab install, e.g. 
`gitlab.example.com`.\n\nThen, you'll use a [Pulumi function](https://www.pulumi.com/docs/concepts/resources/functions/?utm_source=GitLab&utm_medium=Referral&utm_campaign=Managing-GitLab-Resources) to grab an existing GitLab group by name and create a new public GitLab project in your GitLab group:\n\n```typescript\nconst group = gitlab.getGroup({\n  fullPath: \"my-gitlab-group\", // Replace the value with the name of your GL group\n});\n\nconst project = new gitlab.Project(\"pulumi-gitlab-demo\", {\n  visibilityLevel: \"public\",\n  defaultBranch: \"main\",\n  namespaceId: group.then(g => parseInt(g.id)),\n  archiveOnDestroy: false // Be sure to set this to `true` for any non-demo repos you manage with Pulumi!\n});\n```\n\n## Creating OIDC resources\n\nTo allow GitLab CI/CD to request and be granted temporary AWS credentials, you'll need to create an OIDC provider in AWS that contains the thumbprint of GitLab's certificate, and then create an AWS role that GitLab is allowed to assume.\n\nYou'll scope the assume role policy so that the role can only be assumed by the GitLab project you declared earlier. The role that GitLab CI/CD assumes will have full administrator access so that Pulumi can create and manage any resource within AWS. (Note that it is possible to grant less than `FullAdministrator` access to Pulumi, but `FullAdministrator` is often practically required, e.g. where IAM resources, like roles, need to be created. Role creation requires `FullAdministrator`. 
This consideration also applies to IaC tools like Terraform.)\n\nAdd the following code to `index.ts`:\n\n```typescript\nconst GITLAB_OIDC_PROVIDER_THUMBPRINT = \"b3dd7606d2b5a8b4a13771dbecc9ee1cecafa38a\";\n\nconst gitlabOidcProvider = new aws.iam.OpenIdConnectProvider(\"gitlab-oidc-provider\", {\n  clientIdLists: [`https://${audience}`],\n  url: `https://${audience}`,\n  thumbprintLists: [GITLAB_OIDC_PROVIDER_THUMBPRINT],\n}, {\n  deleteBeforeReplace: true, // URLs are unique identifiers and cannot be auto-named, so we have to delete before replace.\n});\n\nconst gitlabAdminRole = new aws.iam.Role(\"gitlabAdminRole\", {\n  assumeRolePolicy: {\n    Version: \"2012-10-17\",\n    Statement: [\n      {\n        Effect: \"Allow\",\n        Principal: {\n          Federated: gitlabOidcProvider.arn,\n        },\n        Action: \"sts:AssumeRoleWithWebIdentity\",\n        Condition: {\n          StringLike: {\n            // Note: Square brackets around the key are what allow us to use a\n            // templated string. See:\n            // https://stackoverflow.com/questions/59791960/how-to-use-template-literal-as-key-inside-object-literal\n            [`${audience}:sub`]: pulumi.interpolate`project_path:${project.pathWithNamespace}:ref_type:branch:ref:*`\n          },\n        },\n      },\n    ],\n  },\n});\n\nnew aws.iam.RolePolicyAttachment(\"gitlabAdminRolePolicy\", {\n  policyArn: \"arn:aws:iam::aws:policy/AdministratorAccess\",\n  role: gitlabAdminRole.name,\n});\n```\n\nA few things to be aware of regarding the thumbprint:\n\n1. If you are self-hosting GitLab, you'll need to obtain the thumbprint from your private GitLab installation.\n2. 
If you're using GitLab SaaS, it's possible GitLab's OIDC certificate may have been rotated by the time you are reading this.\n\nIn either case, you can obtain the correct/latest thumbprint value by following AWS' instructions contained in [Obtaining the thumbprint for an OpenID Connect Identity Provider](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_create_oidc_verify-thumbprint.html) in the AWS docs.\n\nYou'll also need to add the role's ARN as a project variable so that the CI/CD process can make a request to assume the role:\n\n```typescript\nnew gitlab.ProjectVariable(\"role-arn\", {\n  project: project.id,\n  key: \"ROLE_ARN\",\n  value: gitlabAdminRole.arn,\n});\n```\n\n## Project hook (optional)\n\nPulumi features an integration with GitLab via a webhook that will post the output of the `pulumi preview` directly to a merge request as a comment. For the webhook to work, you must have a Pulumi organization set up with GitLab as its SSO source. If you don't have a Pulumi organization and would like to try the integration, you can [sign up for a free trial](https://app.pulumi.com/signup?utm_source=GitLab&utm_medium=Referral&utm_campaign=Managing-GitLab-Resources) organization. The trial lasts 14 days, will give you access to all of Pulumi's paid features, and does not require a credit card. 
For full details on the integration, see [Pulumi CI/CD & GitLab integration](https://www.pulumi.com/docs/using-pulumi/continuous-delivery/gitlab-app/?utm_source=GitLab&utm_medium=Referral&utm_campaign=Managing-GitLab-Resources).\n\nTo set up the webhook, add the following to your `index.ts` file:\n\n```typescript\nnew gitlab.ProjectHook(\"project-hook\", {\n  project: project.id,\n  url: \"https://api.pulumi.com/workflow/gitlab\",\n  mergeRequestsEvents: true,\n  enableSslVerification: true,\n  token: process.env[\"PULUMI_ACCESS_TOKEN\"]!,\n  pushEvents: false,\n});\n```\n\nNote that the above resource assumes that your Pulumi access token is stored as an environment variable. You may want to instead store the token in your stack configuration file. To do this, run the following command:\n\n```bash\npulumi config set --secret pulumiAccessToken ${PULUMI_ACCESS_TOKEN}\n```\n\nThis will store the encrypted value in your Pulumi stack configuration file (`Pulumi.dev.yaml`). Because the value is encrypted, you can safely commit your stack configuration file to git. You can access its value in your Pulumi program like this:\n\n```typescript\nconst config = new pulumi.Config();\nconst pulumiAccessToken = config.requireSecret(\"pulumiAccessToken\");\n```\n\nFor more details on secrets handling in Pulumi, see [Secrets](https://www.pulumi.com/docs/concepts/secrets/?utm_source=GitLab&utm_medium=Referral&utm_campaign=Managing-GitLab-Resources) in the Pulumi docs.\n\n## Creating a repository and adding repository files\n\nYou'll need to create a git repository (a GitLab project) and add some files to it that will control the CI/CD process. 
First, create some files that you'll include in your GitLab repo:\n\n```bash\nmkdir -p repository-files/scripts\ntouch repository-files/.gitlab-ci.yml repository-files/scripts/{aws-auth.sh,pulumi-preview.sh,pulumi-up.sh}\nchmod +x repository-files/scripts/{aws-auth.sh,pulumi-preview.sh,pulumi-up.sh}\n```\n\nNext, you'll need a GitLab CI/CD YAML file to describe the pipeline: which container image it should be run in and what the steps of the pipeline are. Place the following code into `repository-files/.gitlab-ci.yml`:\n\n```yaml\ndefault:\n  image:\n    name: \"pulumi/pulumi:3.91.1\"\n    entrypoint: [\"\"]\n\nstages:\n  - infrastructure-update\n\npulumi-up:\n  stage: infrastructure-update\n  id_tokens:\n    GITLAB_OIDC_TOKEN:\n      aud: https://gitlab.com\n  before_script:\n    - chmod +x ./scripts/*.sh\n    - ./scripts/aws-auth.sh\n  script:\n    - ./scripts/pulumi-up.sh\n  only:\n    - main # i.e., the name of the default branch\n\npulumi-preview:\n  stage: infrastructure-update\n  id_tokens:\n    GITLAB_OIDC_TOKEN:\n      aud: https://gitlab.com\n  before_script:\n    - chmod +x ./scripts/*.sh\n    - ./scripts/aws-auth.sh\n  script:\n    - ./scripts/pulumi-preview.sh\n  rules:\n    - if: $CI_PIPELINE_SOURCE == 'merge_request_event'\n```\n\nThe CI/CD process is fairly simple but illustrates the basic functionality needed for a production-ready pipeline (or these steps may be all your organization needs):\n\n1. Run the `pulumi preview` command when a merge request is opened or updated. This will help the reviewer gain important context. Because IaC is necessarily stateful (the state file is what enables Pulumi to be a declarative tool), when reviewing changes reviewers _must have both the code changes and the infrastructure changes to fully understand the impact of changes to the codebase_. This process constitutes continuous integration.\n2. Run the `pulumi up` command when code is merged to the default branch (called `main` by default). 
This process constitutes continuous delivery.\n\nNote that this example uses the [`pulumi/pulumi`](https://hub.docker.com/r/pulumi/pulumi) \"kitchen sink\" image that contains all the runtimes for all the languages Pulumi supports, along with some ancillary tools like the AWS CLI (which you'll need in order to use OIDC authentication). While the `pulumi/pulumi` image is convenient, it's also quite large (1.41 GB at the time of writing), which makes it relatively slow to initialize. If you're creating production pipelines using Pulumi, you may want to consider creating your own custom (slimmer) image that has exactly the tools you need installed, perhaps starting with one of Pulumi's language-specific images, e.g. [`pulumi/pulumi-nodejs`](https://hub.docker.com/r/pulumi/pulumi-nodejs).\n\nThen you'll need to write the script that authenticates GitLab with AWS via OIDC. Place the following code in `repository-files/scripts/aws-auth.sh`:\n\n```bash\n#!/bin/bash\n\nmkdir -p ~/.aws\necho \"${GITLAB_OIDC_TOKEN}\" > /tmp/web_identity_token\necho -e \"[profile oidc]\\nrole_arn=${ROLE_ARN}\\nweb_identity_token_file=/tmp/web_identity_token\" > ~/.aws/config\n\necho \"length of GITLAB_OIDC_TOKEN=${#GITLAB_OIDC_TOKEN}\"\necho \"ROLE_ARN=${ROLE_ARN}\"\n\nexport AWS_PROFILE=\"oidc\"\naws sts get-caller-identity\n```\n\nFor continuous integration, you'll need a script that will execute the `pulumi preview` command when a merge request is opened. Place the following code in `repository-files/scripts/pulumi-preview.sh`:\n\n```bash\n#!/bin/bash\nset -e -x\n\nexport PATH=$PATH:$HOME/.pulumi/bin\n\nyarn install\npulumi login\npulumi org set-default $PULUMI_ORG\npulumi stack select dev\nexport AWS_PROFILE=\"oidc\"\npulumi preview\n```\n\nFor continuous delivery, you'll need a similar script that will execute the `pulumi up` command when the Merge Request is merged to the default branch. 
Place the following code in `repository-files/scripts/pulumi-up.sh`:\n\n```bash\n#!/bin/bash\nset -e -x\n\n# Add the pulumi CLI to the PATH\nexport PATH=$PATH:$HOME/.pulumi/bin\n\nyarn install\npulumi login\npulumi org set-default $PULUMI_ORG\npulumi stack select dev\nexport AWS_PROFILE=\"oidc\"\npulumi up -y\n```\n\nFinally, you'll need to add these files to your GitLab Project. Add the following code block to your `index.ts` file:\n\n```typescript\n[\n  \"scripts/aws-auth.sh\",\n  \"scripts/pulumi-preview.sh\",\n  \"scripts/pulumi-up.sh\",\n  \".gitlab-ci.yml\",\n].forEach(file => {\n  const content = fs.readFileSync(`repository-files/${file}`, \"utf-8\");\n\n  new gitlab.RepositoryFile(file, {\n    project: project.id,\n    filePath: file,\n    branch: \"main\",\n    content: content,\n    commitMessage: `Add ${file}`,\n    encoding: \"text\",\n  });\n});\n```\n\nNote that we're able to take advantage of general-purpose programming language features: We are able to create an array and use `forEach()` to iterate through its members, and we are able to use the `fs.readFileSync()` method from the Node.js runtime to read the contents of our file. This is powerful stuff!\n\n## Project variables and stack outputs\n\nYou'll need a few more resources to complete the code. Your CI/CD process will need a Pulumi access token in order to authenticate against the Pulumi Cloud backend which holds your Pulumi state file and handles encryption and decryption of secrets. You will also need to supply the name of your Pulumi organization. (If you are using Pulumi Cloud as an individual, this is your Pulumi username.) 
Add the following to `index.ts`:\n\n```typescript\nnew gitlab.ProjectVariable(\"pulumi-access-token\", {\n  project: project.id,\n  key: \"PULUMI_ACCESS_TOKEN\",\n  value: process.env[\"PULUMI_ACCESS_TOKEN\"]!,\n  masked: true,\n});\n\nnew gitlab.ProjectVariable(\"pulumi-org\", {\n  project: project.id,\n  key: \"PULUMI_ORG\",\n  value: pulumi.getOrganization(),\n});\n```\n\nFinally, you'll need to add a stack output so that we can run the `git clone` command to test out our pipeline. Stack outputs allow you to access values within your Pulumi program from the command line or from other Pulumi programs. For more information, see [Understanding Stack Outputs](https://www.pulumi.com/learn/building-with-pulumi/stack-outputs/?utm_source=GitLab&utm_medium=Referral&utm_campaign=Managing-GitLab-Resources). Add the following to `index.ts`:\n\n```typescript\nexport const gitCloneCommand = pulumi.interpolate`git clone ${project.sshUrlToRepo}`;\n```\n\n## Deploying your infrastructure and testing the pipeline\n\nTo deploy your resources, run the following command:\n\n```bash\npulumi up\n```\n\nPulumi will output a list of the resources it intends to create. Select `yes` to continue.\n\nOnce the command has completed, you can run the following command to get the git clone command for your GitLab repo:\n\n```bash\npulumi stack output gitCloneCommand\n```\n\nIn a new, empty directory, run the `git clone` command from your Pulumi stack output, e.g.:\n\n```bash\ngit clone git@gitlab.com:jkodroff/pulumi-gitlab-demo-9de2a3b.git\n```\n\nChange into the directory and create a new branch:\n\n```bash\ngit checkout -b my-first-branch\n```\n\nNow you are ready to create some sample infrastructure in our repository. 
You can use the `aws-typescript` template to quickly generate a simple Pulumi program with AWS resources:\n\n```bash\npulumi new aws-typescript -y --force\n```\n\nThe template includes a very simple Pulumi program that you can use to prove out the pipeline:\n\n```bash\n$ cat index.ts\nimport * as pulumi from \"@pulumi/pulumi\";\nimport * as aws from \"@pulumi/aws\";\nimport * as awsx from \"@pulumi/awsx\";\n\n// Create an AWS resource (S3 Bucket)\nconst bucket = new aws.s3.Bucket(\"my-bucket\");\n\n// Export the name of the bucket\nexport const bucketName = bucket.id;\n```\n\nCommit your changes and push your branch:\n\n```bash\ngit add -A\ngit commit -m \"My first commit.\"\ngit push\n```\n\nIn the GitLab UI, create a merge request for your branch:\n\n![Screenshot demonstrating opening a GitLab Merge Request](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749683438/Blog/Content%20Images/create-merge-request.jpg)\n\nYour merge request pipeline should start running:\n\n![Screenshot demonstrating opening a GitLab Merge Request](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749683438/Blog/Content%20Images/merge-request-running.jpg)\n\nOnce the pipeline completes, you should see the output of the `pulumi preview` command in the pipeline's logs:\n\n![Screenshot of a GitLab pipeline log showing the output of the \"pulumi preview\" command](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749683438/Blog/Content%20Images/pulumi-preview.jpg)\n\nIf you installed the optional webhook, you should see the results of `pulumi preview` posted back to the merge request as a comment:\n\n![Screenshot of the GitLab Merge Request screen showing the output of the \"pulumi preview\" command as a comment](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749683438/Blog/Content%20Images/merge-request-comment.jpg)\n\nOnce the pipeline has completed running, your merge request is ready to merge:\n\n![Screenshot of the GitLab Merge Request screen showing a 
successfully completed pipeline](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749683438/Blog/Content%20Images/merge.jpg)\n\nMerging the merge request will trigger the main branch pipeline. (Note that in this screen you will see a failed initial run of CI/CD on the main branch toward the bottom of the screen. This is normal and is caused by the initial upload of `.gitlab-ci.yml` to the main branch without a Pulumi program being present.)\n\n![Screenshot of the GitLab pipelines screen showing a running pipeline along with a passed pipeline](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749683438/Blog/Content%20Images/piplines.jpg)\n\nIf you click into the main branch pipeline's execution, you can see your bucket has been created:\n\n![Screenshot of a GitLab pipeline log showing the output of the \"pulumi up\" command](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749683438/Blog/Content%20Images/pulumi-up.jpg)\nTo delete the bucket, run the following command in your local clone of the repository:\n\n```bash\npulumi destroy\n```\n\nAlternatively, you could create a merge request that removes the bucket from your Pulumi program and run the pipelines again. Because Pulumi is declarative, removing the bucket from your program will delete it from AWS.\n\nFinally, run the `pulumi destroy` command again in the Pulumi program with your OIDC and GitLab resources to finish cleaning up.\n\n## Next steps\n\nUsing IaC to define pipelines and other GitLab resources can greatly improve your platform team's ability to reliably and quickly manage the resources to keep application teams delivering. 
With Pulumi, you also get the power and expressiveness of using popular programming languages to express those resources!\n\nIf you liked what you read here, here are some ways you can enhance your CI/CD pipelines:\n\n- Add [Pulumi Policy Packs](https://www.pulumi.com/docs/using-pulumi/crossguard/?utm_source=GitLab&utm_medium=Referral&utm_campaign=Managing-GitLab-Resources) to your pipeline: Pulumi policy packs allow you to validate that your resources are in compliance with your organization's security and compliance policies. Pulumi's open source [Compliance Ready Policies](https://www.pulumi.com/docs/using-pulumi/crossguard/compliance-ready-policies/?utm_source=GitLab&utm_medium=Referral&utm_campaign=Managing-GitLab-Resources) are a great place to start on your journey. Compliance Ready Policies contain policy rules for the major cloud providers for popular compliance frameworks like PCI-DSS and ISO27001, and policy packs are easy to integrate into your pipelines.\n- Check out [Pulumi ESC (Environments, Secrets, and Configuration)](https://www.pulumi.com/product/esc/?utm_source=GitLab&utm_medium=Referral&utm_campaign=Managing-GitLab-Resources): Pulumi ESC makes it easy to share static secrets like GitLab tokens and can even [generate dynamic secrets like AWS OIDC credentials](https://www.pulumi.com/blog/esc-env-run-aws/?utm_source=GitLab&utm_medium=Referral&utm_campaign=Managing-GitLab-Resources). ESC becomes especially useful when using Pulumi at scale because it reduces the duplication of configuration and secrets that are used by multiple Pulumi programs. 
You don't even have to use Pulumi IaC to benefit from Pulumi ESC - [Pulumi ESC's command line](https://www.pulumi.com/docs/esc-cli/commands/?utm_source=GitLab&utm_medium=Referral&utm_campaign=Managing-GitLab-Resources) can be used with any CLI tool like the AWS CLI.",[111,9,284,233],{"slug":1171,"featured":6,"template":683},"managing-gitlab-resources-with-pulumi","content:en-us:blog:managing-gitlab-resources-with-pulumi.yml","Managing Gitlab Resources With Pulumi","en-us/blog/managing-gitlab-resources-with-pulumi.yml","en-us/blog/managing-gitlab-resources-with-pulumi",{"_path":1177,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":1178,"content":1184,"config":1189,"_id":1191,"_type":16,"title":1192,"_source":18,"_file":1193,"_stem":1194,"_extension":21},"/en-us/blog/top-10-gitlab-technical-blogs-of-2023",{"title":1179,"description":1180,"ogTitle":1179,"ogDescription":1180,"noIndex":6,"ogImage":1181,"ogUrl":1182,"ogSiteName":697,"ogType":698,"canonicalUrls":1182,"schema":1183},"Top 10 GitLab technical blogs of 2023","2023 was a big year! Catch up on expert insights into DevSecOps, AI, CI/CD, and more.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749663000/Blog/Hero%20Images/tanukilifecycle.png","https://about.gitlab.com/blog/top-10-gitlab-technical-blogs-of-2023","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Top 10 GitLab technical blogs of 2023\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Sandra Gittlen\"}],\n        \"datePublished\": \"2024-01-09\",\n      }",{"title":1179,"description":1180,"authors":1185,"heroImage":1181,"date":1186,"body":1187,"category":14,"tags":1188},[746],"2024-01-09","2023 brought fresh insights from experts across GitLab and beyond —  all of them focused on the challenges and opportunities facing DevSecOps teams. 
From Lockheed Martin to CARFAX, organizations are trying to understand and unlock the power of technologies such as artificial intelligence (AI), CI/CD, security automation, and more. Our experts provided tips, best practices, and tutorials to use throughout the software development lifecycle.\n\nHere are the top 10 technical blogs from what was an incredible year in DevSecOps innovation.\n\n**1. [Jenkins to GitLab: The ultimate guide to modernizing your CI/CD environment](https://about.gitlab.com/blog/jenkins-gitlab-ultimate-guide-to-modernizing-cicd-environment/)**\nLooking for a smooth transition from Jenkins to GitLab? Follow this step-by-step tutorial to learn how GitLab's integrated CI/CD capabilities help deliver high-quality software faster.\n\n**2. [U.S. Navy Black Pearl: Lessons in championing DevSecOps](https://about.gitlab.com/blog/u-s-navy-black-pearl-lessons-in-championing-devsecops/)**\nSigma Defense's director of engineering details what it's like to manage the U.S. Navy's Black Pearl, which uses GitLab as its DevSecOps platform. The DevSecOps champion relays his experience implementing DevSecOps and the benefits of that decision.\n\n**3. [Quickstart guide for GitLab Remote Development workspaces](https://about.gitlab.com/blog/quick-start-guide-for-gitlab-workspaces/)**\nEnabling developers to work in their preferred environments empowers DevSecOps teams to build and deliver software more efficiently. With these quickstart instructions, developers can create a workspace, use the Web IDE Terminal to install dependencies or start their server, and view their running application.\n\n**4. [Introducing the GitLab CI/CD Catalog Beta](https://about.gitlab.com/blog/introducing-the-gitlab-ci-cd-catalog-beta/)**\nCI/CD catalogs are a game-changer, allowing developers to discover, integrate, and share pre-existing CI/CD components with ease. This tutorial shows how to get the most from this new DevSecOps platform feature.\n\n**5. 
[Combine GitLab Flow and GitLab Duo for a workflow powerhouse](https://about.gitlab.com/blog/gitlab-flow-duo/)**\nGitLab Flow and GitLab Duo can help organizations achieve significant improvements in end-to-end workflow efficiency that can lead to higher levels of productivity, deployment frequency, code quality and overall security, and production resiliency and availability. Find out how with this step-by-step guide.\n\n**6. [Efficient DevSecOps workflows: Hands-on python-gitlab API automation](https://about.gitlab.com/blog/efficient-devsecops-workflows-hands-on-python-gitlab-api-automation/)**\nThe python-gitlab library is a useful abstraction layer for the GitLab API. Dive into hands-on examples and best practices in this tutorial.\n\n**7. [Building GitLab with GitLab: Why there is no MLOps without DevSecOps](https://about.gitlab.com/blog/there-is-no-mlops-without-devsecops/)**\nAt GitLab, we believe in the power of MLOps, especially when combined with DevSecOps. So follow along as our data scientists adopt DevSecOps practices and enjoy the benefits of automation, repeatable workflows, standardization, and automatic provisioning of infrastructure.\n\n**8. [Explore the Dragon Realm: Build a C++ adventure game with a little help from AI](https://about.gitlab.com/blog/building-a-text-adventure-using-cplusplus-and-code-suggestions/)**\nReaders are invited to create a mystical world while learning how to integrate AI into their coding environment. This tutorial demonstrates how to use GitLab Duo Code Suggestions to create a text-based adventure game, including magical locations to visit and items to procure, using C++. \n\n**9. [How GitLab's Red Team automates C2 testing](https://about.gitlab.com/blog/how-gitlabs-red-team-automates-c2-testing/)**\nThe GitLab Red Team conducts security exercises that simulate real-world threats. They apply professional development practices to using the same open source C2 tools as threat actors. 
In this tutorial, the GitLab Red Team shares how they implement continuous testing for the Mythic framework, their design philosophy, and a public project that can be forked for use by other Red Teams.\n\n**10. [Building GitLab with GitLab: How GitLab.com inspired Dedicated](https://about.gitlab.com/blog/building-gitlab-with-gitlabcom-how-gitlab-inspired-dedicated/)**\nThe design of GitLab Dedicated, our single-tenancy SaaS version of the DevSecOps platform, came from the lessons learned while building GitLab.com. In this peek behind the curtains, learn the considerations that sparked different decisions regarding automation, databases, monitoring, availability, and more – and what the outcome was.\n\nSign up for the GitLab newsletter using the form to the right to receive the latest blogs right in your inbox.\n",[771,111,484,9,750,749],{"slug":1190,"featured":93,"template":683},"top-10-gitlab-technical-blogs-of-2023","content:en-us:blog:top-10-gitlab-technical-blogs-of-2023.yml","Top 10 Gitlab Technical Blogs Of 2023","en-us/blog/top-10-gitlab-technical-blogs-of-2023.yml","en-us/blog/top-10-gitlab-technical-blogs-of-2023",{"_path":1196,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":1197,"content":1203,"config":1211,"_id":1213,"_type":16,"title":1214,"_source":18,"_file":1215,"_stem":1216,"_extension":21},"/en-us/blog/building-gitlab-with-gitlab-stress-testing-product-analytics",{"title":1198,"description":1199,"ogTitle":1198,"ogDescription":1199,"noIndex":6,"ogImage":1200,"ogUrl":1201,"ogSiteName":697,"ogType":698,"canonicalUrls":1201,"schema":1202},"Building GitLab with GitLab: Stress-testing Product Analytics","We put Product Analytics through its paces internally to prep it for Beta. 
Find out what that entailed and how it led to feature improvements.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749659740/Blog/Hero%20Images/building-gitlab-with-gitlab-no-type.png","https://about.gitlab.com/blog/building-gitlab-with-gitlab-stress-testing-product-analytics","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Building GitLab with GitLab: Stress-testing Product Analytics\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"James Heimbuck\"},{\"@type\":\"Person\",\"name\":\"Sam Kerr\"}],\n        \"datePublished\": \"2023-12-14\",\n      }",{"title":1198,"description":1199,"authors":1204,"heroImage":1200,"date":1207,"body":1208,"category":14,"tags":1209},[1205,1206],"James Heimbuck","Sam Kerr","2023-12-14","To best understand how your features being developed and shipped are helping you meet your goals, you need data. The previously announced [Product Analytics feature set](https://about.gitlab.com/blog/introducing-product-analytics-in-gitlab/) helps our customers do just that by providing tools to instrument code and process and visualize the data – all within GitLab.\n\n## Privacy first\n\nWe know customer privacy is a big concern for our customers and our customer's customers. As we said in our [announcement blog](https://about.gitlab.com/blog/introducing-product-analytics-in-gitlab/#our-continued-commitment-to-user-privacy):\n\n\u003Cp>\u003Ccenter>\"Product Analytics is designed to honor commonly recognized opt-out signals and we are designing Product Analytics to give you full control over the data being collected on a cluster managed by GitLab or your own.\"\u003C/center>\u003C/p>\n\nNothing about that approach has changed and it is too important not to mention again.\n\n## Customer Zero and the biggest customer\n\nWe are progressing quickly towards the open beta for Product Analytics. 
We are currently feature-complete for the beta with the managed product analytics stack, [five existing SDKs for instrumentation](https://docs.gitlab.com/ee/user/product_analytics/#instrument-a-gitlab-project), [default dashboards](https://docs.gitlab.com/ee/user/analytics/analytics_dashboards.html#product-analytics), and the recently released  improved Dashboard and Visualization Designer experiences. We are also learning more about what problems our internal users still have that they cannot solve with Product Analytics.\n\nAs we prepare for the Beta release of Product Analytics, it is important for us to know how the Managed Product Analytics stack will stand up to a bigger event load than we are getting from the initial customers and internal users. With our commitment to dogfooding, adding more internal projects was the obvious answer, so we worked with more internal teams to add instrumentation for the Metrics Dictionary and [GitLab Design System](https://design.gitlab.com/) sites.\n\nInstrumenting internal projects gave us additional feedback about the setup of Product Analytics and the usefulness of the Audience and Behavior Dashboards, showing how many users were visiting and what pages they visited. These gave us great insights into the usefulness of Product Analytics, but did not provide the volume of events we needed to really stress test Product Analytics at the scale we wanted. \n\n![product-analytics-default-dashboard-list](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749683252/Blog/Content%20Images/product-analytics-default-dashboard-list.png)\n\nAt the same time the Analytics Instrumentation team was hard at work developing an event framework to make instrumentation easier for GitLab developers. This lets the GitLab teams create new features and update existing ones faster to understand how changes impact our users. 
This also made it much easier and faster to add Product Analytics to GitLab.com, which provided the event volume that would stress test the Product Analytics stack so we could validate our assumptions.\n\nOnce fully enabled, with all page views and events going to the Managed Product Analytics stack, we saw a 17x increase in load above all other internally instrumented projects, receiving over 20 million events a day. That is a lot of events!\n\nBy instrumenting GitLab.com, we were able to see the stress cracks in our infrastructure _before_ introducing the features to users in our Beta. We were able to validate our scaling strategies, identify and resolve query performance concerns, improve the onboarding experience for our upcoming Beta program, and plan future improvements as we work towards [general availability](https://gitlab.com/groups/gitlab-org/-/epics/9902).\n\nWe have also proved to ourselves that Product Analytics can stand up to future customer load without making customers suffer through outages or slowness as we make the stack better.\n\n## What’s next for Product Analytics\n\nThroughout the internal release and the experiment phase, we have been talking to customers about what is and is not working with Product Analytics, especially the [built-in dashboards](https://docs.gitlab.com/ee/user/analytics/analytics_dashboards.html#product-analytics). From that feedback we have a number of improvements in mind that can't all fit here but check out our [Product Analytics direction page](https://about.gitlab.com/direction/monitor/product-analytics/#what-is-next-for-us-and-why) to see the latest on what improvements are coming next.\n\nTalking directly with users of Product Analytics is also informing the next iterations of other features like [Customizable Dashboards](https://gitlab.com/groups/gitlab-org/-/epics/8574) and [Visualization Designer](https://gitlab.com/groups/gitlab-org/-/epics/9386). 
The team is also exploring ways to [leverage AI](https://gitlab.com/groups/gitlab-org/-/epics/10335) to make it easier to find and understand Product Analytics data. \n\n## Share your feedback\n\nIt is an exciting time in product analytics and we cannot wait for you to try the feature out yourself! You can add ideas or comments to our [feedback issue](https://gitlab.com/gitlab-org/gitlab/-/issues/391970). We look forward to hearing from you!\n\n## Read more \"Building GitLab with GitLab\"\n\n- [Building GitLab with GitLab: How GitLab.com inspired Dedicated](https://about.gitlab.com/blog/building-gitlab-with-gitlabcom-how-gitlab-inspired-dedicated/)\n- [Building GitLab with GitLab: Web API Fuzz Testing](https://about.gitlab.com/blog/building-gitlab-with-gitlab-api-fuzzing-workflow/)\n",[9,680,728,1210],"inside GitLab",{"slug":1212,"featured":93,"template":683},"building-gitlab-with-gitlab-stress-testing-product-analytics","content:en-us:blog:building-gitlab-with-gitlab-stress-testing-product-analytics.yml","Building Gitlab With Gitlab Stress Testing Product Analytics","en-us/blog/building-gitlab-with-gitlab-stress-testing-product-analytics.yml","en-us/blog/building-gitlab-with-gitlab-stress-testing-product-analytics",{"_path":1218,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":1219,"content":1225,"config":1232,"_id":1234,"_type":16,"title":1235,"_source":18,"_file":1236,"_stem":1237,"_extension":21},"/en-us/blog/enhanced-migration-from-bitbucket-server-and-bitbucket-cloud-to-gitlab",{"title":1220,"description":1221,"ogTitle":1220,"ogDescription":1221,"noIndex":6,"ogImage":1222,"ogUrl":1223,"ogSiteName":697,"ogType":698,"canonicalUrls":1223,"schema":1224},"Enhanced migration from Bitbucket Server and Bitbucket Cloud to GitLab","Learn about performance improvements and more when migrating from Bitbucket Server and Cloud to 
GitLab.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749668776/Blog/Hero%20Images/julia-craice-faCwTallTC0-unsplash.jpg","https://about.gitlab.com/blog/enhanced-migration-from-bitbucket-server-and-bitbucket-cloud-to-gitlab","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Enhanced migration from Bitbucket Server and Bitbucket Cloud to GitLab\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Magdalena Frankiewicz\"}],\n        \"datePublished\": \"2023-11-30\",\n      }",{"title":1220,"description":1221,"authors":1226,"heroImage":1222,"date":1228,"body":1229,"category":14,"tags":1230},[1227],"Magdalena Frankiewicz","2023-11-30","_Atlassian is ending support for all Server products in February 2024. Learn more about the [benefits of migrating from Atlassian to GitLab](https://about.gitlab.com/move-to-gitlab-from-atlassian/)._\n\nStarting [from February 15, 2024](https://about.gitlab.com/blog/atlassian-server-ending-move-to-a-single-devsecops-platform/), Atlassian will no longer offer technical support, security updates, or vulnerability fixes for their Server products, including Bitbucket Server.\n\nThrough improvements to our Bitbucket Server and Bitbucket Cloud importers, we've lowered the barrier to switch to GitLab, especially for large Bitbucket projects. We are happy to be able to offer a quick and effortless way to move your data to GitLab!\n\nLet's take a look at some of these improvements.\n\n## Improvements to imports of large projects\n\nGitLab has offered Bitbucket Server and Bitbucket Cloud importers for a long time. However, these importers operated sequentially in only one Sidekiq background job, which led to timeouts on imports of larger projects.\n\nTo solve the timeouts problem, we introduced parallel, asynchronous importers that split the work into smaller background jobs. 
This change was introduced in:\n\n- [GitLab 16.1](https://gitlab.com/gitlab-org/gitlab/-/issues/411534) for the Bitbucket Server importer\n- [GitLab 16.6](https://gitlab.com/gitlab-org/gitlab/-/issues/412614) for the Bitbucket Cloud importer\n\nThis change:\n\n- ensures that the import process doesn’t time out on a single worker\n- spreads the number of calls we make to Bitbucket API, reducing the risk of running into rate limiting\n\nWe also improved error handling so that errors raised on single objects don't stop the whole import from completing.\n\n## More improvements\n\nRefactoring importers to be parallel was a crucial improvement, but not the only one we have made to our importers. We also worked to:\n\n- improve the integrity of imported data\n- extend the types of data that we import\n\nBecause Bitbucket Server and Bitbucket Cloud are separate products and require separate importers, the improvements we introduced differ for each importer. We describe them in the sub-sections below.\n\n### Bitbucket Server importer\n\nIn GitLab 16.5, we [fixed](https://gitlab.com/gitlab-org/gitlab/-/merge_requests/131894) a problem when imported merged and closed merge requests had no commit data associated with them, leaving the diffs empty.\n\nIn Gitlab 16.3, we began [importing reviewers](https://gitlab.com/gitlab-org/gitlab/-/issues/416611) and in Gitlab 16.6, we began importing [pull request approvals](https://gitlab.com/gitlab-org/gitlab/-/merge_requests/135256).\n\n### Bitbucket Cloud importer\n\nIn GitLab 16.6, we fixed a problem that users encountered when a pull request on Bitbucket Cloud was squashed and merged, and the branch deleted. When these pull requests were imported to GitLab, the resulting merge requests didn't have associated commits. The problem was addressed by associating merge commits to imported merge requests.\n\nNotes on issues and pull requests can contain references (links) to code, issues, comments, pull requests, and more. 
Previously, these were imported as is, which left comments with strangely formatted, unclickable links. We [fixed this](https://gitlab.com/gitlab-org/gitlab/-/merge_requests/131382) in GitLab 16.6 by converting refs to GitLab refs. Also, we [no longer import deleted notes](https://gitlab.com/gitlab-org/gitlab/-/merge_requests/133208), which caused data errors.\n\nAlso for the Bitbucket Cloud importer, we began [importing LFS objects](https://gitlab.com/gitlab-org/gitlab/-/merge_requests/133182) in GitLab 16.5 and [pull request reviewers](https://gitlab.com/gitlab-org/gitlab/-/merge_requests/131134) in GitLab 16.6.\n\n## Take advantage of importers today\n\nWith the improvements described, the experience of switching from Bitbucket Server or Bitbucket Cloud to the GitLab DevSecOps platform is better than ever! Check out the [Bitbucket Server importer documentation](https://docs.gitlab.com/ee/user/project/import/bitbucket_server.html) or the [Bitbucket Cloud importer documentation](https://docs.gitlab.com/ee/user/project/import/bitbucket.html) to get started today.\n\nFor GitLab self-managed instances, to benefit from parallel Bitbucket Cloud importer, administrators must enable the `bitbucket_parallel_importer` [feature flag](https://docs.gitlab.com/ee/administration/feature_flags.html). 
The Bitbucket Server importer is always parallel on GitLab self-managed and GitLab.com.",[484,1231,233],"DevOps platform",{"slug":1233,"featured":6,"template":683},"enhanced-migration-from-bitbucket-server-and-bitbucket-cloud-to-gitlab","content:en-us:blog:enhanced-migration-from-bitbucket-server-and-bitbucket-cloud-to-gitlab.yml","Enhanced Migration From Bitbucket Server And Bitbucket Cloud To Gitlab","en-us/blog/enhanced-migration-from-bitbucket-server-and-bitbucket-cloud-to-gitlab.yml","en-us/blog/enhanced-migration-from-bitbucket-server-and-bitbucket-cloud-to-gitlab",{"_path":1239,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":1240,"content":1246,"config":1251,"_id":1253,"_type":16,"title":1254,"_source":18,"_file":1255,"_stem":1256,"_extension":21},"/en-us/blog/gitlab-at-aws-re-invent-2023",{"title":1241,"description":1242,"ogTitle":1241,"ogDescription":1242,"noIndex":6,"ogImage":1243,"ogUrl":1244,"ogSiteName":697,"ogType":698,"canonicalUrls":1244,"schema":1245},"GitLab at AWS re:Invent 2023","GitLab and AWS have streamlined development and security for DevSecOps teams. Learn how in lightning talks, sessions, live demos, and more.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749664472/Blog/Hero%20Images/gitlabflatlogomap.png","https://about.gitlab.com/blog/gitlab-at-aws-re-invent-2023","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"GitLab at AWS re:Invent 2023\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Darwin Sanoy\"}],\n        \"datePublished\": \"2023-11-22\",\n      }",{"title":1241,"description":1242,"authors":1247,"heroImage":1243,"date":1248,"body":1249,"category":14,"tags":1250},[1145],"2023-11-22","GitLab will be at AWS re:Invent 2023 in Las Vegas, November 27 to December 1, to demonstrate how the GitLab DevSecOps Platform on Amazon Web Services delivers secure, enterprise-grade AI throughout the software development lifecycle. 
Stop by Booth #1152 in the Security Zone for [lightning talks, live demos, customer sessions, and more](https://about.gitlab.com/events/aws-reinvent/) all week. \n\nMake sure to [check out our event page and calendar](https://about.gitlab.com/events/aws-reinvent/) to find sessions, locations, opportunities to meet with GitLab, and more (note, they do not appear in the AWS event app). Some sessions will also be available on-demand after the conference.\n\nHere are some of the lightning talks GitLab will be presenting:\n\n**Frictionless developer experience: Using human habits to accelerate DevSecOps maturity and increase joy**\n\nGitLab’s long-standing approach to building DevSecOps pipelines aligns with AWS’ new emphasis on frictionless developer experiences. Join this session to learn how the GitLab DevSecOps platform represents a true “shift left” by empowering and streamlining developers’ normal workflow.\n\n[Add to calendar - Nov. 30](https://content.gitlab.com/viewer/65412018ca9e0b9d4b50acb2?iid=654966e4f2269af78f005ba1)\n\n**New integrations and solutions for using GitLab and AWS together**\n\nIn recent months, AWS and GitLab have built new service integrations for source control, CI, and CD. You'll learn how GitLab integrates with AWS CodeStar Connections, Amazon CodeGuru, OpenID, and more, as well as development and deployment solutions for Serverless.com Framework and Terraform to AWS.\n\nAdd to calendar\n* [Nov. 28](https://content.gitlab.com/viewer/65412018ca9e0b9d4b50acb2?iid=654144eef011a50313dc7113)\n* [Nov. 29](https://content.gitlab.com/viewer/65412018ca9e0b9d4b50acb2?iid=654942dfef8fa23b213f0eca)\n* [Nov. 
30](https://content.gitlab.com/viewer/65412018ca9e0b9d4b50acb2?iid=65494b66a0b8daf9ca33a386)\n\n**Secure and assured Terraform development using GitLab security scanning policies and managed DevOps environments**\n\nThis lightning talk discusses and demonstrates working example code that extends GitLab's existing support for Terraform State management with full lifecycle-managed DevOps environments for merge requests, long-lived pre-production environments, production environments, and one-off experimental environments. Whether you are developing infrastructure as code specifically or embedding it with application code for the sake of easy environment support, this lightning talk has something to offer you.\n\n[Add to calendar - Nov. 28](https://content.gitlab.com/viewer/65412018ca9e0b9d4b50acb2?iid=654961043165b6f013635639)\n\n**Secure GitLab CD pipelines to AWS with OpenID Federation, OIDC, and JWT**\n\nGitLab has three ways to authenticate and authorize your CI and CD workloads into AWS environments. Adding and refining OpenID provides the ability to use an industry standard, which is the most advanced of the three. Join us to learn how to accomplish this highly secure integration option.\n\n[Add to calendar - Nov. 29](https://content.gitlab.com/viewer/65412018ca9e0b9d4b50acb2?iid=6549580763edc0caa46ea061)\n\n**Security intelligence through full integration of Amazon CodeGuru Security into GitLab**\n\nAWS CodeGuru Security has created a full integration that enables you to view scanner results in GitLab merge requests and security dashboards so you can use them to block merges in security policy merge approval rules — just like GitLab’s integrated security scanning results. Attend this lightning talk to learn more.\n\n[Add to calendar - Nov. 
28](https://content.gitlab.com/viewer/65412018ca9e0b9d4b50acb2?iid=654953f963edc0cdbf6e8c6f)\n\n## GitLab and AWS: The year in review\nThroughout 2023, GitLab and AWS announced partner designations and new service integrations that enable development, security, and operations teams to collaborate more easily, to take advantage of AI at all stages, and to flexibly scale infrastructure to create and deploy secure software faster. \n\n#### AWS recognized GitLab as a partner in several categories\n\n- **AWS DevSecOps Partner Competency Specialty:** This specialty denotes that GitLab makes it easy for customers to [integrate security across every stage](https://about.gitlab.com/blog/aws-devsecops-competency-partner/) of the development and delivery cycles, providing rapid and contextual feedback to development, security, and ops teams.\n\n-  **Amazon Linux 2023 Ready Partner:** Amazon Linux 2023-specific RPM packages are available for GitLab, starting at [Version 16.3.0](https://docs.gitlab.com/ee/administration/package_information/supported_os.html) and for GitLab Runner. Official GitLab support for Amazon Linux 2023 also means GitLab builds the RPM packages and hosts them on our packages infrastructure, Graviton (arm64) and amd64 architectures are both supported. To install GitLab on Amazon Linux 2023, [follow these instructions](https://about.gitlab.com/install/#amazonlinux-2023). \n\nLearn more about [GitLab's AWS partner designations](https://docs.gitlab.com/ee/solutions/cloud/aws/gitlab_aws_partner_designations.html).\n\n#### AWS CodeStar Connections opens up a host of AWS service integrations\n\nAWS recently completed the integration of GitLab.com SaaS into its AWS CodeStar Connections service. This service is a foundational, shared service used by many other AWS services to connect to Git repositories outside of AWS. 
As a result, GitLab was immediately available to AWS services once this integration was completed.\n\nGitLab is available at CodeStar Connections throughout many AWS services for connectivity to Git. In addition, using a CodeStar Connection for an AWS CodePipeline opens up other service integrations that primarily rely on CodePipeline as their key integration point.\n\nHere is a visual map of the integrations that are currently available:\n\n![CodeStar Connections integrations](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749676883/Blog/Content%20Images/gitlabcodestarconnectionsintegration.png)\n\n#### AI customization with AWS CodeWhisperer\n[AWS CodeWhisperer's customization capability](https://aws.amazon.com/blogs/aws/new-customization-capability-in-amazon-codewhisperer-generates-even-better-suggestions-preview/) leverages CodeSuite Connections, allowing generative code suggestions to take into account the libraries and design patterns of your current application when suggesting new code. It does so with no ingestion of your code into the general LMM creation. AWS CodeWhisperer can be pointed to a GitLab repository. \n\n#### AWS CodeGuru and GitLab Ultimate secure scanning integration\nThe AWS CodeGuru team [built an integration with GitLab CI](https://docs.gitlab.com/ee/solutions/cloud/aws/gitlab_aws_integration.html#scm-integrations) as part of their build secure scanning capabilities. [Amazon CodeGuru Security findings](https://docs.aws.amazon.com/codeguru/latest/security-ug/get-started-gitlab.html) use GitLab’s vulnerability report formatting, enabling exports to integrate directly into GitLab Ultimate security features such as merge request views, security dashboards, and in-context remediation solutions and training. Importantly, it allows these findings to be addressed by GitLab Security Policy Merge Approval Rules. 
\n\n#### GitLab's new single-tenant Saas option sits atop AWS\nEarlier this year, GitLab launched [GitLab Dedicated](https://docs.gitlab.com/ee/subscriptions/gitlab_dedicated/), a single-tenancy solution for organizations in highly regulated industries that have complex regulatory, compliance, and data residency requirements. The fully isolated SaaS offering is hosted and managed by GitLab and deployed on AWS in a cloud region of the customer's choosing. [Learn more about how GitLab built GitLab Dedicated](https://about.gitlab.com/blog/building-gitlab-with-gitlabcom-how-gitlab-inspired-dedicated/).\n\n## Plan your week at AWS re:Invent\nFill your calendar with GitLab at AWS re:Invent! [Check out our calendar](https://about.gitlab.com/events/aws-reinvent/) of sponsored sessions, lightning talks, live demos, and more throughout the week at Booth #1152.\n",[1149,9,484,284],{"slug":1252,"featured":93,"template":683},"gitlab-at-aws-re-invent-2023","content:en-us:blog:gitlab-at-aws-re-invent-2023.yml","Gitlab At Aws Re Invent 2023","en-us/blog/gitlab-at-aws-re-invent-2023.yml","en-us/blog/gitlab-at-aws-re-invent-2023",{"_path":1258,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":1259,"content":1264,"config":1269,"_id":1271,"_type":16,"title":1272,"_source":18,"_file":1273,"_stem":1274,"_extension":21},"/en-us/blog/jenkins-gitlab-ultimate-guide-to-modernizing-cicd-environment",{"title":1260,"description":1261,"ogTitle":1260,"ogDescription":1261,"noIndex":6,"ogImage":1181,"ogUrl":1262,"ogSiteName":697,"ogType":698,"canonicalUrls":1262,"schema":1263},"Jenkins to GitLab: The ultimate guide to modernizing your CI/CD environment","Learn how to migrate from Jenkins to the integrated CI/CD of the GitLab DevSecOps Platform to deliver high-quality software rapidly.","https://about.gitlab.com/blog/jenkins-gitlab-ultimate-guide-to-modernizing-cicd-environment","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        
\"headline\": \"Jenkins to GitLab: The ultimate guide to modernizing your CI/CD environment\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Itzik Gan Baruch\"}],\n        \"datePublished\": \"2023-11-01\",\n      }",{"title":1260,"description":1261,"authors":1265,"heroImage":1181,"date":1266,"body":1267,"category":14,"tags":1268},[702],"2023-11-01","\nIn today's dynamic landscape of software development, certain requirements have become paramount for delivering high-quality software rapidly. These requirements include the need for cloud compatibility, faster development cycles, improved collaboration, containerization, enhanced development experiences, and the integration of AI-driven capabilities for better efficiency and speed. Jenkins, a longstanding and respected continuous integration (CI) tool, has admirably played a role in many teams' software development for years. However, as more teams adopt DevOps/DevSecOps strategies for their software delivery, leveraging the integrated CI that is available in a DevSecOps platform like GitLab can provide benefits that Jenkins does not. \n\nSome organizations find themselves hesitating to migrate, not because they doubt the benefits of a top-tier [CI/CD](https://about.gitlab.com/topics/ci-cd/) solution such as GitLab, but due to the complexities of their existing Jenkins implementations. It's understandable that such a transition can seem daunting. \n\nIn this blog, you'll find several migration strategies to help transition from Jenkins to GitLab and make the process smoother and more manageable.\n\n## Migrating to GitLab\nIt's become evident that for organizations seeking a CI/CD solution that can seamlessly support their evolving demands, GitLab emerges as a powerful game-changer. 
Let's explore why transitioning to this advanced platform is transformative for Jenkins users.\n\n### Why migrate to GitLab \nBefore we delve into the migration approaches, let's take a moment to understand GitLab CI and what makes it a compelling choice for modern CI/CD needs.\n\n> Try GitLab CI/CD today with [a free trial of Ultimate](https://gitlab.com/-/trials/new).\n\n### GitLab CI overview\nGitLab CI is an integral part of the GitLab [AI-powered](https://about.gitlab.com/gitlab-duo/) DevSecOps Platform, which offers a comprehensive and unified solution for DevSecOps and CI/CD. GitLab's design revolves around streamlining development workflows, fostering collaboration, enhancing security, and ensuring scalability.\n\n### Key features of GitLab CI\nThese are the key features of GitLab CI:\n- **Unified platform:** GitLab CI is more than just a CI/CD tool; it's part of a broader ecosystem that includes source code management, project management, security features, analytics and more. This unified platform streamlines workflows and enhances collaboration among development teams.\n- **Containerization and orchestration:** GitLab CI/CD is designed with containerization in mind, offering native support for Docker and Kubernetes. This enables seamless integration of container technologies into your CI/CD pipelines.\n- **Security by design:** Security is a top priority, and GitLab CI incorporates features such as static code analysis and vulnerability scanning to help teams identify and address security issues early in the development process.\n- **GitOps principles:** GitLab CI aligns with [GitOps principles](https://about.gitlab.com/blog/the-ultimate-guide-to-gitops-with-gitlab/), emphasizing version-controlled, declarative configurations for infrastructure and application deployments. 
This approach enhances the reliability and repeatability of deployments.\n\nGet familiar with GitLab CI with this tutorial:\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/WKR-7clknsA?si=T21Fe10Oa0rQ0SGB\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\nWith that understanding of GitLab CI's capabilities, let's explore the migration steps and strategies for Jenkins users looking to leverage the benefits of GitLab CI.\n\n## A recommended step-by-step Jenkins-to-GitLab CI migration\nWhen considering a migration from Jenkins to GitLab CI, we strongly recommend following a well-structured, step-by-step approach to ensure a seamless transition. Here's our recommended process:\n1. **Pipeline assessment:** Start by conducting a comprehensive inventory of all your existing pipelines in Jenkins. This initial step will help you gain a clear understanding of the scope and complexity of the migration.\n2. **Parallel migration:** Begin the migration process by selecting individual pipelines and moving them to GitLab CI one at a time. Continue to maintain the use of Jenkins for your ongoing work during this transition to minimize disruptions.\n3. **Code verification:** We advise beginning with verification checks in CI. Run both the Jenkins and GitLab CI pipelines in parallel. This dual approach allows you to directly compare the two workflows and identify any issues in the new GitLab workflows. During this phase, keep the GitLab workflow as an optional choice while Jenkins remains required.\n4. **Continuous validation:** After running both pipelines in parallel for a full iteration, thoroughly evaluate the outcomes from each pipeline. This evaluation should consider various factors, including status codes, logs, and performance. \n5. 
**GitLab CI transition:** As you gain confidence in the reliability and effectiveness of GitLab CI through the parallel runs, make the transition to the GitLab CI workflow as the required standard while Jenkins continues to operate in the background.\n6. **Jenkins phaseout:** After a second iteration, when you are confident in the performance and stability of GitLab CI, you can begin to remove the Jenkins job from your code verification pipeline. This successful transition will enable you to retire Jenkins from this particular aspect of your CI/CD process.\n\nThis recommended approach ensures that your migration is a gradual evolution, allowing you to identify and address any issues or discrepancies before fully committing to GitLab CI. Running Jenkins and GitLab CI pipelines in parallel provides valuable insights and ensures the effective streamlining of your CI/CD processes.\n\n## Preparing for migration: Training and communication\nTo ensure a smooth and successful migration from Jenkins to GitLab CI, follow these essential steps:\n- **Stakeholder communication:** Start by announcing your migration plans and timelines to all relevant stakeholders. This includes DevOps teams, developers, and QA engineers. 
Transparency in communication is crucial to ensure that everyone understands the objectives and expectations of the migration.\n- **Knowledge-level training:** Conduct knowledge-level training sessions for your teams to promote GitLab CI adoption.\nCover topics such as using GitLab CI, understanding the YAML syntax, and how to create a basic pipeline.\nProvide team members with the knowledge and skills necessary to navigate the new GitLab CI environment effectively.\n- **Hands-on learning:** Encourage hands-on learning by pairing up developers.\nCreate opportunities for them to learn from each other's experiences throughout the migration process.\n\nBy following these instructions for training and communication, you'll build a strong foundation for a successful migration, empowering your teams to adapt and thrive in the new environment.\n\n## 3 Jenkins-to-GitLab CI migration strategies\nThere are different strategies to consider. These three strategies offer flexibility, allowing organizations to choose the path that best aligns with their specific needs and resources. Let's explore these strategies in detail to help you make an informed decision about which one suits your organization best.\n\n### Migration Strategy 1: Using GitLab CI for new projects\nThe first migration strategy involves a gradual transition. While you maintain your existing Jenkins infrastructure for ongoing projects, you introduce GitLab CI for new projects. This approach allows you to harness the modern features of GitLab CI without disrupting your current work.\n\n#### Benefits of Migration Strategy 1\nThe benefits of this approach include the following:\n- New projects can leverage GitLab CI's advanced features right from the start. 
\n- This strategy minimizes the risk of disrupting existing workflows, as your existing Jenkins setup remains intact.\n- Your team can gradually adapt to GitLab CI, building confidence and expertise without the pressure of an immediate full-scale migration.\n\n#### Challenges of Migration Strategy 1\nThe challenges of this approach include the following:\n- Operating two CI/CD platforms simultaneously can introduce complexity, especially in terms of integration and team collaboration.\n- Managing projects on different platforms may require careful coordination to ensure consistency in processes and security practices.\n\nThis strategy offers a smooth and manageable transition by allowing you to harness GitLab CI's strengths for new projects, while your existing Jenkins infrastructure continues to support ongoing work.\n\n### Migration Strategy 2: Migrating only strategic projects\nIn this strategy, you identify specific projects within your organization that stand to benefit the most from the capabilities of GitLab CI. 
Instead of preparing for a wholesale migration, you start by focusing your efforts on migrating these strategically selected projects first.\n\n#### Benefits of Migration Strategy 2\nThe benefits of this approach include the following:\n- By concentrating on key projects, you can realize significant improvements in those areas where GitLab CI aligns with specific needs.\n- This approach reduces the complexity of migrating everything at once, minimizing the potential for disruptions.\n- You can gradually build confidence with GitLab CI and its benefits before considering further migrations.\n\n#### Challenges of Migration Strategy 2\nThe challenges of this approach include the following:\n- Even though you're not migrating all projects, the chosen projects' migration can still be intricate and require careful planning.\n- Ensuring seamless collaboration between projects on different platforms may require additional attention.\n\nThis strategy allows you to maximize the impact of GitLab CI by focusing on strategic areas, minimizing risk, and gradually gaining experience with the new tool.\n\n### Migration Strategy 3: Migrating everything\nThe third strategy is a comprehensive migration where you commit to moving all your CI/CD processes, projects, and workflows to GitLab CI. This approach aims for uniformity and simplification of CI/CD across all projects. This strategy can benefit from taking an iterative approach. Consider starting with new projects, followed by migrating strategic projects, and then leverage your growing knowledge and experience with GitLab CI to complete the migration of remaining projects. 
\n\n#### Benefits of Migration Strategy 3\nThe benefits of this approach include the following:\n- Uniform CI/CD processes across all projects can streamline administration and maintenance, reducing complexity.\n- You can take full advantage of GitLab CI's modern capabilities, from Infrastructure as Code to enhanced security features.\n- As your projects grow, GitLab CI is designed to handle increased demands, ensuring long-term scalability.\n\n#### Challenges of Migration Strategy 3\nThe challenges of this approach include the following:\n- A full-scale migration can be intricate, requiring meticulous planning and implementation.\n- The transition may disrupt ongoing projects and require a significant time investment.\n- Investment in training and potential tool migration expenses should be considered.\n\nOpt for this approach if uniformity and consolidation of CI/CD processes are a high priority, and you have the resources to execute a full migration.\n\nThe migration strategy you select should align with your organization's specific needs and circumstances. In all cases, the ultimate goal is to enhance your development process with modern CI/CD tools like GitLab CI, which offers scalability, infrastructure automation, security, and collaboration features that align with today's development needs.\n\n## Technical insights: How the migration works\nMoving your CI/CD workflows from Jenkins to GitLab CI is a transformative journey, and understanding how it works is vital for a successful transition.\n\n### Understanding the configurations: Jenkinsfile vs. .gitlab-ci.yml\nThe heart of your CI/CD pipeline lies in the configurations defined in your Jenkinsfile (for Jenkins) and .gitlab-ci.yml (for GitLab CI). 
While there are some similarities between these configuration files, there are notable differences as well.\n\n#### Similarities\n- Both files define the stages, jobs, and steps of your CI/CD process.\n- You specify the desired build, test, and deployment steps in both files.\n- Environment variables and settings can be configured in either file.\n\n#### Differences\n- Jenkinsfile uses Groovy for scripting, while .gitlab-ci.yml uses YAML. This change in language affects the way you write and structure your configurations.\n- The process of defining pipelines is more intuitive in .gitlab-ci.yml, with a cleaner, more human-readable syntax.\n- GitLab CI provides a wide range of built-in templates and predefined jobs, simplifying configuration and reducing the need for custom scripting.\n\n### Manually converting the pipeline configuration\nCurrently, migrating your existing Jenkins pipelines to GitLab CI is typically done manually. This means analyzing your Jenkinsfile and re-creating the equivalent configurations in .gitlab-ci.yml. While there are similarities in the concepts and structure, the differences in syntax and the specific capabilities of each platform require careful consideration during the migration.\n\n## Strategic planning for a smooth transition\nMigrating from Jenkins to GitLab CI requires meticulous planning to ensure a seamless transition. It's crucial to assess the disparities between the two systems and evaluate their impact on your workflow, considering aspects like security, cost, time, and capacity.\n\nOnce you've identified these differences and devised your migration strategy, break down the migration into key steps. These include setting up GitLab CI pipelines, securely transferring data from Jenkins to GitLab CI, and integrating GitLab CI into your existing tools and processes. 
\n\n## Case study: A seamless transition for Lockheed Martin\nLet's look at a real-world case study to illustrate the effectiveness of the \"Migrate Everything\" strategy. [Lockheed Martin](https://about.gitlab.com/customers/lockheed-martin/), the world’s largest defense contractor, had been using Jenkins for several years. As their project portfolio expanded, they realized that their Jenkins implementation with a wide variety of DevOps tools was becoming increasingly complex to manage. They were also eager to adopt modern CI/CD capabilities that Jenkins struggled to provide.\n\nIn collaboration with GitLab, Lockheed Martin decided to undertake a comprehensive migration to GitLab CI. Their goals included achieving consistency in their CI/CD processes, simplifying administration and maintenance, and taking full advantage of The GitLab Platform’s robust features.\n\nThe comprehensive migration strategy proved to be a resounding success for Lockheed Martin. With GitLab CI, they not only streamlined their CI/CD processes but achieved remarkable results. **They managed to run CI pipeline builds a staggering 80 times faster, retired thousands of Jenkins servers, and reduced the time spent on system maintenance by a staggering 90%. This monumental shift resulted in a significant increase in efficiency and productivity for Lockheed Martin.**\n\nThis case study showcases how a comprehensive migration strategy can be effective for organizations looking to leverage GitLab capabilities across all their projects.\n\nFor more in-depth insights into Lockheed Martin's successful transition to GitLab and how it streamlined their software development processes, check out [the detailed case study](https://about.gitlab.com/customers/lockheed-martin/).\n\n## GitLab documentation and support\nFor those embarking on this migration journey, GitLab offers documentation to guide you through the process. 
You can find valuable resources in GitLab's [official documentation](https://docs.gitlab.com/ee/ci/migration/jenkins.html).\n\nIn addition to documentation, GitLab's Professional Services team is available to assist organizations in their migrations. They bring expertise and experience to ensure a smooth transition. Whether it's understanding the nuances of Jenkinsfile to .gitlab-ci.yml conversion or optimizing your CI/CD workflows, their support can be invaluable.\n\n> Try GitLab CI/CD today with [a free trial of Ultimate](https://gitlab.com/-/trials/new).\n",[749,279,771,233,9,1128],{"slug":1270,"featured":6,"template":683},"jenkins-gitlab-ultimate-guide-to-modernizing-cicd-environment","content:en-us:blog:jenkins-gitlab-ultimate-guide-to-modernizing-cicd-environment.yml","Jenkins Gitlab Ultimate Guide To Modernizing Cicd Environment","en-us/blog/jenkins-gitlab-ultimate-guide-to-modernizing-cicd-environment.yml","en-us/blog/jenkins-gitlab-ultimate-guide-to-modernizing-cicd-environment",{"_path":1276,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":1277,"content":1283,"config":1289,"_id":1291,"_type":16,"title":1292,"_source":18,"_file":1293,"_stem":1294,"_extension":21},"/en-us/blog/migrating-from-bamboo-to-gitlab-cicd",{"title":1278,"description":1279,"ogTitle":1278,"ogDescription":1279,"noIndex":6,"ogImage":1280,"ogUrl":1281,"ogSiteName":697,"ogType":698,"canonicalUrls":1281,"schema":1282},"How to migrate from Bamboo to GitLab CI/CD","With the fast approaching EOL of Atlassian Server products, including Bamboo, this blog post explains how users of Bamboo can migrate to GitLab CI/CD.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749658924/Blog/Hero%20Images/securitylifecycle-light.png","https://about.gitlab.com/blog/migrating-from-bamboo-to-gitlab-cicd","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How to migrate from Bamboo to GitLab CI/CD\",\n        
\"author\": [{\"@type\":\"Person\",\"name\":\"Abubakar Siddiq Ango\"}],\n        \"datePublished\": \"2023-10-26\",\n      }",{"title":1278,"description":1279,"authors":1284,"heroImage":1280,"date":1286,"body":1287,"category":14,"tags":1288},[1285],"Abubakar Siddiq Ango","2023-10-26","\n_Atlassian is ending support for all Server products in February 2024. [Learn more about the benefits of migrating from Atlassian to GitLab here](https://about.gitlab.com/move-to-gitlab-from-atlassian/)._\n\nThe clock is ticking towards the February 15, 2024, [end-of-life date Atlassian has set for their Server products](https://about.gitlab.com/blog/atlassian-server-ending-move-to-a-single-devsecops-platform/). Users whose workflows rely on on-premises deployments of Atlassian Server products are faced with the choice of upgrading to Atlassian’s Data Center or Cloud products or finding alternatives. \n\nOne of the Atlassian products that is impacted is Bamboo, a CI/CD solution. Whether you're simply looking for a new CI/CD tool or you're looking to consolidate your entire toolchain, the Atlassian Server end of life is a great opportunity to make the move to GitLab to take advantage of the automation, scalability, and security of an end-to-end DevSecOps platform.\n\nIn this blog post, we'll cover the steps you can take to migrate your Bamboo CI/CD setup to GitLab CI/CD.\n\n## How is GitLab CI/CD different from Bamboo?\n\n### Organization\n\nBamboo is structured around Projects and Plans. CI/CD jobs are grouped into stages, which are defined in a Bamboo plan along with other configurations that determine how jobs run. Bamboo projects are used to organize plans, which are classified into Build and Deployment plans. \n\nAs the name implies, Build plans can be configured to pull code from configured repositories and generate artifacts. These artifacts are picked by jobs defined in Deployment plans and are deployed to environments configured in Bamboo. 
Bamboo jobs are also composed of tasks, which can be a script, a task to pull code from a repository, or a task specific to a technology.\n\nYou also need to add code repositories to a Bamboo plan or a project, making it available to all plans under it, and set triggers for how Bamboo detects changes and runs builds.\n\nGitLab is organized differently. Everything is in a single platform, with your CI/CD configuration provided as part of your code in a `.gitlab-ci.yml` file, from a group’s compliance pipeline configuration, or Auto DevOps when enabled and the `.gitlab-ci.yml` file is not found in a project.\n\nGitlab CI/CD configurations are composed of jobs, grouped into stages. How the jobs are triggered can be controlled by CI/CD `rules` and there is no separate configuration for deployments. Deployment jobs can be defined in the same CI/CD script in a `deploy` stage, with the [deployment environment](https://docs.gitlab.com/ee/ci/environments/) set.\n\n### Agents vs Runners\n\nBamboo uses [Agents](https://confluence.atlassian.com/confeval/development-tools-evaluator-resources/bamboo/bamboo-remote-agents-and-local-agents)\nto run builds and deployments. These can be local agents running on the Bamboo server or\nremote agents running external to the server. GitLab uses a similar concept to agents,\ncalled [GitLab Runner](https://docs.gitlab.com/runner/), which uses [executors](https://docs.gitlab.com/runner/executors/)\nto run builds. Examples of executors include SSH, Docker, and Kubernetes. You can choose to\nuse GitLab [SaaS runners](https://docs.gitlab.com/ee/ci/runners/) or deploy your own [self-managed runners](https://docs.gitlab.com/runner/install/index.html).\n\n### Bamboo Specs vs .gitlab-ci.yml file\n\nBamboo is largely configured via the Bamboo UI but can also be configured as code using Bamboo Specs. Bamboo Specs can be defined using Java and other JVM languages or using YAML, with Java having more complete feature coverage than YAML. 
Bamboo Specs can be defined and stored in spec repositories, then linked to Bamboo projects.\n\nThe `.gitlab-ci.yml` file is central to the CI/CD workflow in GitLab. When included in a project, the defined configurations are executed against the project; otherwise, [Auto DevOps](https://docs.gitlab.com/ee/topics/autodevops/) automatically builds and deploys your application, when enabled. Templates and CI/CD components can also be added to `.gitlab-ci.yml` for complex use cases.\n\n## How GitLab steps up your workflow\n\nIn addition to building and deploying your application, GitLab provides a suite of features that allows for building secure applications fast and efficiently. These include:\n\n- [Application security](https://docs.gitlab.com/ee/user/application_security/): GitLab analyzes your application across the stages of the software development lifecycle with security scans such as Static Application Security Testing (SAST), Secret Detection, Infrastructure as Code (IaC) Scanning, Dependency Scanning, License Scanning, Coverage-guided Fuzz Testing, Container Scanning, API Security, Dynamic Application Security Testing (DAST), and Operational Security Scanning.\n- Compliance and security policies: Understanding the results of security scans and putting policies in place is crucial to ensuring secure applications. 
You can set up Scan Execution or Result policies to ensure additional scans or approval requirements are added to comply with regulatory or self-imposed requirements.\n- [CI/CD catalog](https://docs.gitlab.com/ee/ci/components/catalog.html): Parts of CI/CD configurations that are used across multiple projects can be turned into [components](https://docs.gitlab.com/ee/ci/components/index.html#components-repository) stored in component repositories that are discoverable in the CI/CD catalog.\n- Packages and registries: Custom or local replicas of popular packages can be hosted with the [GitLab Package Registry](https://docs.gitlab.com/ee/user/packages/package_registry/index.html). You can also host container images with the [GitLab Container Registry](https://docs.gitlab.com/ee/user/packages/container_registry/index.html) and Terraform modules with the [GitLab Terraform Module Registry](https://docs.gitlab.com/ee/user/packages/terraform_module_registry/index.html). If you frequently use public images or packages, you can use the [Dependency Proxy](https://docs.gitlab.com/ee/user/packages/dependency_proxy/index.html) to maintain a local cache.\n\n[Learn more about other ways to automate your entire workflow with GitLab CI/CD](https://about.gitlab.com/solutions/continuous-integration/).\n\n## Convert Bamboo Specs to .gitlab-ci.yml script\n\nFor the purpose of this blog post, we will focus on [Bamboo YAML Specs](https://docs.atlassian.com/bamboo-specs-docs/9.3.0/specs.html?yaml). You can export your Bamboo Plans as YAML Spec — [learn more here](https://confluence.atlassian.com/bamboo/exporting-existing-plan-configuration-to-bamboo-yaml-specs-1018270696.html). Now, lets walk through converting your Bamboo YAML Specs into GitLab CI/CD configuration.\n\n### Container image\n\nFirst is defining the container image of the container your jobs will run in. By default, Bamboo uses Agents, which depend on how the host machines are configured. 
You can replicate the Agent’s environment into a container image hosted in the [GitLab Container Registry](https://docs.gitlab.com/ee/user/packages/container_registry/).  \n\nIf you already run Bamboo jobs in a container image, it will look like this in your spec:\n\n```yaml\n---\nversion: 2\n# ...\ndocker: ubuntu\n```\n\nThis might be defined at the plan or job level. You can define it in GitLab as follows:\n\n```yaml\nimage: ubuntu\n```\n\n[Learn more about running CI/CD jobs in containers here](https://docs.gitlab.com/ee/ci/docker/using_docker_images.html). If your use case does not include containers, you can explore [other executors](https://docs.gitlab.com/runner/executors/).\n\n### Stages\n\nIn Bamboo, stages and their list of jobs are defined first, before the job definitions:\n\n```yaml\nversion: 2\nstages:\n  - First Stage:\n      jobs:\n        - Job 1A \n        - Job 1B\n  - Second Stage:\n      jobs:\n        - Job 2A \n        - Job 2B\n\nJob 1A:\n  tasks:\n    - clean\n    - script\n        - touch file1A.txt\n\nJob 1B:\n  tasks:\n    - clean\n    - script\n        - touch file1B.txt\n\nJob 2A:\n  tasks:\n    - clean\n    - script\n        - touch file2A.txt\n\nJob 2B:\n  tasks:\n    - clean\n    - script\n        - touch file2B.txt\n```\n\nIn GitLab, you list your stages in the order in which you want their jobs to run:\n\n```yaml\nstages:\n  - build\n  - test\n  - deploy\n\njob1:\n  stage: build\n  script:\n    - echo \"This job compiles code.\"\n\njob2:\n  stage: test\n  script:\n    - echo \"This job tests the compiled code. It runs when the build stage completes.\"\n\njob3:\n  script:\n    - echo \"This job also runs in the test stage\".\n\njob4:\n  stage: deploy\n  script:\n    - echo \"This job deploys the code. It runs when the test stage completes.\"\n  environment: production\n```\n\nAll the jobs in a stage run in parallel and when they succeed, execution proceeds to the next stage. 
This only changes in complex pipelines where a job depends on another using [`needs`](https://docs.gitlab.com/ee/ci/yaml/index.html#needs).\n\n### Variables\n\nBamboo has System, Global, Project, Plan, and Build-specific variables, which can be accessed using the format `${system.variableName}` for system variables and `${bamboo.variableName}` for others. Periods (.) are replaced by underscores (_) when variables are accessed in scripts.\n\n```yaml\nversion: 2\n# ...\nvariables:\n  username: admin\n  releaseType: milestone\n\nDefault job:\n  tasks:\n    - script: echo 'Release Type is $bamboo_releaseType'\n```\n\nIn GitLab, variables can be defined at group, project, CI Script, and job levels. In GitLab self-managed and GitLab Dedicated, administrators can define variables at the instance level. GitLab allows protecting, masking, and expanding variables. Protected variables are only accessible for pipelines running against the default or protected branches. [Learn more about CI/CD variables](https://docs.gitlab.com/ee/ci/variables/index.html) and [where you can use them](https://docs.gitlab.com/ee/ci/variables/where_variables_can_be_used.html).\n\nHere is an example:\n\n```yaml\nvariables:\n  GLOBAL_VAR: \"A global variable\"\n\njob1:\n  variables:\n    JOB_VAR: \"A job variable\"\n  script:\n    - echo \"Variables are '$GLOBAL_VAR' and '$JOB_VAR'\"\n\njob2:\n  script:\n    - echo \"Variables are '$GLOBAL_VAR' and '$JOB_VAR'\"\n```\n\n### Build Jobs\n\nBamboo Build Jobs are composed of tasks, each of which is a small unit of work that can be anything from checking out source code to injecting variables or running a script.\n\n```yaml\nversion: 2\nstages:\n  - Run Tests:\n      jobs:\n        - Test Ruby \n\nTest Ruby :\n  key: TEST\n  tasks:\n  - checkout:\n      force-clean-build: false\n      description: Checkout Default Repository\n  - script:\n      interpreter: SHELL\n      scripts:\n      - |-\n        ruby -v  # Print out ruby version for debugging\n        
bundle config set --local deployment true  \n        bundle install -j $(nproc)\n        rubocop\n        rspec spec\n      description: run bundler & rspec\n\n```\n\nIn this example, the plan has two tasks, checkout and script. The checkout tasks pull an updated version of the code repository, which is made available for the script task to execute its commands against. \n\n[Jobs in GitLab](https://docs.gitlab.com/ee/ci/jobs/) are composed of script commands:\n\n```\nimage: ruby:latest\n\nstages:\n  - test\n\nrspec:\n  stage: test\n  script:\n    - ruby -v\n    - bundle config set --local deployment true \n    - bundle install -j $(nproc)\n    - rubocop\n    - rspec spec\n```\n\nIn the example above, the stage the job belongs to is specified with the `stage` keyword and the commands to be executed by the GitLab runner for the job are listed under `script`.\n\nIn Bamboo, you can use [executables](https://confluence.atlassian.com/bamboo/defining-a-new-executable-capability-289277164.html), such as Ant, Maven, or PHPUnit, in a task to build your application. In GitLab, you can package the binaries you need in a custom container image and use it as your CI/CD image.\n\n### Deployment jobs\n\nIn Bamboo, Deployment projects organize software releases or environments applications are deployed to. A deployment plan can have a release definition:\n\n```yaml\n---\nversion: 2\n\ndeployment:\n  name: Release Software\n  source-plan: BUILD-APP\n\nrelease-naming: release-1.1\n```\n\nFor releases, you specify the plan it should get the generated artifacts from. 
And for deployment for environments:\n\n```yaml\n---\nversion: 2\n# ...\nenvironments:\n  - Test\n  - QA\n  - Prod\n\nTest:\n  tasks:\n    - clean\n    - artifact-download:\n        destination: /workdir\n```\n\nIn GitLab CI/CD, you can create a [deployment job](https://docs.gitlab.com/ee/ci/jobs/#deployment-jobs)\nthat deploys to an [environment](https://docs.gitlab.com/ee/ci/environments/index.html) or create a [release](https://docs.gitlab.com/ee/user/project/releases/).\nFor deploying to an environment, you use the [`environment`](https://docs.gitlab.com/ee/ci/yaml/#environment) keyword:\n\n```yaml\ndeploy-to-production:\n  stage: deploy\n  script:\n    - # Run Deployment script\n    - ./.ci/deploy_prod.sh\n  environment:\n    name: production\n```\n\nIf you are creating a release instead, you use the [`release`](https://docs.gitlab.com/ee/ci/yaml/#release)\nkeyword along with the [release-cli](https://gitlab.com/gitlab-org/release-cli/-/tree/master/docs)\ntool to create releases for [Git tags](https://docs.gitlab.com/ee/user/project/repository/tags/).\nThe `release` section is executed after the `script` section, which must exist.\nIf you don’t have any script commands to run, you can put a placeholder command;\nfor example, `echo` a message.\n\n```yaml\nrelease_job:\n  stage: release\n  image: registry.gitlab.com/gitlab-org/release-cli:latest\n  rules:\n    - if: $CI_COMMIT_TAG                  # Run this job when a tag is created manually\n  script:\n    - echo \"Building release version\"\n  release:\n    tag_name: $CI_COMMIT_TAG\n    name: 'Release $CI_COMMIT_TAG'\n    description: 'Release created using the release-cli.'\n```\n\n### Rules and workflows\n\nIn Bamboo, triggers can be used to control how Jobs are executed. Triggers can be a periodic poll of the repository for changes or a webhook that notifies Bamboo of changes to the repository. 
Trigger conditions can be enabled in the Bamboo web UI to make sure the build only runs if other plans are passing.\n\nExample of a trigger:\n\n```yaml\n---\nversion: 2\ntriggers:\n  - polling: 130\n  - cron: 0 * * * ? *\n```\n\nIn GitLab, [CI/CD pipelines](https://docs.gitlab.com/ee/ci/pipelines/) can be triggered by a commit/push, a merge, manually, on schedule, or with [pipeline subscriptions](https://docs.gitlab.com/ee/ci/pipelines/#trigger-a-pipeline-when-an-upstream-project-is-rebuilt). Jobs in a pipeline can further be controlled using `rules` or `workflow`. Learn more about [Job Control](https://docs.gitlab.com/ee/ci/jobs/job_control.html) and [pipeline workflows](https://docs.gitlab.com/ee/ci/yaml/workflow.html#) in GitLab CI/CD. \n\nHere's an example using `rules` in GitLab CI/CD:\n\n```yaml\nworkflow:\n  rules:\n    - changes:\n      - .gitlab/**/**.md\n      when: never\n```\n In this example, pipelines are never executed when `.md` files changed in the `.gitlab` folder.\n\n#### Artifacts\n\nYou can define Job artifacts using the `artifacts` keyword in both GitLab and Bamboo.\n\nIn Bamboo, artifacts can be defined as follows:\n\n```yaml\n---\nversion: 2\n# ...\n  artifacts:\n    -\n      name: Test Reports\n      location: target/reports\n      pattern: '*.xml'\n      required: false\n      shared: false\n    -\n      name: Special Reports\n      location: target/reports\n      pattern: 'special/*.xml'\n      shared: true\n```\n\nIn the Bamboo Spec above, artifacts are defined with a name, location, pattern, and optionally\nthe ability to share the artifacts with other jobs or plans. 
You can go further to define jobs that\ncan subscribe to the artifact.\n\n`artifact-subscriptions` is used to access artifacts from another job in the same plan:\n\n```yaml\nTest app:\n  artifact-subscriptions:\n    -\n      artifact: Test Reports\n      destination: deploy\n```\n\n`artifact-download` is used to access artifacts from jobs in a different plan.\n\n```yaml\n---\nversion: 2\n# ...\n  tasks:\n    - artifact-download: \n        source-plan: PROJECTKEY-PLANKEY\n```\n\nYou need to provide the key of the plan you are downloading artifacts from in the `source-plan` keyword.\n\nIn GitLab, all artifacts from completed jobs in the previous stages are downloaded by default.\nHere is an example of an artifact definition in GitLab:\n\n```yaml\npdf:\n  script: xelatex mycv.tex\n  artifacts:\n    name: \"pdf-files\"\n    public: false\n    untracked: true\n    paths:\n      - pdfs/\n    exclude:\n      - pdfs/*.tex\n```\n\nIn the CI/CD script above:\n\n- The name of the artifact is specified laterally. You can choose to make it dynamic by using a CI/CD variable.\n- The `public` keyword is used to set whether the artifact should be publicly available. This is not enabled by default\n  on self-managed GitLab instances. An administrator can enable it with the [feature flag](https://docs.gitlab.com/ee/administration/feature_flags/)\n  named `non_public_artifacts`.\n- You can set the `untracked` to include or exclude Git untracked files along with those specified using `paths`.\n\nRead more about GitLab CI/CD [job artifacts](https://docs.gitlab.com/ee/ci/jobs/job_artifacts.html).\n\n## How to plan your migration\n\nPlanning a migration to Gitlab CI/CD from Bamboo doesn't start with converting your Bamboo plan to GitLab CI/CD scripts. It starts with aligning with your leadership and stakeholders and clearly communicating the vision of the migration. 
[Check out our documentation to learn more about managing organizational changes](https://docs.gitlab.com/ee/ci/migration/plan_a_migration.html). Once you have the necessary buy-in, you can proceed with the following steps:\n\n- [Import your projects](https://docs.gitlab.com/ee/user/project/import/) to GitLab.\n- Identify the necessary binaries and build tools needed to build your application, along with their dependencies.\n- Define the flow of your pipeline, which jobs depend on each other, and the necessary triggers.\n- Learn more about [key GitLab CI/CD features](https://docs.gitlab.com/ee/ci/index.html).\n- Identify the credentials and variables needed in your pipeline and define them in the variable section of your project's CI/CD settings or using a secret manager.\n- Follow [this tutorial](https://docs.gitlab.com/ee/ci/quick_start/index.html) to create your first GitLab pipeline; you can also explore more [complex pipelines](https://docs.gitlab.com/ee/ci/quick_start/tutorial.html).\n- Iterate and test your GitLab CI/CD pipelines and review [.gitlab-ci.yml keyword reference](https://docs.gitlab.com/ee/ci/yaml/index.html).\n\nReady to make the move? 
[We’re here to help](https://about.gitlab.com/sales/).\n",[9,484],{"slug":1290,"featured":6,"template":683},"migrating-from-bamboo-to-gitlab-cicd","content:en-us:blog:migrating-from-bamboo-to-gitlab-cicd.yml","Migrating From Bamboo To Gitlab Cicd","en-us/blog/migrating-from-bamboo-to-gitlab-cicd.yml","en-us/blog/migrating-from-bamboo-to-gitlab-cicd",{"_path":1296,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":1297,"content":1302,"config":1309,"_id":1311,"_type":16,"title":1312,"_source":18,"_file":1313,"_stem":1314,"_extension":21},"/en-us/blog/atlassian-server-ending-move-to-a-single-devsecops-platform",{"title":1298,"description":1299,"ogTitle":1298,"ogDescription":1299,"noIndex":6,"ogImage":1280,"ogUrl":1300,"ogSiteName":697,"ogType":698,"canonicalUrls":1300,"schema":1301},"Atlassian Server ends: Time to move to integrated DevSecOps","Atlassian is about to end support for Server products. Learn why now is the time to make the upgrade to GitLab’s single DevSecOps platform.","https://about.gitlab.com/blog/atlassian-server-ending-move-to-a-single-devsecops-platform","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Atlassian Server ending: Goodbye disjointed toolchain, hello DevSecOps platform\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Dave Steer\"}],\n        \"datePublished\": \"2023-09-26\",\n      }",{"title":1303,"description":1299,"authors":1304,"heroImage":1280,"date":1306,"body":1307,"category":14,"tags":1308},"Atlassian Server ending: Goodbye disjointed toolchain, hello DevSecOps platform",[1305],"Dave Steer","2023-09-26","\nThe February 15, 2024, end-of-life date for Atlassian Server is fast approaching. If your software development workflows rely on on-premises deployments of Atlassian Server products such as Bitbucket Server for source code management, Bamboo Server for CI/CD, or Jira Server for Agile Planning, you’re faced with a choice. 
You can settle for the Atlassian options that remain available to you, or you can take a more forward-looking path: Make the move to a single AI-powered DevSecOps platform.\n\n## Atlassian Server end of life: Migration made easy\nMigrating to a DevSecOps platform is simple: With GitLab’s various importer tools you can quickly import repositories, Jira issues, and Bamboo jobs. Here are a few resources you can use to get started, whether you’re ready to adopt the whole GitLab platform or you’d like to move one service at a time:\n* [Import your project from Bitbucket Cloud to GitLab](https://docs.gitlab.com/ee/user/project/import/bitbucket.html)\n* [Import your project from Bitbucket Server to GitLab](https://docs.gitlab.com/ee/user/project/import/bitbucket_server.html)\n* [Import your Jira project issues to GitLab](https://docs.gitlab.com/ee/user/project/import/jira.html)\n* [Integrate Jira with GitLab](https://docs.gitlab.com/ee/integration/jira/)\n\nAtlassian discontinuing Server presents the perfect opportunity to consolidate your toolchain, increase developer efficiency, and implement DevSecOps. Let’s take a look at the benefits you can expect from making this move.\n\n## Less complexity, more productivity\nDevSecOps enables companies to build software faster, more efficiently, and more securely. At the same time, the proliferation of DevOps tools is creating additional complexity in how companies develop, secure, and deploy software, which in turn is costing companies time and money. GitLab’s [2023 Global DevSecOps Survey](https://about.gitlab.com/developer-survey/previous/2023/) found that 84% of organizations are using between two and ten DevOps tools, with 69% of developers spending at least a quarter of their time maintaining and integrating toolchains. Even if some of those various DevOps tools are from the same vendor, they often require work in the background to set up, secure, and maintain the integrations. 
That’s a significant amount of effort that teams could be using to deliver value to customers.\n\nLet’s say you’re an Atlassian shop. You’re using Bitbucket Server for source code management, Bamboo Server for CI/CD, Zephyr for test case management, Jira Server for agile planning, and numerous other third-party tools for security scanning, vulnerability tracking, and more. As the Atlassian Server end of life approaches, you know that continuing to use Server without security updates and vulnerability fixes puts your company and customers at risk — a major no-go for security and compliance reasons. You have the option to move to Atlassian Cloud or, if you need to remain on-premises, Atlassian Data Center. But transitioning requires time, effort, and significant planning. If you’re already dedicating resources to making this change, why not take the opportunity to simplify matters by consolidating all of those tools into a single [AI-powered DevSecOps platform](https://about.gitlab.com/blog/categories/ai-ml/) instead?\n\nAccording to our 2023 survey, the top benefits of migrating to a DevSecOps platform include cost and time savings, increased efficiency, and better security. Plus, 90% of developers whose organizations are using a platform said they feel they’re able to effectively identify and mitigate security vulnerabilities, among other benefits.\n\n> How much is your toolchain costing you? [See how much you can save with the GitLab DevSecOps Platform](https://about.gitlab.com/calculator/roi/).\n\n## Improved developer experience\nTime spent maintaining and integrating toolchains isn’t just money down the drain for the organization — it’s also a drain on developer satisfaction. 
[Helping organizations better support their developers](https://about.gitlab.com/blog/the-gitlab-quarterly-how-our-latest-beta-releases-support-developers/) has always been a priority for GitLab, and we’re confident in our position that a single DevSecOps platform is the best way to do just that.\n\nWith a single platform, developers stay in flow and do what they do best: develop great software. More concretely, a superior developer experience empowers teams to:\n* focus on work that matters, with less context switching between different tools\n* onboard and get up to speed more quickly with only a single platform to learn\n* break down silos across product, development, security, and operations to foster better collaboration\n* receive continuous feedback and iterate more quickly to produce higher-quality output\n* automate manual tasks with AI built into the development lifecycle to avoid errors and wasted time\n\nIf your team hasn’t been able to invest in the developer experience, now is the perfect time to make it a priority. We’ve heard from GitLab customers like [Airbus](https://about.gitlab.com/customers/airbus/) and [Iron Mountain](https://about.gitlab.com/customers/iron-mountain/) that a toolchain consisting of Jira, Bitbucket, and Bamboo doesn’t offer a user-friendly experience and lacks key capabilities. 
Why migrate to a new Atlassian Cloud or Data Center setup if you’re going to be missing out on mission-critical features such as [built-in security scanning](https://docs.gitlab.com/ee/user/application_security/), [review apps](https://docs.gitlab.com/ee/ci/review_apps/), and [feature flags](https://docs.gitlab.com/ee/operations/feature_flags.html)?\n\n## Visibility at every stage\nA unified DevSecOps platform offers [out-of-the-box dashboards and reports](https://about.gitlab.com/blog/getting-started-with-value-streams-dashboard/) that provide insights on productivity, security, code quality, and more to help teams identify and fix barriers within the software development lifecycle. In our 2023 survey, respondents whose organizations are using a DevSecOps platform were significantly more likely to say they clearly understand what is happening across all stages of the software development lifecycle.\n\nHaving that extra visibility:\n* improves software delivery quality and speed by uncovering bottlenecks in software delivery\n* boosts organizational value delivery by helping organizations identify high-performing teams, maintain standards, and share best practices\n* helps organizations ensure the security of their end-to-end software supply chain and compliance with regulatory mandates\n\nYou can’t get that visibility without a single DevSecOps platform — and GitLab is the most comprehensive AI-powered DevSecOps platform on the market. With other providers, you’re still stringing together various third-party tools into complex toolchains, hampering visibility while creating integration headaches and increasing your total cost of ownership. 
From idea to value, GitLab lets teams collaborate in a single application to [shorten cycle times](https://about.gitlab.com/customers/hackerone/), [reduce development costs](https://about.gitlab.com/customers/carfax/), and [increase developer productivity](https://about.gitlab.com/customers/deutsche-telekom/).\n\n> Learn why GitLab was named a [Leader in the 2023 Gartner® Magic Quadrant™ for DevOps Platforms](https://about.gitlab.com/gartner-magic-quadrant/).\n\nBottom line: [With Atlassian Server support ending](https://www.atlassian.com/migration/assess/journey-to-cloud), you shouldn’t be forced to choose an option that isn’t right for the [future of your business](https://about.gitlab.com/blog/devsecops-platforms-help-smbs-scale-as-they-grow/). Whether you’re on the lookout for alternatives to fundamental tools in your stack that will no longer be supported, or you’re simply seeking new ways to boost the productivity of your growing team, adopting an AI-powered DevSecOps platform should be as frictionless as possible. 
That’s why GitLab offers a variety of deployment options to meet your unique needs, including self-managed, multi-tenant SaaS, and [GitLab Dedicated, our single-tenant SaaS offering](https://about.gitlab.com/blog/introducing-gitlab-dedicated/).\n\nWhen you’re ready to make the move, [we’re here to help](https://about.gitlab.com/sales/).\n",[9,111,750],{"slug":1310,"featured":6,"template":683},"atlassian-server-ending-move-to-a-single-devsecops-platform","content:en-us:blog:atlassian-server-ending-move-to-a-single-devsecops-platform.yml","Atlassian Server Ending Move To A Single Devsecops Platform","en-us/blog/atlassian-server-ending-move-to-a-single-devsecops-platform.yml","en-us/blog/atlassian-server-ending-move-to-a-single-devsecops-platform",{"_path":1316,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":1317,"content":1323,"config":1328,"_id":1330,"_type":16,"title":1331,"_source":18,"_file":1332,"_stem":1333,"_extension":21},"/en-us/blog/aws-devsecops-competency-partner",{"title":1318,"description":1319,"ogTitle":1318,"ogDescription":1319,"noIndex":6,"ogImage":1320,"ogUrl":1321,"ogSiteName":697,"ogType":698,"canonicalUrls":1321,"schema":1322},"GitLab achieves the AWS DevSecOps Partner Competency Specialty","The AWS DevSecOps Partner Competency Specialty demonstrates that GitLab is instrumental in helping customers implement better security while continuing to innovate.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749668799/Blog/Hero%20Images/securitylifecycle.png","https://about.gitlab.com/blog/aws-devsecops-competency-partner","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"GitLab achieves the AWS DevSecOps Partner Competency Specialty\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Darwin Sanoy\"}],\n        \"datePublished\": \"2023-09-25\",\n      
}",{"title":1318,"description":1319,"authors":1324,"heroImage":1320,"date":1325,"body":1326,"category":14,"tags":1327},[1145],"2023-09-25","\nGitLab recently achieved AWS's DevSecOps Partner Competency designation, a sub-specialty for the [AWS DevOps ISV Partner Competency](https://partners.amazonaws.com/partners/001E0000018YWFfIAO/GitLab,%20Inc) category. GitLab also holds the AWS DevOps ISV Partner Competency designation. AWS's partner qualification program signifies to customers that AWS has vetted GitLab's capabilities and use cases.\n\n> Attending [AWS re:Invent 2023](https://reinvent.awsevents.com/)? Find us at Booth 1152.\n\nAccording to AWS, solutions in the [DevSecOps category](https://aws.amazon.com/devops/partner-solutions/?blog-posts-cards.sort-by=item.additionalFields.createdDate&blog-posts-cards.sort-order=desc&partner-case-studies-cards.sort-by=item.additionalFields.sortDate&partner-case-studies-cards.sort-order=desc) \"make it easy for customers to integrate security across every stage of the development and delivery cycles, providing rapid and contextual feedback to development, security, and ops teams.\" The designation comprises a [validation checklist](https://apn-checklists.s3.amazonaws.com/competency/devops/technology/CenAm4qx8.html#competencyCategories) and attestation that GitLab's DevSecOps Platform meets AWS’s expectations.\n\n## GitLab's strength in DevSecOps\nGitLab's [AI-powered DevSecOps platform](https://about.gitlab.com/gitlab-duo/) helps organizations shift left on vulnerability remediation. 
At GitLab, shifting left means ensuring developers have a frictionless security defect remediation experience that enables them to immediately handle vulnerabilities in their code.\n\nGitLab's DevSecOps Platform:\n- surfaces security findings shortly after they are introduced and while the code is still being worked on\n- associates findings directly with those who changed the code\n- offers remediation guidance (including on-demand training and automated fixes)\n- supports rich, in-context collaboration for vulnerability management\n\n![GitLab + AWS Workflow](https://about.gitlab.com/images/blogimages/aws/devsecops-post/gitlabawsworkflow.png)\n\n\n![AWS Partner Logo](https://about.gitlab.com/images/blogimages/aws/devopsisvpartner.png){: .right}\n",[1149,284,1128],{"slug":1329,"featured":6,"template":683},"aws-devsecops-competency-partner","content:en-us:blog:aws-devsecops-competency-partner.yml","Aws Devsecops Competency Partner","en-us/blog/aws-devsecops-competency-partner.yml","en-us/blog/aws-devsecops-competency-partner",{"_path":1335,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":1336,"content":1342,"config":1348,"_id":1350,"_type":16,"title":1351,"_source":18,"_file":1352,"_stem":1353,"_extension":21},"/en-us/blog/eliminate-risk-with-feature-flags-tutorial",{"title":1337,"description":1338,"ogTitle":1337,"ogDescription":1338,"noIndex":6,"ogImage":1339,"ogUrl":1340,"ogSiteName":697,"ogType":698,"canonicalUrls":1340,"schema":1341},"How to use feature flags to lower risk in deployments","Follow this comprehensive tutorial to learn how to create and use feature flags in your software development environment.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749667743/Blog/Hero%20Images/flags.png","https://about.gitlab.com/blog/eliminate-risk-with-feature-flags-tutorial","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How to use feature flags to lower risk in 
deployments\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Cesar Saavedra\"}],\n        \"datePublished\": \"2023-09-20\",\n      }",{"title":1337,"description":1338,"authors":1343,"heroImage":1339,"date":1345,"body":1346,"category":14,"tags":1347},[1344],"Cesar Saavedra","2023-09-20","\nDevelopers typically use advanced techniques like canary, blue/green, and incremental deployments to reduce risk when practicing progressive delivery, a facet of continuous delivery (CD). In this tutorial, we will show you how to use feature flags, another progressive delivery option developers can use to test while in production.\n\n## What is progressive delivery?\nProgressive delivery is the ability to test in production while controlling your audience of who can exercise or see updates to an application with a high level of granularity. This approach can also be thought of as developer experimentation.\n\n## What are feature flags\nFeature flags enable you to choose what to deploy and who to deploy to in production. They allow you to define the audience for your application updates as well as the fashion in which they will be served.\n\nFeature flags help stakeholders reduce risk, allowing them to do controlled testing of features and separate feature delivery from customer launch.\n\n## Benefits of feature flags\nThe following are benefits of GitLab's feature flags.\n- **Lower risk.** Feature flags prevent unscheduled outages, control your audience in a fine-grained fashion, and can be optionally used in conjunction with canary deployments.\n- **Ease of use.** Feature flags have simple configurability and instrumentation, support user lists, and offer built-in service.\n- **Language agnostic.** Our feature flag implementation supports all of the main programming languages.\n- **Better compliance and audit capabilities.** The GitLab platform automatically records all feature flags actions.\n\n## Tutorial requirements\nThis is what you need for this tutorial:\n1. 
A GitLab account on gitlab.com SaaS\n2. Flux CLI installed on your local desktop (on my Mac, I installed it by executing `brew install fluxcd/tap/flux`)\n3. A running Kubernetes cluster, i.e. a GKE cluster with 3 e2-medium nodes\n4. `kubectl` connectivity to your Kubernetes cluster from a local Terminal window on your desktop\n\n## About this feature flag tutorial\nThis tutorial is based on a fictitious application, which is a simplified inventory system. The goal of this tutorial is to show you how to create, configure, and implement a feature flag using GitLab.\n\n**Note:** This tutorial is for learning purposes and not meant to deploy a production-ready architecture. Also, to keep the number of steps low, masked variables and sealed secrets are not being used throughout this tutorial.\n\n## Flux and the GitLab agent for Kubernetes\nHere is how to install Flux and GitLab agent for Kubernetes.\n- Log on to your GitLab workspace.\n- Create a personal access token (PAT) from your GitLab account by navigating to **User settings > Preferences > Access tokens**. In the **Personal Access Tokens** section, click on the **Add new token** button on the righthand side of the section. For **Token name**, enter `pat-for-flux`. Leave the expiration date with its default (it should be 30 days from its creation) and select the **API** scope for your **PAT**. Click on the **Create personal access token** button to create your PAT. Copy and save the value of your **PAT**; you will need it at a later step.\n\n![create-pat](https://about.gitlab.com/images/blogimages/feature-flags-tutorial/create-pat.png){: .shadow.medium.center}\nCreating a personal access token\n{: .note.text-center}\n\n- Head back to your GitLab workspace main page.\n- Create a group named “hn” by clicking the button **New group** (or **New subgroup** if you are creating this group inside an existing group) on the top right hand side of your screen, and then clicking on the **Create group** tile. 
Enter \"hn\" for your **Group name** and click on the **Create group** button to create it. Leave the rest of the fields with their defaults.\n\n![create-group-hn](https://about.gitlab.com/images/blogimages/feature-flags-tutorial/create-group-hn.png){: .shadow.medium.center}\nCreating group \"hn\"\n{: .note.text-center}\n\n- Inside group “hn”, create project “flux-config” by clicking the **New project** on the top righthand side of your screen and then clicking on the **Create blank project** tile.\n\n![create-proj-flux-config](https://about.gitlab.com/images/blogimages/feature-flags-tutorial/create-proj-flux-config.png){: .shadow.medium.center}\nCreating project \"flux-config\"\n{: .note.text-center}\n\n- From the Terminal window with `kubectl` access to your Kubernetes cluster, export your **PAT** by entering the following command:\n\n> export GITLAB_TOKEN=`\u003Creplace with your PAT value>`\n\n- From the Terminal window with `kubectl` access to your Kubernetes cluster, bootstrap Flux by executing the following command:\n\n**Note:** Make sure to replace `\u003Cyour path>` with whatever precedes your group “hn”. 
For example, it could be `--owner=tech-marketing/sandbox/hn`, or if your group “hn” is at the very top level of your GitLab workspace, it would be `--owner=hn`.\n\n```\nflux bootstrap gitlab \\\n  --owner=\u003Cyour path>/hn \\\n  --repository=flux-config \\\n  --branch=main \\\n  --path=clusters/my-cluster \\\n  --deploy-token-auth\n```\n\n![flux-bootstrap-output](https://about.gitlab.com/images/blogimages/feature-flags-tutorial/flux-bootstrap-output.png){: .shadow.medium.center.}\nFlux bootstrap output\n{: .note.text-center}\n\nThe “flux-config” project should now contain new directories and files as shown below:\n\n![flux-config-post-bootstrap](https://about.gitlab.com/images/blogimages/feature-flags-tutorial/flux-config-post-bootstrap.png){: .shadow.medium.center}\nProject flux-config post flux bootstrap process\n{: .note.text-center}\n\n- Head over to project **hn/flux-config** and create file “.gitlab/agents/k8s-agent/config.yaml” by clicking on the **+** sign next to the “flux-config” and selecting **New file**. Paste the following into it the new file:\n\n**Note:** Make sure to replace `\u003Cyour path>` with whatever precedes your group “hn”. 
For example, it could be `- id: tech-marketing/sandbox/hn` or if your group “hn” is at the very top level of your GitLab workspace, it would be `- id: hn`.\n\n```\nci_access:\n  groups:\n    - id: \u003Cyour path>/hn\n```\n\nCommit this file to main by clicking on the **Commit changes** button and ensuring that the target branch is “main”.\n\n![create-config-yaml](https://about.gitlab.com/images/blogimages/feature-flags-tutorial/create-config-yaml.png){: .shadow.medium.center}\nCreating the GitLab agent for Kubernetes configuration manifest\n{: .note.text-center}\n\n- Head to **Operate > Kubernetes clusters** and register the agent by clicking the **Connect a cluster** button.\n\n![register-agent](https://about.gitlab.com/images/blogimages/feature-flags-tutorial/register-agent.png){: .shadow.medium.center}\nRegistering the GitLab agent for Kubernetes\n{: .note.text-center}\n\n- On the “Connect a Kubernetes cluster” dialog, click on the popdown list and select agent “k8s-agent”. Click on the **Register** button. The dialog will refresh and show the **Agent access token**. Copy and save the **Agent access token**; you will need it at a later step. 
Close the dialog by clicking on the **Close** button.\n\n![agent-access-token-dialog](https://about.gitlab.com/images/blogimages/feature-flags-tutorial/agent-access-token-dialog.png){: .shadow.medium.center}\nThe agent access token to save\n{: .note.text-center}\n\nAt this moment, you will see the agent listed and its Connection status will be “Never connected”.\n\n![agent-not-connected](https://about.gitlab.com/images/blogimages/feature-flags-tutorial/agent-not-connected.png){: .shadow.medium.center}\nAgent registered but not connected yet\n{: .note.text-center}\n\n-  Head to **flux-config/clusters/my-cluster** directory and create a file named “namespace-gitlab.yaml” and paste the following into it:\n\n```\napiVersion: v1\nkind: Namespace\nmetadata:\n  name: gitlab\n```\n\n![gitlab-namespace-manifest](https://about.gitlab.com/images/blogimages/feature-flags-tutorial/gitlab-namespace-manifest.png){: .shadow.medium.center}\nManifest for the gitlab namespace\n{: .note.text-center}\n\nCommit this file to main by clicking on the **Commit changes** button and ensuring that the target branch is “main”.\n\n```\nNote: You can check that the namespace was created in your cluster by executing this command from a Terminal:\n\nkubectl get ns\n```\n\n![gitlab-ns-created](https://about.gitlab.com/images/blogimages/feature-flags-tutorial/gitlab-ns-created.png){: .shadow.medium.center}\nFlux created gitlab namespace\n{: .note.text-center}\n\n- Before we have Flux deploy the GitLab agent for Kubernetes to your cluster, we need to create a secret, containing the **Agent access token** you saved earlier, in your cluster. 
Create a file named “secret.yaml” in your local desktop, paste the following into it and then save it:\n\n**Note:** Make sure to replace `\u003Cyour-agent-access-token-here>` with your **Agent access token** you saved earlier.\n\n```\napiVersion: v1\nkind: Secret\nmetadata:\n  name: gitlab-agent-token-initial\ntype: Opaque\nstringData:\n  values.yaml: |-\n    config:\n      token: \"\u003Cyour-agent-access-token-here>\"\n```\n\n![agent-token-secret](https://about.gitlab.com/images/blogimages/feature-flags-tutorial/agent-token-secret.png){: .shadow.medium.center.}\nManifest for agent token secret created on local desktop\n{: .note.text-center}\n\n- Create the secret in your cluster by executing the following command from a Terminal:\n\n> kubectl apply -f secret.yaml -n gitlab\n\n```\nNote: You can check that the secret was created in your cluster by executing this command from a Terminal:\n\nkubectl get secrets -n gitlab\n```\n\n![apply-agent-token-secret](https://about.gitlab.com/images/blogimages/feature-flags-tutorial/apply-agent-token-secret.png){: .shadow.medium.center}\nApplying the agent token secret to the Kubernetes cluster\n{: .note.text-center}\n\n- Now let’s use the Flux Helm Controller to deploy the GitLab agent for Kubernetes to your cluster. 
Head to **flux-config/clusters/my-cluster** directory and create a file named “agentk.yaml” and paste the following into it:\n\n```\n---\napiVersion: source.toolkit.fluxcd.io/v1beta2\nkind: HelmRepository\nmetadata:\n  labels:\n    app.kubernetes.io/component: agentk\n    app.kubernetes.io/created-by: gitlab\n    app.kubernetes.io/name: agentk\n    app.kubernetes.io/part-of: gitlab\n  name: gitlab-agent\n  namespace: gitlab\nspec:\n  interval: 1h0m0s\n  url: https://charts.gitlab.io\n---\napiVersion: helm.toolkit.fluxcd.io/v2beta1\nkind: HelmRelease\nmetadata:\n  name: gitlab-agent\n  namespace: gitlab\nspec:\n  chart:\n    spec:\n      chart: gitlab-agent\n      sourceRef:\n        kind: HelmRepository\n        name: gitlab-agent\n        namespace: gitlab\n  interval: 1h0m0s\n  values:\n    replicas: 1\n    config:\n      kasAddress: \"wss://kas.gitlab.com\"  \n  valuesFrom:\n    - kind: Secret\n      name: gitlab-agent-token-initial\n      valuesKey: values.yaml\n```\n\n![create-agentk-manifest](https://about.gitlab.com/images/blogimages/feature-flags-tutorial/create-agentk-manifest.png){: .shadow.medium.center}\nCreating the manifest for the GitLab agent for Kubernetes\n{: .note.text-center}\n\nCommit this file to main by clicking on the **Commit changes** button and ensuring that the target branch is “main”.\n\n```\nNote: In a few seconds, you can check that the GitLab agent for Kubernetes was created in your cluster by executing this command from a Terminal (the pod name should start with “gitlab-agent”):\n\nkubectl get pods -n gitlab\n```\n![agentk-pod-up](https://about.gitlab.com/images/blogimages/feature-flags-tutorial/agentk-pod-up.png){: .shadow.medium.center}\nAgentk running in the Kubernetes cluster\n{: .note.text-center}\n\n## Creating an instance of MySQL database in your cluster via Flux\n- Using the breadcrumb at the top of your window, head to group “hn” and create a new project by clicking on the **New project** button. 
On the **Create new project** window, click on the **Import project** tile.\n- At the **Import project** window, click on the **Repository by URL** button. The window will display fields to enter the URL of the repository you would like to import. In the text field **Git repository URL**, enter the following:\n\n> [https://gitlab.com/tech-marketing/sandbox/mysql.git](https://gitlab.com/tech-marketing/sandbox/mysql.git)\n\nLeave the rest of the fields with their defaults.\n\n![import-mysql-proj](https://about.gitlab.com/images/blogimages/feature-flags-tutorial/import-mysql-proj.png){: .shadow.medium.center}\nImporting mysql project into group \"hn\"\n{: .note.text-center}\n\n- Click on the **Create project** button at the bottom of the screen. You will see an \"Importing in progress\" message temporarily on your screen.\n- Now we need to create a deploy token for this project so that Flux can interact with it. While in project “mysql”, select **Settings > Repository** and scroll down to the **Deploy tokens** section. Click on the **Expand** button to the right of the **Deploy tokens** section. Then click on the **Add token** button, which will expand the section to include fields to start entering information for the deploy token to be created.\n- Give the deploy token the name “mysql-flux-deploy-token” and check the checkbox **read_repository** for it. 
Then click on the button **Create deploy token** to create the token.\n\n![create-mysql-deploy-token](https://about.gitlab.com/images/blogimages/feature-flags-tutorial/create-mysql-deploy-token.png){: .shadow.medium.center}\nCreating the deploy token for \"mysql\" project for Flux to interact with it\n{: .note.text-center}\n\nCopy and save the username and password for the newly created deploy token; you will need them at a later step.\n\n![mysql-deploy-token-created](https://about.gitlab.com/images/blogimages/feature-flags-tutorial/mysql-deploy-token-created.png){: .shadow.medium.center}\nCreating the deploy token for \"mysql\" project for Flux to interact with it\n{: .note.text-center}\n\n-  From a Terminal, execute the following command to create a secret in your cluster for the deploy token you just created:\n\n**Note:** Make sure to replace `\u003Cyour path>` with the missing partial path to the project “mysql”, \u003Cyour-deploy-token-username> with the deploy token username you saved earlier, and the \u003Cyour-deploy-token-password> with the deploy token password you saved earlier.\n\n```\nflux create secret git mysql-flux-deploy-authentication \\\n         --url=https://gitlab.com/\u003Cyour path>/hn/mysql \\\n         --namespace=default \\\n         --username=\u003Cyour-deploy-token-username> \\\n         --password=\u003Cyour-deploy-token-password>\n```\n\n```\nNote: You can check that the secret was created in your cluster by executing this command from a Terminal:\n\nkubectl -n default get secrets mysql-flux-deploy-authentication\n```\n\n![mysql-secret-created](https://about.gitlab.com/images/blogimages/feature-flags-tutorial/mysql-secret-created.png){: .shadow.medium.center}\nCreating secret for the deploy token for \"mysql\" project in the Kubernetes cluster\n{: .note.text-center}\n\n- Head back to project “hn/flux-config” and open the Web IDE from 
it.\n\n![open-web-ide](https://about.gitlab.com/images/blogimages/feature-flags-tutorial/open-web-ide.png){: .shadow.medium.center}\nSelecting Web IDE from the dropdown menu\n{: .note.text-center}\n\n- From inside the Web IDE, navigate to directory \"clusters/my-cluster\".\n\n![goto-clusters-mycluster](https://about.gitlab.com/images/blogimages/feature-flags-tutorial/goto-clusters-mycluster.png){: .shadow.medium.center}\nNavigate to directory \"clusters/my-cluster\" in the Web IDE\n{: .note.text-center}\n\n- Inside “clusters/my-cluster” directory, create file “mysql-manifests-source.yaml” and paste the following text into it:\n\n**Note:** Replace `\u003Cyour path>` with the missing partial path to the project “mysql”\n\n```\napiVersion: source.toolkit.fluxcd.io/v1beta2\nkind: GitRepository\nmetadata:\n  name: mysql\n  namespace: default\nspec:\n  interval: 1m0s\n  ref:\n    branch: main\n  secretRef:\n    name: mysql-flux-deploy-authentication\n  url: https://gitlab.com/\u003Cyour path>/hn/mysql\n```\n\n![create-mysql-source-manifest](https://about.gitlab.com/images/blogimages/feature-flags-tutorial/create-mysql-source-manifest.png){: .shadow.medium.center}\nCreating mysql-manifests-source.yaml file in the Web IDE\n{: .note.text-center}\n\n- Still in the Web IDE, inside “clusters/my-cluster” directory, create file “mysql-manifests-kustomization.yaml” and paste the following text into it:\n\n```\napiVersion: kustomize.toolkit.fluxcd.io/v1beta2\nkind: Kustomization\nmetadata:\n  name: mysql-source-kustomization\n  namespace: default\nspec:\n  interval: 1m0s\n  path: ./\n  prune: true\n  sourceRef:\n    kind: GitRepository\n    name: mysql\n    namespace: default\n  targetNamespace: default\n```\n\n![create-mysql-kustomization-manifest](https://about.gitlab.com/images/blogimages/feature-flags-tutorial/create-mysql-kustomization-manifest.png){: .shadow.medium.center}\nCreating mysql-manifests-kustomization.yaml file in the Web IDE\n{: .note.text-center}\n\n- From the 
Web IDE, commit both files to the main branch by clicking on the **Source Control** icon on the left vertical menu, pressing the **Commit to main** button.\n\n![commit-to-main](https://about.gitlab.com/images/blogimages/feature-flags-tutorial/commit-to-main.png){: .shadow.medium.center}\nClicking on the Source Control icon and committing to main in the Web IDE\n{: .note.text-center}\n\nThen press the **Continue** button to confirm that you want to commit your changes to the default branch:\n\n![commit-to-main-continue](https://about.gitlab.com/images/blogimages/feature-flags-tutorial/commit-to-main-continue.png){: .shadow.medium.center}\nClicking on the Source Control icon and committing to main in the Web IDE\n{: .note.text-center}\n\n- Flux will deploy MySQL to your Kubernetes cluster. You can close the Web IDE browser tab at this point.\n\n```\nNote: You can check that MySQL was created in your cluster by executing this command from a Terminal:\n\nkubectl get pods -l app=mysql\n\nYou can check the persistent volume by executing this command from a Terminal:\n\nkubectl describe pvc mysql-pv-claim\n```\n\n![mysql-pod-and-pv-up](https://about.gitlab.com/images/blogimages/feature-flags-tutorial/mysql-pod-and-pv-up.png){: .shadow.center}\nVerifying that mysql pod and its associated persistent volume claim are up and ready\n{: .note.text-center}\n\n- Now that the MySQL pod is up and running, we need to create a database, tables, and indexes in it and also populate some of the tables with dummy data for the inventory system. 
Using the breadcrumb at the top of your window, head over to the “mysql” project and select **Build > Pipelines** from the left vertical navigation menu.\n\n![head-to-mysql-build-pipelines](https://about.gitlab.com/images/blogimages/feature-flags-tutorial/head-to-mysql-build-pipelines.png){: .shadow.medium.center}\nHead to \"mysql\" project and select **Build > Pipelines** from the left vertical navigation menu\n{: .note.text-center}\n\n- Click on the **Run pipeline** button on the top right side of the **Pipelines** window. This will put you on the **Run pipeline** window. Click on the **Run pipeline** button on the bottom left of the **Run pipeline** window leaving the rest of the fields with its defaults.\n\n![run-pipeline-button](https://about.gitlab.com/images/blogimages/feature-flags-tutorial/run-pipeline-button.png){: .shadow.medium.center}\nClicking on the **Run pipeline** button to run the project \"mysql\" pipeline\n{: .note.text-center}\n\n- At this point you will see the pipeline stage and jobs. There are two jobs under the **Build** stage: **create_and_load_db** and **clear_db**.\n\n![mysql-pipeline](https://about.gitlab.com/images/blogimages/feature-flags-tutorial/mysql-pipeline.png){: .shadow.medium.center}\nThe \"mysql\" pipeline and its two manual jobs\n{: .note.text-center}\n\n- Click on the Play button (the right solid arrow) next to the **create_and_load_db** job name. This job will create a **product** table and a **users** table and populate them with dummy data. It will also create tables and indexes needed for storing all the session-related information as users log in and log out from the inventory system.\n\n**Note:** The **clear_db** job should only be used if you’d like to erase all of the database resources created by the **create_and_load_db** job. 
The **clear_db** should only be used AFTER a failed run of the **create_and_load_db** job.\n\nNow that we have the database ready to go, let’s set up the project that we will use for the creation of the feature flags.\n\n## Creating and importing projects\n- Head back to group “hn” and inside of it, create a cluster management project (you can call it “cluster-management”) at the same level as the project you imported above. You can view this [instructional video](https://www.youtube.com/watch?v=QRR3WuwnxXE&t=200s) (up to minute 6:09) to see how to do this. While applying the steps in the video for this tutorial, adjust the variables values from the video to this post as described in the following notes:\n\n**Note 1:** Make sure to create and set the KUBE_CONTEXT and KUBE_NAMESPACE variable in group “hn” and to these values:\n\n| variable | value |\n| ---          | ---      |\n| KUBE_CONTEXT | `\u003Cyour path>`/hn/flux-config:k8s-agent |\n| KUBE_NAMESPACE | my-apps |\n\nFor example, in my case `\u003Cyour path>` was “tech-marketing/sandbox/hn/flux-config:k8s-agent”. In your case, it will be different. If `\u003Cyour path>` is at the root of your GitLab workspace, then it would be empty so the value of KUBE_CONTEXT would be “hn/flux-config:k8s-agent”.\n\n![add-var-KUBE_CONTEXT](https://about.gitlab.com/images/blogimages/feature-flags-tutorial/add-var-KUBE_CONTEXT.png){: .shadow.medium.center}\nAdding variable KUBE_CONTEXT in group \"hn\"\n{: .note.text-center}\n\n![add-var-KUBE_NAMESPACE](https://about.gitlab.com/images/blogimages/feature-flags-tutorial/add-var-KUBE_NAMESPACE.png){: .shadow.medium.center}\nAdding variable KUBE_NAMESPACE in group \"hn\"\n{: .note.text-center}\n\n**Note 2:** As an FYI, when uncommenting the GitLab managed apps in the “helmfile.yaml” file, there will not be one for Prometheus. 
So, you will only uncomment the lines for ingress and cert-manager.\n\n![uncomment-ingress-and-cert-manager](https://about.gitlab.com/images/blogimages/feature-flags-tutorial/uncomment-ingress-and-cert-manager.png){: .shadow.medium.center}\nUncommenting lines for ingress and cert-manager in file \"helmfile.yaml\"\n{: .note.text-center}\n\n**Note 3:** When the pipeline for project “cluster-management” runs, you will notice that the job “sync” is a manual job. You will need to click on its **Play** (right arrow next to its name) button to run it. Wait until the “sync” job completes successfully before continuing.\n\n![click-play-on-sync-job](https://about.gitlab.com/images/blogimages/feature-flags-tutorial/click-play-on-sync-job.png){: .shadow.medium.center}\nJob \"sync\" is manual so you need to press on the **Play** button next to its name\n{: .note.text-center}\n\n**Note 4:** Once the pipeline finishes, for your convenience, here is the command you need to run from a Terminal window to get the **external IP** address of your cluster:\n\n```\nkubectl --namespace gitlab-managed-apps get services -o wide -w ingress-ingress-nginx-controller\n```\n\n![getting-external-ip-address](https://about.gitlab.com/images/blogimages/feature-flags-tutorial/getting-external-ip-address.png){: .shadow.medium.center}\nRunning `kubectl` command to get the ingress IP address to the cluster\n{: .note.text-center}\n\nCreate and set a variable `KUBE_INGRESS_BASE_DOMAIN` in group “hn” and set it to the **external IP** address of your cluster and append the suffix “.nip.io” to it.\n\n![add-var-KUBE_INGRESS_BASE_DOMAIN](https://about.gitlab.com/images/blogimages/feature-flags-tutorial/add-var-KUBE_INGRESS_BASE_DOMAIN.png){: .shadow.medium.center}\nAdding variable KUBE_INGRESS_BASE_DOMAIN in group \"hn\"\n{: .note.text-center}\n\n- Inside group “hn”, create a new project. Click on the **New project** button. 
On the **Create new project** window, click on the **Import project** tile and then click on the **Repository by URL** button.\n- This will expand the window and show fields to enter the URL of the repository you would like to import. In the field **Git repository URL**, enter the following:\n\n> [https://gitlab.com/tech-marketing/sandbox/prodmgr.git](https://gitlab.com/tech-marketing/sandbox/prodmgr.git)\n\nLeave the rest of the fields with their defaults.\n\n![import-prodmgr-proj](https://about.gitlab.com/images/blogimages/feature-flags-tutorial/import-prodmgr-proj.png){: .shadow.medium.center}\nImporting project \"prodmgr\" into group \"hn\"\n{: .note.text-center}\n\n- Click on the **Create project** button at the bottom of the screen. You will see an **Importing in progress** message temporarily on your screen.\n- In project “prodmgr”, create a pipeline file and make sure to name it “.gitlab-ci.yml”. Paste the following code block into the empty file:\n\n```\ninclude:\n  template: Auto-DevOps.gitlab-ci.yml\n\nvariables:\n  K8S_SECRET_TF_VAR_dbusername: \"sasha\"\n  K8S_SECRET_TF_VAR_dbpassword: \"password\"\n  TEST_DISABLED: \"true\"\n  CODE_QUALITY_DISABLED: \"true\"\n  LICENSE_MANAGEMENT_DISABLED: \"true\"\n  BROWSER_PERFORMANCE_DISABLED: \"true\"\n  LOAD_PERFORMANCE_DISABLED: \"true\"\n  SAST_DISABLED: \"true\"\n  SECRET_DETECTION_DISABLED: \"true\"\n  DEPENDENCY_SCANNING_DISABLED: \"true\"\n  CONTAINER_SCANNING_DISABLED: \"true\"\n  DAST_DISABLED: \"true\"\n  REVIEW_DISABLED: \"true\"\n  CODE_INTELLIGENCE_DISABLED: \"true\"\n  CLUSTER_IMAGE_SCANNING_DISABLED: \"true\"\n  POSTGRES_ENABLED: \"false\"\n  STAGING_ENABLED: \"true\"\n  INCREMENTAL_ROLLOUT_MODE: \"manual\"\n```\n\nClick on the **Commit changes** button ensuring that the **Target branch** is main.\n\n![prodmgr-proj-pipeline](https://about.gitlab.com/images/blogimages/feature-flags-tutorial/prodmgr-proj-pipeline.png){: .shadow.medium.center}\nCreating an Auto-DevOps-based pipeline for project 
\"prodmgr\"\n{: .note.text-center}\n\n- The previous step builds the application and deploys it to the staging environment. Once deployed to staging, head to **Build > Pipelines** and click on the most recently executed pipeline (should be the first one in the list). Click on the pipeline to display it and then deploy the application to production by clicking on “rollout 100%” job.\n\n![rollout-to-prod](https://about.gitlab.com/images/blogimages/feature-flags-tutorial/rollout-to-prod.png){: .shadow.medium.center}\nTo deploy the application to production, click on the **rollout 100%** Play button\n{: .note.text-center}\n\nAt this point, you have a running application in the staging and production environments in your Kubernetes cluster. Let’s start creating a feature flag.\n\n## Creating a new feature flag\n- In project “prodmgr”, select **Deploy > Feature flags** from your left vertical navigation menu.\n\n### Creating a user list\n- Click on the link **View user lists** on the top right hand side of your screen.\n- Click on the **New user list** button on the top right hand side of your screen.\n- In the **Name** field of the user list, enter “prods-in-alphabetical-order-userlist” and then click on the **Create** button.\n\n![create-ff-userlist](https://about.gitlab.com/images/blogimages/feature-flags-tutorial/create-ff-userlist.png){: .shadow.medium.center}\nCreating user list named \"prods-in-alphabetical-order-userlist\"\n{: .note.text-center}\n\n- On the next screen, click on the **Add Users** button on the top right hand side of your screen.\n- In the **User IDs** text field, enter the following two email addresses and then click on the **Add** button:\n\n> michael@cfl.rr.com,mary@cfl.rr.com\n\n![add-users-to-list](https://about.gitlab.com/images/blogimages/feature-flags-tutorial/add-users-to-list.png){: .shadow.medium.center}\nAdding users to user list \"prods-in-alphabetical-order-userlist\"\n{: .note.text-center}\n\n- Head back to the Feature flags window 
by selecting **Deploy > Feature flags** from your left vertical navigation menu.\n\n### Creating the flag\n- Click on the **New feature flag** button on the top right hand side of your screen.\n- In the **New feature flag** window, enter “prods-in-alphabetical-order-ff”.\n\n### Specifying the strategy for the production environment\nIn the **Strategies** section of the **New feature flag** window, there should already be sub-sections for **Type** and **Environments**.\n- For **Type**, select **Percent rollout** from the dropdown menu.\n- For **Percentage**, enter **50** in the field.\n- For **Based on**, ensure that **Available ID** is selected from the popdown menu.\n- For **Environments**, click on the **+** sign and select the **production** environment.\n\n### Specifying the strategy for the staging environment\n- Click on the **Add strategy** button on the right hand side of the **Strategies** section. A new sub-section for another strategy will appear.\n- For **Type**, select **User List** from the dropdown menu.\n- For **User List**, select the user list **prods-in-alphabetical-order-userlist**.\n- For **Environments**, click on the **+** sign and select the **staging** environment.\n- Click on **Create feature flag** button at the bottom of your screen to complete the creation of the feature flag.\n\n![ff-and-strats-def](https://about.gitlab.com/images/blogimages/feature-flags-tutorial/ff-and-strats-def.png){: .shadow.medium.center}\nDefining the feature flag with its strategies for staging and production environments\n{: .note.text-center}\n\n## Sharing feature flag configuration information with developers\nIn order for developers to instrument their code for this feature flag, you need to share with them the following information:\n- On the **Feature flags** window, click on the **Configure** button on the top right hand side of your screen.\n- Copy and save the values of **API URL** (URL where the client application connects to get a list of feature 
flags) and **Instance ID** (unique token that authorizes the retrieval of the feature flags). These are the two values that you will need for feature flag instrumentation.\n\n![ff-api-url-and-instance-id](https://about.gitlab.com/images/blogimages/feature-flags-tutorial/ff-api-url-and-instance-id.png){: .shadow.medium.center}\nCopy and save the values for the feature flag API URL and Instance ID\n{: .note.text-center}\n\n- Head over to **Settings > CI/CD** and scroll down to the **Variables** section and click on its **Expand** button. Add the following two variables to your project:\n\n| Variable Key | Variable Value | Variable Type | Environment Scope | Flag - Protect variable | Flag - Mask variable\n| ----------- | ----------- | ----------- |----------- | ----------- | ----------- |\n| K8S_SECRET_UNLEASH_URL | \\\u003Csaved **API URL** value\\> | Variable | All (default) | unchecked | unchecked\n| K8S_SECRET_UNLEASH_INSTANCE_ID | \\\u003Csaved **Instance ID** value\\> | Variable | All (default) | unchecked | unchecked\n\n![add-var-K8S_SECRET_UNLEASH_URL](https://about.gitlab.com/images/blogimages/feature-flags-tutorial/add-var-K8S_SECRET_UNLEASH_URL.png){: .shadow.medium.center}\nAdding variable K8S_SECRET_UNLEASH_URL to project \"prodmgr\"\n{: .note.text-center}\n\n![add-var-K8S_SECRET_UNLEASH_INSTANCE_ID](https://about.gitlab.com/images/blogimages/feature-flags-tutorial/add-var-K8S_SECRET_UNLEASH_INSTANCE_ID.png){: .shadow.medium.center}\nAdding variable K8S_SECRET_UNLEASH_INSTANCE_ID to project \"prodmgr\"\n{: .note.text-center}\n\nThese two variables contain values that will be passed to your application (via the K8S_SECRET_ keyword) so that it can make use of the feature flags defined and managed by GitLab.\n\nIn order for your application to be able to use feature flags, you need to instrument your application with our Feature Flags framework. 
Let's see how you do this in the sample Java application.\n\n## Instrumenting the code\nIn this example, we are using the Java client for Unleash but if you’re using a different programming language then you need to use the client library for your language. To get all the supported languages, refer to the [Unleash documentation](https://docs.getunleash.io/reference/sdks) or [Unleash open source project](https://github.com/Unleash/unleash#unleash-sdks).\n\n### Instrumenting Java class files\n- In project “prodmgr”, navigate to the directory `src/main/java/csaa/jspring/ProductManager`.\n- Click on the file name “AppController.java” to view its contents and then click on the Edit button to enter edit mode.\n- You will see a few code blocks that have been commented out and are preceded by the line:\n\n> // Uncomment block below to instrument Feature Flag\n\nUncomment all the code blocks under each of the lines indicated above.\n\n![java-file-with-uncommented-lines](https://about.gitlab.com/images/blogimages/feature-flags-tutorial/java-file-with-uncommented-lines.png){: .shadow.medium.center}\nPartial view of AppController.java file with uncommented code blocks\n{: .note.text-center}\n\n- Commit the changes to the main branch.\n- The commit starts a pipeline that deploys the application to the staging environment. Head to **Build > Pipelines** and click on the most recently executed pipeline (should be the first one in the list). Click on the pipeline to display it and wait until the **staging** job finishes. Then deploy the application to production by clicking on “rollout 100%” job.\n\nNow that the application is running in the staging and production environments, let’s see the feature flag in action.\n\n## Feature flag in action\nNow let's check how the feature flag is working.\n### Checking the feature flag in the staging environment\n- In project “prodmgr”, click on **Operate > Environments** to see the list of all environments. 
Then click on the \"Open live environment\" button for the staging environment.\n- A new browser tab will appear and will display a login screen. If your browser complains about the connection being insecure, accept the risk and open the browser tab.\n- Remember that the feature flag strategy for staging is based on the user list containing michael and mary in it. Let’s try logging in as each of them.\n- Enter credentials michael@cfl.rr.com with password p33sw0rd. Verify that Michael gets a product list sorted in alphabetical order. Log out and close the browser tab to ensure that his session closes.\n\n![michael-gets-ff](https://about.gitlab.com/images/blogimages/feature-flags-tutorial/michael-gets-ff.png){: .shadow.medium.center}\nMichael gets the feature flag that orders the list of product names in alphabetical order\n{: .note.text-center}\n\n- From the Environments window, click on the \"Open live environment\" button for the staging environment. Enter credentials \"mary@cfl.rr.com\" with password \"p33sw0rd\". Verify that mary gets a product list sorted in alphabetical order. Log out and close the browser tab to ensure that her session closes.\n- From the Environments window, click on the \"Open live environment\" button for the staging environment. This time, enter credentials for \"thomas@gmail.com\" with password \"p33sw0rd\". Verify that thomas does **not** get a product list sorted in alphabetical order. 
Log out and close the browser tab to ensure that his session closes.\n\n![thomas-does-not-get-the-ff](https://about.gitlab.com/images/blogimages/feature-flags-tutorial/thomas-does-not-get-the-ff.png){: .shadow.medium.center}\nThomas does not get the feature flag because the product names are not ordered in alphabetical order\n{: .note.text-center}\n\nThe steps above demonstrate that the feature flag strategy for staging successfully worked.\n\n### Checking the feature flag in the production environment\n- Click on **Operate > Environments** to see the list of all environments. Then click on the \"Open live environment\" button for the production environment.\n- A new browser tab will appear and will display a login screen. If your browser complains about the connection being insecure, accept the risk and open the browser tab.\n- Remember that the strategy in production is that the feature will be served to 50% of the users. Try logging into the web application as each of the following users keeping track of who gets the list of products sorted in alphabetical order by name and who does not:\n\n**Note:** Remember to click on the \"Open live environment\" button for the **production** environment. 
Once you log out from each user, remember to **close** the browser tab to ensure that the session closes.\n\n| Username | Password\n| ----------- | ----------- |\n| peter@gmail.com | pa33w0rd\n| magic@cfl.rr.com | pa33w0rd\n| michael@cfl.rr.com | pa33w0rd\n| henry@gmail.com | pa33w0rd\n| mary@cfl.rr.com | pa33w0rd\n| thomas@gmail.com | pa33w0rd\n\nYour final count should consist of three users being served the feature and three not, matching the strategy that was set for the production environment.\n\nAs changes are made to feature flags, you can track them from the audit events window.\n\n## Auditing feature flag changes\n**Note:** A Premium GitLab subscription is needed for viewing Audit events.\n\n- In project “prodmgr”, select **Secure > Audit events** from the left vertical navigation menu.\n- This displays all the events that have occurred in GitLab for the last thirty days. You will see that events related to updates to feature flags are listed.\n\n![audit-events-list](https://about.gitlab.com/images/blogimages/feature-flags-tutorial/audit-events-list.png){: .shadow.medium.center}\nAudit events is an auditable list of actions that have been taken against resources\n{: .note.text-center}\n\nThis auditing allows you to identify when and who made changes to feature flags. 
It can also help preempt out-of-compliance scenarios and streamline audits to avoid penalties, providing an opportunity to optimize cost, and lower risk of unscheduled production outages.\n\nNow you know how to create and use feature flags to lower your deployment risk.\n\nPhoto by \u003Ca href=\"https://unsplash.com/@liamdesic?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText\">Liam Desic\u003C/a> on \u003Ca href=\"https://unsplash.com/photos/acKSt3THWKA?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText\">Unsplash\u003C/a>\n",[111,836,9],{"slug":1349,"featured":6,"template":683},"eliminate-risk-with-feature-flags-tutorial","content:en-us:blog:eliminate-risk-with-feature-flags-tutorial.yml","Eliminate Risk With Feature Flags Tutorial","en-us/blog/eliminate-risk-with-feature-flags-tutorial.yml","en-us/blog/eliminate-risk-with-feature-flags-tutorial",{"_path":1355,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":1356,"content":1362,"config":1370,"_id":1372,"_type":16,"title":1373,"_source":18,"_file":1374,"_stem":1375,"_extension":21},"/en-us/blog/five-ways-to-streamline-cloud-adoption",{"title":1357,"description":1358,"ogTitle":1357,"ogDescription":1358,"noIndex":6,"ogImage":1359,"ogUrl":1360,"ogSiteName":697,"ogType":698,"canonicalUrls":1360,"schema":1361},"5 ways to streamline your cloud adoption","As companies migrate to the cloud, consider these helpful tips for making the move smoother and more efficient.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749663930/Blog/Hero%20Images/daytime-clouds_1800x945.png","https://about.gitlab.com/blog/five-ways-to-streamline-cloud-adoption","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"5 ways to streamline your cloud adoption\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Sharon Gaudin\"}],\n        \"datePublished\": \"2023-09-05\",\n      
}",{"title":1357,"description":1358,"authors":1363,"heroImage":1359,"date":1365,"body":1366,"category":14,"tags":1367},[1364],"Sharon Gaudin","2023-09-05","\nMoving to the cloud makes sense to a lot of companies — it’s getting there that can be difficult.\n\n[GitLab’s 2023 Global DevSecOps Survey](https://about.gitlab.com/developer-survey/previous/2023/) showed that migrating to the cloud can help organizations release software faster: Respondents who were running at least 25% of their applications in the cloud were twice as likely to release software faster than they were a year ago.\n\nHowever, the migration, whether to a single-cloud service or a multi-cloud environment, can be a big lift. IT teams are tasked with securing major data stores and workloads, navigating the complexities of moving legacy applications, and ensuring that cloud environments comply with applicable data regulations and laws. It can be complicated, with a lot of moving pieces that are often difficult to track. \n\nAnd the longer a migration drags on, the more things can go wrong and the more expensive it can get. It only makes sense to look for a way to make something so critical to the business easier, faster, and less expensive.\n\nAbubakar Siddiq Ango, developer evangelism program manager at GitLab, and Fatima Sarah Khalid, developer evangelist at GitLab, share five ways organizations can alleviate some of the time-consuming, repetitive, and arduous tasks it takes to successfully make that move.\n\n## 1. Take care of your data\nOne of the most difficult parts of a cloud migration is moving the data itself – especially if it’s complex and stored across multiple systems – but there are a few ways you can organize and streamline the tasks involved to make them more straightforward. 
For example, to save time and increase efficiency, Khalid notes that team members can create [issues](https://docs.gitlab.com/ee/user/project/issues/), break tasks down into [milestones](/blog/tackle-nists-plan-of-action-and-milestones-with-gitlabs-risk-management-features/), and use the [Roadmap](https://docs.gitlab.com/ee/user/group/roadmap/) feature, which gives teams a more granular view of their workflow.\n\n## 2. Avoid security pitfalls\n[Security](/blog/its-time-to-put-the-sec-in-devsecops/) should be a key consideration in any cloud migration. Moving to a cloud environment can inadvertently cause misconfigured servers, unsecure APIs, compliance infringements, and data loss. Any of these problems can trip up cloud migration efforts and expose the company to risk.\n\nTo ensure the move to the cloud proceeds smoothly while minimizing security risks, Ango says teams can use [container](https://docs.gitlab.com/ee/user/application_security/container_scanning/) and [dependency scanning](https://docs.gitlab.com/ee/user/application_security/dependency_scanning/) and [static application security testing](https://docs.gitlab.com/ee/user/application_security/sast/) (SAST) to identify and remediate known vulnerabilities in container images, dependencies, and source code. Teams also can use features such as [code quality](https://docs.gitlab.com/ee/ci/testing/code_quality.html) analysis to supplement existing code review processes and ensure that the project’s code is simple, high-quality, and straightforward to maintain — and, therefore, less likely to cause issues during the migration.  \n\n## 3. Automate compliance\n[Compliance](/blog/top-5-compliance-features-to-leverage-in-gitlab/) is another critical issue. IT teams need to ensure the new cloud environment continues to meet all of the organization's regulatory requirements — a potentially large number of standards. 
That means making sure processes and safeguards focused on data protection are in place and cover the information and applications being moved to the cloud. Manually, that can involve spreadsheets, seemingly endless checklists, and cross-functional teams of people culling through data. Automation makes this more streamlined, requires far fewer people to navigate the process, and is simpler to manage. Automated DevOps practices, like security scanning, [policy automation](/solutions/compliance/), and making compliance standards part of the CI/CD pipeline, all act as guardrails to [keep an organization’s compliance needs on track](/blog/the-importance-of-compliance-in-devops/). With these tools at hand, team members can trust that when they create compliance frameworks and policies, the associated rules will be automatically deployed and enforced throughout the software development lifecycle.\n\n## 4. Relieve configuration challenges\nSetting up and configuring a cloud platform can be a time-consuming and complicated job, but [CI/CD capabilities](/blog/introducing-ci-components/) help automate the configuration process, says Ango. With CI templates, teams can build and deploy applications to different cloud providers or installation targets without having to write their own CI script every time. For instance, [Auto DevOps](https://docs.gitlab.com/ee/topics/autodevops/), a collection of pre-configured features and integrations, uses CI/CD templates to handle deployments on each different cloud environment.\n\nThe [GitLab agent for Kubernetes](https://docs.gitlab.com/ee/user/clusters/agent/install/) also can offer integration capabilities for different cloud providers and services. The agent, which helps set up GitOps, automatically deploys workloads to Kubernetes clusters. 
Any time new changes are made, it pulls them in and deploys them into a cluster.\nAlso, teams can use [GitLab and Terraform for infrastructure as code](https://docs.gitlab.com/ee/user/infrastructure/iac/), removing the complexities of making configuration changes repeatable, traceable, and more scalable, which is essential for cloud environments.\n\n## 5. Go multi-cloud\nWhile some companies are making initial moves to the cloud, others are expanding from a single cloud to a multi-cloud environment. This strategy enables organizations to run different workloads on different cloud platforms. Being cloud agnostic means they can use the same development tools and internal processes, and then choose where they want to have their workloads run based on their business needs. Problems can arise, though, when IT teams turn to vendor-locked, cloud native developer tools, which are tailored to their own services and might, or might not, support other cloud environments. Using different tools for each cloud platform isn’t efficient, so it’s key to find tools that are cloud or provider agnostic. \n\n## Uncomplicate cloud migration with a DevSecOps platform\nYes, there are different ways to ease a cloud migration – but do teams have to go out and round up a dozen different tools to ensure their migration is fast, secure, and compliant? No, they don't.\n\n“A lot of teams are realizing that having a single, unified place to simplify, automate, and manage the process of setting up or migrating to the cloud is a game changer,” says Khalid. 
“With an end-to-end [DevSecOps platform](/blog/utilize-the-gitlab-devops-platform-to-avoid-cloud-migration-hazards/), users are able to deploy to any of the common public clouds; support collaboration through features like merge requests, code reviews, and issue tracking; support integrations with a variety of third-party tools; and have built-in security features that allow teams to meet their needs.”\n\nTaking advantage of the GitLab DevSecOps Platform can uncomplicate a lot of those adoption challenges. And GitLab works with any cloud provider.\n\n“I know when people think about the GitLab platform, they focus on security, source code management, and [collaboration](/blog/5-ways-collaboration-boosts-productivity-and-your-career/). But we also really should be thinking about how it’s a tool that helps organizations get their [workload to the cloud](/blog/shifting-from-on-prem-to-cloud/),” says Ango. “You have to be able to work fast, move fast and deploy fast on whatever cloud environment you need, and do it all securely. That is what GitLab offers. 
That is a big deal.”\n\n_To find the features — all in one place — that your organization needs to ease and speed a cloud migration, check out this [free trial of GitLab Ultimate](https://about.gitlab.com/free-trial/devsecops/)._\n",[9,1368,793,1369],"cloud native","CD",{"slug":1371,"featured":6,"template":683},"five-ways-to-streamline-cloud-adoption","content:en-us:blog:five-ways-to-streamline-cloud-adoption.yml","Five Ways To Streamline Cloud Adoption","en-us/blog/five-ways-to-streamline-cloud-adoption.yml","en-us/blog/five-ways-to-streamline-cloud-adoption",{"_path":1377,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":1378,"content":1384,"config":1389,"_id":1391,"_type":16,"title":1392,"_source":18,"_file":1393,"_stem":1394,"_extension":21},"/en-us/blog/how-to-migrate-gitlab-groups-and-projects-more-efficiently",{"title":1379,"description":1380,"ogTitle":1379,"ogDescription":1380,"noIndex":6,"ogImage":1381,"ogUrl":1382,"ogSiteName":697,"ogType":698,"canonicalUrls":1382,"schema":1383},"How to migrate GitLab groups and projects more efficiently","Learn about performance improvements to GitLab migration by direct transfer and what's next.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749668760/Blog/Hero%20Images/migration2.jpg","https://about.gitlab.com/blog/how-to-migrate-gitlab-groups-and-projects-more-efficiently","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How to migrate GitLab groups and projects more efficiently\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Magdalena Frankiewicz\"}],\n        \"datePublished\": \"2023-08-02\",\n      }",{"title":1379,"description":1380,"authors":1385,"heroImage":1381,"date":1386,"body":1387,"category":14,"tags":1388},[1227],"2023-08-02","Migrating groups and projects using direct transfer enables you to easily move GitLab resources between GitLab instances using either the UI or API. 
In a [previous blog post](https://about.gitlab.com/blog/try-out-new-way-to-migrate-projects/), we announced the release of migrating projects as a beta\nfeature **available to everyone**. We described the benefits of the method and steps to try it out.\n\nSince then, we have made further improvements, especially focusing on [efficient](https://gitlab.com/groups/gitlab-org/-/epics/8983) and\n[reliable](https://gitlab.com/groups/gitlab-org/-/epics/8927) migrations for large projects. In this blog, we'll elaborate on these improvements, as well as their impact on the overall process and speed of migrations. We'll also discuss estimating the duration of migrations.\n\n## Imports of CI/CD pipelines\n### Problem: Timing out\nWe received [a bug report about imports of CI/CD pipelines timing out](https://gitlab.com/gitlab-org/gitlab/-/issues/365702) and realized we needed to refine the underlying migration process. We considered the root cause of the problem and possible solutions, and ran proofs of concept. We concluded that we should tackle the\nproblem of having one massive archive file for a project with a large number of a certain relation types (for example, pipelines).\n\n### What we improved\nTo fix the problem of timeouts, we decided to introduce batching to the process of exporting and importing relations (for example, merge requests or pipelines).\n\nBefore we could fully complete the [epic for introducing the batching](https://gitlab.com/groups/gitlab-org/-/epics/9036), we had to introduce a couple of other optimizations\nto the process of exporting CI/CD pipelines.\n\nIn GitLab 15.10, we started:\n- [preloading associations when exporting CI/CD pipelines](https://gitlab.com/gitlab-org/gitlab/-/issues/391593)\n- [exporting commit notes as a separate relation](https://gitlab.com/gitlab-org/gitlab/-/issues/391601)\n\nWith these optimizations, exporting CI/CD pipelines sped up considerably. 
That allowed for a large number of pipelines in a project to be successfully exported to an archive file and then imported on the destination instance. However, because we were finally importing the pipelines, the overall duration of the migration increased.\n\nIn GitLab 16.3, we are introducing [exporting and importing relations in batches](https://gitlab.com/groups/gitlab-org/-/epics/9036). This has two benefits:\n- improves migration performance by creating and transferring smaller archive files, instead of one file per relation. These files can be very big if a project has thousands of pipelines.\n- enables more parallelism. For example, the CI pipeline data is now split into multiple batches and concurrent Sidekiq jobs (assuming the Sidekiq workers are available on the destination instance, see below) import each batch.\n\nThis improvement is already available by default on GitLab.com.\n- **Users migrating from a self-managed GitLab instance to GitLab.com** have to have their self-managed instance on at least GitLab 16.2, where batched export is available, to benefit from this improvement.\n- **Users migrating from GitLab.com to a self-managed GitLab instance** have to have their self-managed instance on at least GitLab 16.2 and enable the `bulk_imports_batched_import_export` [feature flag](https://docs.gitlab.com/ee/administration/feature_flags.html) to benefit from this improvement.\n\n## Can we estimate the duration of a migration?\nThis question has been asked time and again. The answer is that duration of migration with direct transfer depends on many different factors. Some of them are: \n\n- Hardware and database resources available on the source and destination GitLab instances. 
More resources on the source and destination instances can result in shorter migration duration because:\n  - the source instance receives API requests, and extracts and serializes the entities to export\n  - the destination instance runs the jobs and creates the entities in its database\n- Complexity and size of data to be exported. For example, imagine you want to migrate two different projects with 1,000 merge requests each. The two projects can take very different amounts of time to migrate if one of the projects has a lot more attachments, comments, and other items on the merge requests. Therefore, the number of merge requests on a project is a poor predictor of how long a project will take to migrate.\n\nThere’s no exact formula to reliably estimate a migration. However, we checked the duration of each job importing a project relation to share with you the average numbers, so you can get an idea of how long importing your projects might take. Here is what we found:\n\n- importing an empty project takes about 2.4 seconds\n- importing one MR takes about 1 second\n- importing one issue takes about 0.1 seconds\n\nYou can find more project relations and the average duration to import them in the table below.\n\n| Project resource type | Average time (in seconds) to import a single record |\n| ---- | ---- |\n| Empty project\t| 2.4 |\n| Repository | 20 |\n| Project attributes\t| 1.5 |\n| Members\t| 0.2 |\n| Labels\t| 0.1 |\n| Milestones\t| 0.07 |\n| Badges\t| 0.1 |\n| Issues\t| 0.1 |\n| Snippets\t| 0.05 |\n| Snippet repositories | 0.5 |\n| Boards\t| 0.1 |\n| Merge requests\t| 1 |\n| External pull requests\t| 0.5 |\n| Protected branches\t| 0.1 |\n| Project feature\t| 0.3 |\n| Container expiration policy\t| 0.3 |\n| Service desk setting\t| 0.3 |\n| Releases | 0.1 |\n| CI/CD pipelines\t| 0.2 |\n| Commit notes\t| 0.05 |\n| Wiki\t| 10 |\n| Uploads |\t0.5 |\n| LFS objects\t| 0.5 |\n| Design\t| 0.1 |\n| Auto DevOps\t| 0.1 |\n| Pipeline schedules\t| 0.5 |\n| References\t| 5 
|\n| Push rule\t| 0.1 |\n\n## How can we migrate efficiently?\nWe also know what is needed to achieve the most efficient migration possible. \n\nA single direct transfer migration runs up to five entities (groups or projects) per import at a time, independent of the number of Sidekiq workers available on the destination instance. Importing five concurrent entities is the maximum allowed per migration by direct transfer. This limit has been set to not overload the source GitLab instance, because\nwe saw network timeouts from source instances when we removed this limitation.\n\nThat doesn't mean that if more than five Sidekiq workers are available on the destination instance that they won't be utilized during a migration. On the contrary, more Sidekiq\nworkers help speed up the migration by decreasing the time it takes to import each entity. Import of relations is distributed across multiple jobs and a single project entity\nhas over 30 relations to be migrated. [Exporting and importing relations in batches](https://gitlab.com/groups/gitlab-org/-/epics/9036) mentioned above results in even more\njobs to be processed by the Sidekiq workers. \n\nIncreasing the number of Sidekiq workers on the destination instance helps speed up the migration until the source instance hardware resources are saturated. For more information on\nincreasing the number of Sidekiq workers (increasing concurrency), see [Set up Sidekiq instance](https://docs.gitlab.com/ee/administration/sidekiq/#set-up-sidekiq-instance).\n\nThe number of Sidekiq workers on the source instance should at least be enough to export the five concurrent entities in parallel (for each running import). Otherwise, there will\nbe delays and potential timeouts as the destination is waiting for exported data to become available.\n\nDistributing projects in different groups helps to avoid timeouts. If several large projects are in the same group, you can:\n1. Move large projects to different groups or subgroups.\n1. 
Start separate migrations each group and subgroup.\n\nThe GitLab UI can only migrate top-level groups. Using the API, you can also migrate subgroups.\n\n## What's next for migrating by direct transfer?\nOf course, we're not done yet! We will continue to improve the direct transfer method, aiming towards coming out of beta and into general availability. Next, we are working on:\n\n- [Moving hardcoded limits of direct transfer to settings](https://gitlab.com/gitlab-org/gitlab/-/issues/384976) - Migration by direct transfer has some hardcoded limits that can be made configurable to allow self-managed GitLab administrators to tune them according to their needs. For GitLab.com, we could set these limits higher than their hardcoded setting.\n- [Removing a 90-minute export timeout](https://gitlab.com/gitlab-org/gitlab/-/issues/392725) - Removing this limit will allow exporting of even larger projects, because only projects that can be migrated in under 90 minutes are supported at the moment.\n\nMore details can be found on our [migrating by direct transfer roadmap direction page](https://about.gitlab.com/direction/manage/import_and_integrate/importers/). We are excited about this roadmap and hope you are too!\n\nWe want to hear from you. What's the most important missing piece for you? What else can we improve? Let us know\nin the [feedback issue](https://gitlab.com/gitlab-org/gitlab/-/issues/284495) or [schedule time](https://calendly.com/gitlab-magdalenafrankiewicz/45mins) with the Import and Integrations group product manager, and we'll keep iterating!\n\n_Disclaimer: This blog contains information related to upcoming products, features, and functionality. It is important to note that the information in this blog post is for informational purposes only. Please do not rely on this information for purchasing or planning purposes. As with all projects, the items mentioned in this blog and linked pages are subject to change or delay. 
The development, release, and timing of any products, features, or functionality remain at the sole discretion of GitLab._\n\nCover image by [Adrien VIN](https://unsplash.com/fr/@4dr13nv1n?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText) on [Unsplash](https://unsplash.com/s/photos/migration-birds?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText)\n",[836,837,727,680],{"slug":1390,"featured":6,"template":683},"how-to-migrate-gitlab-groups-and-projects-more-efficiently","content:en-us:blog:how-to-migrate-gitlab-groups-and-projects-more-efficiently.yml","How To Migrate Gitlab Groups And Projects More Efficiently","en-us/blog/how-to-migrate-gitlab-groups-and-projects-more-efficiently.yml","en-us/blog/how-to-migrate-gitlab-groups-and-projects-more-efficiently",{"_path":1396,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":1397,"content":1403,"config":1409,"_id":1411,"_type":16,"title":1412,"_source":18,"_file":1413,"_stem":1414,"_extension":21},"/en-us/blog/how-devsecops-drives-business-success",{"title":1398,"description":1399,"ogTitle":1398,"ogDescription":1399,"noIndex":6,"ogImage":1400,"ogUrl":1401,"ogSiteName":697,"ogType":698,"canonicalUrls":1401,"schema":1402},"How DevSecOps drives business success","Learn the benefits of DevSecOps, including how it drives ROI.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749663975/Blog/Hero%20Images/devsecopssurvey.png","https://about.gitlab.com/blog/how-devsecops-drives-business-success","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How DevSecOps drives business success\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Sharon Gaudin\"}],\n        \"datePublished\": \"2023-07-24\",\n      }",{"title":1398,"description":1399,"authors":1404,"heroImage":1400,"date":1405,"body":1406,"category":14,"tags":1407},[1364],"2023-07-24","\nDevSecOps is an evolution of DevOps — but it’s more 
than just adding the word “Security” in between \"Development\" and \"Operations.\" It's about bringing everyone into the security team and [building security](/the-source/security/how-to-strengthen-security-by-applying-devsecops-principles/) into the entire software development lifecycle. Although DevSecOps is likely talked about more in coding circles than in boardrooms, the benefits of DevSecOps extend to the entire organization, helping teams deliver software faster without sacrificing quality or security.\n\nBut what exactly are the business benefits of DevSecOps? In this blog post, we'll dive into the top ways DevSecOps can help organizations drive business results and ROI.\n\n> Want to dig deeper into the basics of DevSecOps and how to use it to drive business success? [Download our guide to learn more](https://page.gitlab.com/resources-ebook-devsecops-success.html?utm_campaign=devsecopsplat&utm_content=ebookdevsecopssuccess).\n\n## How DevSecOps can benefit businesses\nHere are a few of the ways adopting DevSecOps can benefit a business:\n* Moving to a single platform [reduces money spent on toolchain](/the-source/platform/devops-teams-want-to-shake-off-diy-toolchains-a-platform-is-the-answer/) licenses and management\n* Focusing on security helps secure the business and its customers\n* Increasing productivity leads to faster time to market, which means staying ahead of competitors\n* Increasing security enables DevOps teams to identify issues that could ultimately hurt the brand and break customer and partner trust\n* Catching and fixing security vulnerabilities reduces legal liability connected to breaches\n* Having DevOps teams work cross-functionally in a single tool fosters collaboration in the software development team and across the entire company\n* Managing access controls, policies, and audits in one platform eases and promotes compliance\n* Unifying with a platform makes it easier to be cloud agnostic, which makes a company more resistant to 
vendor outages\n* Increasing visibility into every component in the [software supply chain](https://about.gitlab.com/blog/the-ultimate-guide-to-software-supply-chain-security/) boosts security and reliability\n* Automating scanning takes human error out of the equation by reducing the chances of having vulnerabilities that hackers can exploit\n\n## How DevSecOps drives ROI\n“Central for any executive is ROI and if a decision will bring their business forward or backward,” says [Ayoub Fandi](https://gitlab.com/ayofan), senior field security engineer at GitLab. “A DevSecOps platform brings all of the features, security tools, and automation of DevSecOps together in a single app so there’s no buying, stitching, and maintaining a complicated jumble of tools. It also means a company isn’t paying for all of those licensing costs, subscription fees, and maintenance expenses. Each year, companies spend more and more on technology, so if they can find a way to reduce their spending on that, it’ll be very welcome to executives.”\n\nBy helping protect a company’s brand, enabling the organization to stay ahead in a competitive field and remain compliant, and giving executives visibility over what’s causing slowdowns or increasing velocity, DevSecOps can be a valuable business tool.\n\n## What is a DevSecOps platform?\nA [comprehensive DevSecOps platform](https://about.gitlab.com/platform/) brings all of the features, security controls, and automation of DevSecOps together in one end-to-end application, so there’s no buying and supporting a complicated jumble of tools.\n\n“If I were in a company’s buying seat, I would be looking at DevSecOps,” says Fandi. “Executives want to worry less about compliance issues and security breaches that could have a big impact on their revenue, their ability to grow the company, and customers’ confidence in doing business with them. 
With DevSecOps, they can worry less about all of that.”\n\n_Explore how DevSecOps and a DevSecOps platform can help organizations save money, boost competitiveness, increase security, and more. [Download our comprehensive guide to get started](https://page.gitlab.com/resources-ebook-devsecops-success.html?utm_campaign=devsecopsplat&utm_content=ebookdevsecopssuccess)._\n",[484,1408,9],"careers",{"slug":1410,"featured":6,"template":683},"how-devsecops-drives-business-success","content:en-us:blog:how-devsecops-drives-business-success.yml","How Devsecops Drives Business Success","en-us/blog/how-devsecops-drives-business-success.yml","en-us/blog/how-devsecops-drives-business-success",{"_path":1416,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":1417,"content":1423,"config":1429,"_id":1431,"_type":16,"title":1432,"_source":18,"_file":1433,"_stem":1434,"_extension":21},"/en-us/blog/7-tips-on-how-to-successfully-talk-to-execs-about-devsecops",{"title":1418,"description":1419,"ogTitle":1418,"ogDescription":1419,"noIndex":6,"ogImage":1420,"ogUrl":1421,"ogSiteName":697,"ogType":698,"canonicalUrls":1421,"schema":1422},"7 tips on how to successfully talk to execs about DevSecOps","If you want to begin using DevSecOps to improve software development, you need to get business executives behind your plan. 
Here are tips to do just that.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749670694/Blog/Hero%20Images/how-to-keep-remote-teams-engaged-cover.jpg","https://about.gitlab.com/blog/7-tips-on-how-to-successfully-talk-to-execs-about-devsecops","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"7 tips on how to successfully talk to execs about DevSecOps\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Sharon Gaudin\"}],\n        \"datePublished\": \"2023-07-12\",\n      }",{"title":1418,"description":1419,"authors":1424,"heroImage":1420,"date":1425,"body":1426,"category":14,"tags":1427},[1364],"2023-07-12","\nIf you want to begin using DevSecOps to speed, secure, and improve software development, you need to get business executives behind your plan. But talking with leadership – especially C-suite executives – isn’t always easy.\n\nSoftware development teams want to use DevSecOps because it will reduce hands-on work, make the development process more efficient, [foster collaboration](/blog/5-ways-collaboration-boosts-productivity-and-your-career/), [improve security](/the-source/security/how-to-strengthen-security-by-applying-devsecops-principles/), and speed development and deployment. Getting executives to understand how that all translates into [business benefits](/blog/five-essential-business-benefits-a-devops-platform-gives-smbs/) is the key here. That’s what will spur them to OK a DevSecOps adoption.\n\n> **Be sure to download our latest guide *[How to drive business success with DevSecOps](https://page.gitlab.com/resources-ebook-devsecops-success.html?utm_campaign=devsecopsplat&utm_content=ebookdevsecopssuccess)* for more advice.**\n\nTo help executives understand the technical and business benefits of [DevSecOps](/blog/its-time-to-put-the-sec-in-devsecops/) there are pitfalls you’ll need to avoid, as well as things you should make sure you do and discuss. 
Let’s dive into 7 things to consider right from the start.\n\n## 1. Know your audience\nEveryone is different. Some executives want every little detail. Others want a quick overview. And if, for instance, you’re talking with a CEO, focus on reducing costs and how more efficient and faster production can boost revenue and improve time to market. For CIOs, focus on productivity and efficiency. Tell them how automation and artificial intelligence (AI) features will save teams time and hands-on work.\n\n## 2. Find a champion\nIf you’re going to be walking into a boardroom packed with C-suite executives or having a small meeting with a CEO, it helps to have backup. Find an executive who knows the language of business and key business drivers and pitch the idea of using DevSecOps to get her backing. Then she can help you make the pitch to other executives.\n\n## 3. Make sure you have a plan\nBefore talking to an executive, you first need to draft a plan. Create a basic outline that gives you guidance about the key points to touch on, but also leaves room to take questions and feedback. Make sure you listen as much as you talk. Meetings are learning opportunities.\n\n## 4. Don’t geek out on the tech\nRemember that you are talking to business people. It’s easy for a technical person to fall back on using technical lingo and talking about the excitement of using the shiniest tools. But that’s not going to get someone far with most executives. Even a CTO, who is a technical person, is focused on the business – and how any technology is going to support that business or weigh it down. Don't use technical jargon or acronyms. Convey engineering objectives into a language of profit and loss. Tell execs why migrating to a DevSecOps platform will make the software development team, and the company as a whole, more successful.\n\n## 5. 
Do your homework\nIf you work for a public company, listen to the quarterly reports to learn about immediate business objectives and long-term strategic goals. Have a coffee chat with or shadow someone who works in financial planning, analysis, and/or accounting. Learn from your colleagues how the company makes money and what its business needs are. Understand challenges, like security issues, [compliance issues](/blog/top-5-compliance-features-to-leverage-in-gitlab/), or competitors coming out with new features faster. Then address those challenges. Make sure your presentation focuses on the company’s specific needs and any potential future challenges.\n\n“I would recommend always starting by figuring out the business needs first,” says [Fatima Sarah Khalid](https://gitlab.com/sugaroverflow), developer evangelist at GitLab. “How does this bring value to the organization’s customers and how does this impact the company? Will it save money, unlock a new customer segment, open up new channels, or boost production and efficiency? These are the kind of strategic levers that are most helpful for leadership to hear.”\n\n## 6. Focus on benefits to the executives\nOf course, executives will want to know how DevSecOps will benefit the business that they’re running, but they’ll also want to know how it can benefit them and the specific job they’re doing. Let executives know that a DevSecOps platform will give them visibility into the entire software development lifecycle so they can see where projects slow down or progress, giving them more insight and control. And make it clear that an end-to-end platform fosters a culture where everyone, from customer service to marketing and the C-suite, can collaborate.\n\n## 7. Don’t forget the money\nAs you plan out what to talk about with executives and what business challenges to focus on, remember that money always has to be part of the conversation. 
Since you likely will be reducing a complex and costly toolchain with the adoption of a DevSecOps platform, estimate the savings in both cost and time that will come from cutting that toolchain. Point out the savings, in terms of money and brand image, by reducing security vulnerabilities. Management also is going to want you to estimate how much it will cost to migrate to a platform, along with the human hours needed, and an adoption timeline.\n\n“Tech people can never forget that executives are very focused on ROI,” says [Ayoub Fandi](https://gitlab.com/ayofan), senior field security engineer at GitLab. “It’s always a central issue. They need to understand if any decision will bring them closer to their business goals. Adopting a DevSecOps platform can be a massive cost reduction. Each year companies spend more money on IT so if they can learn a way to spend less, it’ll be very welcome information.”\n\nRemember that leadership likely is looking for reasons to say yes. You just need to provide them with those reasons – and make sure they’re solid business reasons. Make it clear what an adoption will look like in practice. Show them [case studies](/customers/) of other companies that have made the move.\n\nAnd the impact of enabling executives to understand the benefits of DevSecOps go beyond a single adoption. Learning how to understand an organization’s business needs and strategy, and learning the language of business are [great skills for anyone in tech](/blog/best-advice-for-your-devops-career-keep-on-learning/) to have. 
Continuing to educate yourself and pursuing knowledge on the business side are top ways to increase your standing in your company, your hireability, and [your paycheck](/blog/four-tips-to-increase-your-devops-salary/).\n\nGet even more advice in our our latest guide *[How to drive business success with DevSecOps](https://page.gitlab.com/resources-ebook-devsecops-success.html?utm_campaign=devsecopsplat&utm_content=ebookdevsecopssuccess)*, available for download now.\n",[9,1428,1408],"collaboration",{"slug":1430,"featured":6,"template":683},"7-tips-on-how-to-successfully-talk-to-execs-about-devsecops","content:en-us:blog:7-tips-on-how-to-successfully-talk-to-execs-about-devsecops.yml","7 Tips On How To Successfully Talk To Execs About Devsecops","en-us/blog/7-tips-on-how-to-successfully-talk-to-execs-about-devsecops.yml","en-us/blog/7-tips-on-how-to-successfully-talk-to-execs-about-devsecops",{"_path":1436,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":1437,"content":1442,"config":1447,"_id":1449,"_type":16,"title":1450,"_source":18,"_file":1451,"_stem":1452,"_extension":21},"/en-us/blog/github-to-gitlab-migration-made-easy",{"title":1438,"description":1439,"ogTitle":1438,"ogDescription":1439,"noIndex":6,"ogImage":1222,"ogUrl":1440,"ogSiteName":697,"ogType":698,"canonicalUrls":1440,"schema":1441},"GitHub to GitLab migration the easy way","Learn how easy it is to migrate from GitHub to GitLab using GitLab's project import functionality.","https://about.gitlab.com/blog/github-to-gitlab-migration-made-easy","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"GitHub to GitLab migration the easy way\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Fernando Diaz\"}],\n        \"datePublished\": \"2023-07-11\",\n      }",{"title":1438,"description":1439,"authors":1443,"heroImage":1222,"date":1444,"body":1445,"category":14,"tags":1446},[1086],"2023-07-11","\nIf you are using 
different CI/CD tools and are considering migrating over to GitLab, you may be wondering about\nthe difficulty of the migration process. Migration is usually a concern for [DevSecOps](https://about.gitlab.com/topics/devsecops/) teams when considering a new solution. This is due to the fact that migrating may involve heavy lifting. However, migrating to the GitLab AI-powered DevSecOps Platform can be extremely simple and I will show you how step by step. \n\nIn this blog post, we will go over how to migrate from GitHub to GitLab using our [project import](https://docs.gitlab.com/ee/user/project/import/) functionality. Manually migrating GitHub Actions to GitLab pipelines will be covered as well. I have also created a video going over the migration process for those who prefer that format:\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/0Id5oMl1Kqs\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\n## What data can be migrated from GitHub to GitLab?\nGitLab's built-in importer allows for GitHub projects to be automatically migrated into GitLab. The built-in importer\nis accessed directly from GitLab's project creation UI. 
From the UI, you can select what data you wish to migrate to GitLab.\n\nThe data that can be migrated includes the following:\n* Repository description\n* Git repository data\n* Branch protection rules\n* Collaborators (members)\n* Issues\n* Pull requests\n* Wiki pages\n* Milestones\n* Labels\n* Release notes content\n* Release notes attachments\n* Comment attachments\n* Issue description attachments\n* Pull request description attachments\n* Pull request review comments\n* Regular issue and pull request comments\n* Git Large File Storage (LFS) objects\n* Pull request reviews\n* Pull request assigned reviewers\n* Pull request “merged by” information\n* Pull request comments replies in discussions\n* Pull request review comments suggestions\n* Issue events and pull requests events\n\nGitHub and GitLab have different naming conventions and concepts, so a mapping must be performed during the migration. For example, when collaborators/members are migrated, roles from GitHub are mapped to the appropriate GitLab roles as follows:\n\n| GitHub role | GitLab role |\n| ----------- | ----------- |\n| Read        | Guest       |\n| Triage      | Reporter    |\n| Write       | Developer   |\n| Maintain    | Maintainer  |\n| Admin       | Owner       |\n\n## Prerequisites\nNow that you have an understanding of what can be imported, let's review the prerequisites for performing the migration.\n\nWith the GitLab importer, you can either import your projects from **GitHub.com** or **GitHub Enterprise** to either **GitLab.com** or **Self-managed GitLab** as long as you meet the following requirements:\n* You must be a Maintainer on the GitLab destination group you are importing to from GitHub\n* Each GitHub author and assignee in the repository must have a public-facing email address on GitHub that matches their GitLab email address\n* GitHub accounts must have a public-facing email address that is populated\n* [GitHub import 
source](https://docs.gitlab.com/ee/administration/settings/visibility_and_access_controls.html#configure-allowed-import-sources) must be enabled (Self-managed GitLab only)\n\nWhen migrating a user, GitLab uses the public-facing email address in GitHub to verify the user with the same email on GitLab. Because email ownership is unique, you'll know you have set a valid user with valid permissions.\n\n## Performing the import\nNow let's go over how to perform the migration. I will be migrating my project, the [Reddit sentiment analyzer](https://github.com/fishtoadsoft/reddit-sentiment-analyzer), from GitHub to GitLab. The Reddit sentiment analyzer contains a pull request (called a merge request in GitLab), issues, and comments. \n\n**Note:** While you may not have permissions to my project, the step-by-step process applies to any project you own. I am using my project so you can see how I migrate GitHub Actions in the next section. Now, let's get started!\n\n1) Create a new project in GitLab using the [Project Creation Interface](https://gitlab.com/projects/new).\n\n2) Select the **Import Project** box. This allows you to migrate data from external sources.\n\n![Import project box](https://about.gitlab.com/images/blogimages/2023-july-github-to-gitlab-migration/import_project.png)\n\n3) Under **Import project from**, press the **GitHub** button. This will take you to the **Authenticate with GitHub** page.\n\n4) Press the **Authenticate with GitHub** button. You can also use a [personal access token](https://docs.github.com/en/authentication/keeping-your-account-and-data-secure/managing-your-personal-access-tokens) from GitHub with the **repo scope** if you prefer. 
This will take you to the GitHub authorization app.\n\n5) From here, you can grant access to [GitHub organization(s)](https://docs.github.com/en/organizations/collaborating-with-groups-in-organizations/about-organizations) where the projects you wish to migrate are located.\n\n![GitHub authorization app](https://about.gitlab.com/images/blogimages/2023-july-github-to-gitlab-migration/github_authorize_app.png)\n\n6) Press the **Grant** button for the organization where the project you wish to migrate is stored.\n\n7) Press the **Authorize gitlabhq** button to grant GitLab access to the organization(s) selected. You will then be taken to the import selection page.\n\n8) From here, you can select the items you wish to import. \n\n![Import selection](https://about.gitlab.com/images/blogimages/2023-july-github-to-gitlab-migration/import_selection.png)\n\n**Note:** The more items you choose to migrate, the longer the import will take.\n\n9) Then you must set the GitLab location you want to migrate the GitHub project to.\n\n![Set the GitLab location to migrate to](https://about.gitlab.com/images/blogimages/2023-july-github-to-gitlab-migration/import_to.png)\n\n10) Press the **Import** button and the import will begin. You can see the progress in the UI. Once the import is complete the status will be changed to \"complete.\"\n\n[Import progress status](/images/blogimages/2023-july-github-to-gitlab-migration/import_progress.png)\n\nNow you should have the imported project in your workspace. Mine is called [https://gitlab.com/awkwardferny/reddit-sentiment-analyzer](https://gitlab.com/awkwardferny/reddit-sentiment-analyzer). 
When examining the imported project, you can see the following:\n\n**Repository has been migrated**\n\n![Repository has been migrated](https://about.gitlab.com/images/blogimages/2023-july-github-to-gitlab-migration/migrated_data.png)\n\n**Issue has been migrated**\n\n![Issue has been migrated](https://about.gitlab.com/images/blogimages/2023-july-github-to-gitlab-migration/migrated_issue.png)\n\n**Merge request has been migrated**\n\n![Merge request has been migrated](https://about.gitlab.com/images/blogimages/2023-july-github-to-gitlab-migration/migrated_merge_request.png)\n\n## Migrating GitHub Actions over to GitLab CI/CD\nNow that you have migrated the project over from GitHub, notice that none of the GitHub Actions are running. Don't worry, they are very easy to migrate manually. So let's start the migration process for Actions.\n\n1) Examine the GitHub Actions within the **.github/workflows** folder. In the [project you just imported](https://gitlab.com/awkwardferny/reddit-sentiment-analyzer/-/tree/master/.github/workflows), you should see three different Action files:\n\n#### lint.yml\nThis file contains the Action, which performs linting on the source code using flake8. It uses the python:3.10 Docker image and installs the application requirements before performing the lint.\n\n```yaml\nname: \"Lint\"\n\non:\n  push:\n    branches: [ master ]\n  pull_request:\n    branches: [ master ]\n\njobs:\n  lint:\n    runs-on: ubuntu-latest\n    steps:\n    - uses: actions/checkout@v3\n    - name: Set up Python 3.10\n      uses: actions/setup-python@v4\n      with:\n        python-version: \"3.10\"\n    - name: Install dependencies\n      run: |\n        python -m pip install --upgrade pip\n        pip install flake8 pytest\n        if [ -f requirements.txt ]; then pip install -r requirements.txt; fi\n    - name: Lint with flake8\n      run: |\n        # stop the build if there are Python syntax errors or undefined names\n        flake8 . 
--count --select=E9,F63,F7,F82 --show-source --statistics\n        # exit-zero treats all errors as warnings. The GitHub editor is 127 chars wide\n        flake8 . --count --exit-zero --max-complexity=10 --max-line-length=127 --statistics\n```\n\n#### smoke.yml\nThis file contains the action which performs a smoke test by just running the CLI help menu. It uses the python:3.10 Docker image and installs the application requirements before performing the smoke test.\n\n```yaml\nname: \"Smoke Tests\"\n\non:\n  push:\n    branches: [ master ]\n  pull_request:\n    branches: [ master ]\n\njobs:\n  smoke-tests:\n    runs-on: ubuntu-latest\n    steps:\n    - uses: actions/checkout@v3\n    - name: Set up Python 3.10\n      uses: actions/setup-python@v4\n      with:\n        python-version: \"3.10\"\n    - name: Install dependencies\n      run: |\n        python -m pip install --upgrade pip\n        pip install setuptools\n        if [ -f requirements.txt ]; then pip install -r requirements.txt; fi\n    - name: Install Sentiment Analysis Application\n      run: |\n        python setup.py install\n    - name: Run smoke tests\n      run: |\n        reddit-sentiment --help\n```\n\n#### unit.yml\nThis file contains the Action, which performs unit tests using pytest. 
It uses the python:3.10 Docker image and installs the application requirements running the unit tests.\n\n```yaml\nname: \"Unit Tests\"\n\non:\n  push:\n    branches: [ master ]\n  pull_request:\n    branches: [ master ]\n\njobs:\n  unit-tests:\n    runs-on: ubuntu-latest\n    steps:\n    - uses: actions/checkout@v3\n    - name: Set up Python 3.10\n      uses: actions/setup-python@v4\n      with:\n        python-version: \"3.10\"\n    - name: Install dependencies\n      run: |\n        python -m pip install --upgrade pip\n        pip install pytest\n        if [ -f requirements.txt ]; then pip install -r requirements.txt; fi\n    - name: Test with pytest\n      run: |\n        python -m pip install --upgrade pip\n        if [ -f test-requirements.txt ]; then pip install -r test-requirements.txt; fi\n        pytest tests/\n```\n\nNow let's go ahead and migrate these Actions over to GitLab.\n\n2) Go to the recently imported project on GitLab and open up the [WebIDE](https://docs.gitlab.com/ee/user/project/web_ide/).\n\n3) Create a file at the root called [**.gitlab-ci.yml**](https://docs.gitlab.com/ee/ci/yaml/gitlab_ci_yaml.html).\nThis file defines the GitLab pipeline.\n\n4) Add the following configuration, which will add the GitHub Actions as Jobs in the GitLab pipeline. Notice the comments I added describing each section.\n\n```yaml\n# This creates the stages in which the jobs will run. By default all\n# jobs will run in parallel in the stage. Once the jobs are completed\n# successfully then you move on to the next stage. The way jobs run\n# is completely configurable.\nstages:\n  - test\n\n# With the include statement, you can quickly add jobs which have\n# been pre-defined in external YAMLs. 
The SAST job I included below\n# is provided and maintained by GitLab and adds Static Application\n# Security Testing (SAST) to your pipeline.\ninclude:\n  - template: Jobs/SAST.gitlab-ci.yml\n\n# This is the unit test job which does exactly what is defined in\n# the GitHub Action in unit.yml. You can see it uses the python:3.10\n# Docker image, installs the application dependencies, and then runs\n# the unit tests with pytest. It was added with a simple copy and\n# paste and minor syntax changes.\nunit:\n  image: python:3.10\n  stage: test\n  before_script:\n    - python -m pip install --upgrade pip\n    - pip install pytest\n    - if [ -f requirements.txt ]; then pip install -r requirements.txt; fi\n  script:\n    - pytest tests/\n\n# This is the lint job which does exactly what is defined in the\n# GitHub Action in lint.yml. You can see it uses the python:3.10\n# Docker image, installs the application dependencies, and then\n# performs the linting with flake8. It was added with a simple copy\n# and paste and minor syntax changes.\nlint:\n  image: python:3.10\n  stage: test\n  before_script:\n    - python -m pip install --upgrade pip\n    - pip install flake8\n    - if [ -f requirements.txt ]; then pip install -r requirements.txt; fi\n  script:\n    - flake8 . --count --select=E9,F63,F7,F82 --show-source --statistics\n    - flake8 . --count --exit-zero --max-complexity=10 --max-line-length=127 --statistics\n\n# This is the smoke test job which does exactly what is defined in\n# the GitHub Action in smoke.yml. You can see it uses the python:3.10\n# Docker image, installs the application dependencies, and then runs\n# the smoke tests with the Reddit sentiment analysis CLI. 
It was\n# added with a simple copy and paste and minor syntax changes.\nsmoke:\n  image: python:3.10\n  stage: test\n  before_script:\n    - python -m pip install --upgrade pip\n    - pip install setuptools\n    - if [ -f requirements.txt ]; then pip install -r requirements.txt; fi\n    - python setup.py install\n  script:\n    - reddit-sentiment --help\n```\n\nYou can see that scripts being executed in GitLab match those scripts within the GitHub Actions. The only thing that has really changed is the syntax setting up the jobs and stages. To learn more on how to create and configure pipelines, check out the [GitLab CI/CD documentation](https://docs.gitlab.com/ee/ci/).\n\n5) Let's check in the code. From the WebIDE click on the Source Control Tab in the side panel of the WebIDE. It is the [third icon from the top](https://code.visualstudio.com/docs/sourcecontrol/overview#_commit). Then press the **Commit to 'main'** button, select **Continue**, and voila, you should now have a running pipeline.\n\n6) Examine the pipeline and make sure the jobs are running properly. Go back to your project and click on the [pipeline](https://docs.gitlab.com/ee/ci/pipelines/) icon. You can see the the four jobs we created have run.\n\n![Four jobs have run](https://about.gitlab.com/images/blogimages/2023-july-github-to-gitlab-migration/gitlab_jobs.png)\n\n7) Click on the **Unit** job and you can see that the unit tests were run successfully.\n\n```bash\n$ pytest tests/\n============================= test session starts ==============================\nplatform linux -- Python 3.10.11, pytest-7.3.1, pluggy-1.0.0\nrootdir: /builds/awkwardferny/reddit-sentiment-analyzer\ncollected 2 items\ntests/test_scraper.py ..                                                 
[100%]\n============================== 2 passed in 0.09s ===============================\nCleaning up project directory and file based variables\n00:00\nJob succeeded\n```\n\nAnd that's how simple it is to migrate a project over from GitHub to GitLab!\n\n## What other platforms can GitLab import from?\nThe GitLab importer allows one-click migration from several other platforms. These platforms include:\n* [Bitbucket Cloud](https://docs.gitlab.com/ee/user/project/import/bitbucket.html)\n* [Bitbucket Server (Stash)](https://docs.gitlab.com/ee/user/project/import/bitbucket_server.html)\n* [FogBugz](https://docs.gitlab.com/ee/user/project/import/fogbugz.html)\n* [Gitea](https://docs.gitlab.com/ee/user/project/import/gitea.html)\n* [Repository by URL](https://docs.gitlab.com/ee/user/project/import/repo_by_url.html)\n* [Uploading a manifest file (AOSP)](https://docs.gitlab.com/ee/user/project/import/manifest.html)\n* [Jira (issues only)](https://docs.gitlab.com/ee/user/project/import/jira.html)\n\nWe also have documentation covering how to migrate from these platforms:\n* [SVN](https://docs.gitlab.com/ee/user/project/import/#import-from-subversion)\n* [ClearCase](https://docs.gitlab.com/ee/user/project/import/clearcase.html)\n* [CVS](https://docs.gitlab.com/ee/user/project/import/cvs.html)\n* [Perforce](https://docs.gitlab.com/ee/user/project/import/perforce.html)\n* [TFVC](https://docs.gitlab.com/ee/user/project/import/tfvc.html)\n\n---\n\nThanks for reading! Now you know how easy it is to migrate from GitHub over to GitLab. 
For more information on GitLab\nand migrating from GitHub, follow the links below:\n\n* [GitHub-to-GitLab project migration documentation](https://docs.gitlab.com/ee/user/project/import/github.html)\n* [Available project importers](https://docs.gitlab.com/ee/user/project/import/#available-project-importers)\n* [GitHub-to-GitLab migration video](https://youtu.be/0Id5oMl1Kqs)\n\nAlso, read how GitLab has been named a leader in the DevOps platforms space by [Gartner](https://about.gitlab.com/blog/gitlab-leader-gartner-magic-quadrant-devops-platforms/) and the integrated software delivery platforms space by [Forrester](https://about.gitlab.com/blog/gitlab-leader-forrester-wave-integrated-software-delivery-platforms/).\n\n_Cover image by [Julia Craice](https://unsplash.com/@jcraice?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText) on [Unsplash](https://unsplash.com/s/photos/migration?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText)_\n",[111,749,9,484],{"slug":1448,"featured":6,"template":683},"github-to-gitlab-migration-made-easy","content:en-us:blog:github-to-gitlab-migration-made-easy.yml","Github To Gitlab Migration Made Easy","en-us/blog/github-to-gitlab-migration-made-easy.yml","en-us/blog/github-to-gitlab-migration-made-easy",{"_path":1454,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":1455,"content":1461,"config":1467,"_id":1469,"_type":16,"title":1470,"_source":18,"_file":1471,"_stem":1472,"_extension":21},"/en-us/blog/three-steps-to-optimize-software-value-streams",{"title":1456,"description":1457,"ogTitle":1456,"ogDescription":1457,"noIndex":6,"ogImage":1458,"ogUrl":1459,"ogSiteName":697,"ogType":698,"canonicalUrls":1459,"schema":1460},"GitLab's 3 steps to optimizing software value streams","Discover the power of GitLab Value Streams Dashboard (VSD) for optimizing software delivery 
workflows.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749667893/Blog/Hero%20Images/workflow.jpg","https://about.gitlab.com/blog/three-steps-to-optimize-software-value-streams","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"GitLab's 3 steps to optimizing software value streams\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Haim Snir\"}],\n        \"datePublished\": \"2023-06-26\",\n      }",{"title":1456,"description":1457,"authors":1462,"heroImage":1458,"date":1463,"body":1464,"category":14,"tags":1465},[833],"2023-06-26","\n\n\u003Ci>This is part three of our multipart series introducing you to the capabilities within GitLab Value Stream Management and the Value Streams Dashboard. In part one, [learn about the Total Time Chart](https://about.gitlab.com/blog/value-stream-total-time-chart/) and how to simplify top-down optimization flow with Value Stream Management. In part two, learn how to [get started with the Value Streams Dashboard](https://about.gitlab.com/blog/getting-started-with-value-streams-dashboard/). \u003C/i>\n\nIt’s no news that software development is a complex process that involves many different stages, teams, and tools. With significant investments made in digital transformation and adopting new tools following the shift to remote work, measuring and managing the business value of the software development lifecycle (SDLC) have become more complex.\n\nThis is where Value Stream Management (VSM) comes in. VSM is a methodology that helps organizations optimize their software delivery process by visualizing, measuring, and improving the flow of value (a.k.a. the “value stream”) from ideation to production. Some examples are: the amount of time it takes to go from an idea to production, the velocity of the project, bottlenecks in the development process, and long-running issues or merge requests. 
As you’ve probably guessed from its title, this blog will cover how the [new capabilities of GitLab Value Streams Dashboard](https://about.gitlab.com/releases/2023/05/22/gitlab-16-0-released/#value-streams-dashboard-is-now-generally-available) can help you do all that, and optimize your software delivery.\n\n## Value Stream Management in a nutshell \nGitLab [VSM](https://about.gitlab.com/solutions/value-stream-management/) provides end-to-end visibility into your software delivery process. It enables you to [map out your value stream](https://docs.gitlab.com/ee/user/group/value_stream_analytics/#create-a-value-stream-with-custom-stages), identify bottlenecks, measure key metrics, and identify the places where you are either lagging or doing exceptionally well. It then also allows you to take action on these insights. In essence, GitLab VSM helps you to understand and optimize your development processes to deliver software faster and better.\n\n![GitLab Value Stream Analytics](https://about.gitlab.com/images/blogimages/2023-05-24-vsm-overview.png){: .shadow}\nWith Value Stream Analytics, you can establish a baseline for measuring software delivery performance progress and identifying the touchpoints in the process that do not add value to the customer or your business.\n{: .note.text-center}\n\nAnd if you’re wondering how GitLab VSM is able to do that, it’s because GitLab provides an entire DevSecOps platform as a single application and, therefore, holds all the data needed to provide end-to-end visibility throughout the entire SDLC. So now, your decisions rely on actual data rather than blind estimation or gut feelings. 
Additionally, since GitLab is the place where work happens, these insights are also actionable, allowing your users to move from “understanding” to “fixing” at any time, from within their workflow and without losing context.\n\n## How VSM works: The three-step analysis\nLet’s take a look at how GitLab VSM helps you optimize your SDLC in three easy steps:\n\n**Step 1:** Get an end-to-end view across your entire organization and pinpoint the value streams you need to focus on.\n\nThe [Value Streams Dashboard](https://docs.gitlab.com/ee/user/analytics/value_streams_dashboard.html) is a centralized view where you can see and compare all of the SDLC metrics of all your organization's projects. This dashboard enables you to identify hotspots in your SDLC streams — projects or teams that are underperforming, with longer stages and cycle times. It also shows you where you have the largest value contributors, so you can identify and learn what is working well and what's not. With this information at hand, you can now prioritize your efforts and understand where to spend your time.\n\n![VSM illustration](https://about.gitlab.com/images/blogimages/2023-05-24_vsm1.gif){: .shadow}\n\n\nThis centralized UI acts as a single source of truth for your organization, where all the relevant stakeholders can access, view, and analyze the same set of metrics. This ensures everyone is on the same page, promoting consistency in analysis and decision-making.\n\nRead more: [Getting started with the new GitLab Value Streams Dashboard](https://about.gitlab.com/blog/getting-started-with-value-streams-dashboard/)\n\n**Step 2:** Drill down into a specific project.\n\nWhen you select a project from the main dashboard, you are directed to that project's Value Stream Analytics (VSA), where you see its value stream. The project's metrics are presented for each stage of the project, helping you understand where the main work lies and which stages need improvement. 
The VSA overview provides valuable insights into lead times, cycle times, and other critical metrics that help you identify areas for optimization.\n\n![VSM illustration](https://about.gitlab.com/images/blogimages/2023-05-24_vsm2.gif){: .shadow}\n\n\nRead more: [Value stream management: Total Time Chart simplifies top-down optimization flow](https://about.gitlab.com/blog/value-stream-total-time-chart/)\n\n**Step 3:** Dive deep into the Value Stream Analytics dashboard to analyze and fix issues.\n\nOnce the main areas of interest are identified, GitLab Value Stream Analytics (VSA) enables you to drill down further into a specific stage of the project. In the stage table, you can sort the **Last event** column to view the most recent workflow event, and sort the items by **duration** so you can rearrange the events and gain insights faster. This way, you can easily detect work items that are slowing down the project in that stage. Here's an example how we dogfood [VSA on gitlab-org](https://gitlab.com/gitlab-org/gitlab/-/value_stream_analytics). \n\nYou can identify the owner of the work items responsible for the delays, examine code changes, and perform a comprehensive analysis of the issue. This level of visibility and traceability empowers you to take targeted actions and make the necessary improvements to optimize the value stream, all within the context of your current workflow.\n\n![VSM illustration](https://about.gitlab.com/images/blogimages/2023-05-24_vsm3.gif){: .shadow}\nUse GitLab Value Stream Management to visualize the progress of work from planning to value delivery, and gain actionable context.\n{: .note.text-center}\n\n## The value of Value Stream Management\nGitLab VSM is a powerful solution that fits seamlessly into your SDLC. By providing end-to-end visibility and granular, actionable insights into the value stream, VSM enables you to optimize your software delivery and provide value to your customers faster. 
Access the information you need, when you need it — and easily act on it from within your workplace. VSM offers you the best of both worlds: out-of-the-box functionality and the ability to customize features.\n\nSay goodbye to time-consuming searches and hello to instant access to the information you need most. To learn more, check out the [Value Stream Analytics documentation](https://docs.gitlab.com/ee/user/analytics/value_streams_dashboard.html).\n\nTo help us improve the Value Stream Management, please share feedback about your experience in this [survey](https://gitlab.fra1.qualtrics.com/jfe/form/SV_50guMGNU2HhLeT4).\n",[1466,1128,836,728,727],"agile",{"slug":1468,"featured":6,"template":683},"three-steps-to-optimize-software-value-streams","content:en-us:blog:three-steps-to-optimize-software-value-streams.yml","Three Steps To Optimize Software Value Streams","en-us/blog/three-steps-to-optimize-software-value-streams.yml","en-us/blog/three-steps-to-optimize-software-value-streams",{"_path":1474,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":1475,"content":1481,"config":1489,"_id":1491,"_type":16,"title":1492,"_source":18,"_file":1493,"_stem":1494,"_extension":21},"/en-us/blog/contributions-to-latest-git-release",{"title":1476,"description":1477,"ogTitle":1476,"ogDescription":1477,"noIndex":6,"ogImage":1478,"ogUrl":1479,"ogSiteName":697,"ogType":698,"canonicalUrls":1479,"schema":1480},"Git 2.41 release - Here are five of our contributions in detail","Find out how GitLab's Git team helped improve the latest version of Git.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749667792/Blog/Hero%20Images/git-241.jpg","https://about.gitlab.com/blog/contributions-to-latest-git-release","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Git 2.41 release - Here are five of our contributions in detail\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"John 
Cai\"}],\n        \"datePublished\": \"2023-06-20\",\n      }",{"title":1476,"description":1477,"authors":1482,"heroImage":1478,"date":1484,"body":1485,"category":14,"tags":1486},[1483],"John Cai","2023-06-20","\n[Git 2.41](https://gitlab.com/gitlab-org/git/-/raw/master/Documentation/RelNotes/2.41.0.txt)\nwas officially released on June 1, 2023, and included some improvements from GitLab's Git team. Git is the foundation of\nrepository data at GitLab. GitLab's Git team works on everything from new\nfeatures, performance improvements, documentation improvements, and growing the Git\ncommunity. Often our contributions to Git have a lot to do with the way we integrate Git into\nour services at GitLab. Here are some highlights from this latest Git release,\nand a window into how we use Git on the server side at GitLab.\n\n## 1. Machine-parseable fetch output\nWhen `git-fetch` is run, the output is a familiar for users of Git and looks\nsomething like this:\n\n```bash\n> git fetch\nremote: Enumerating objects: 296, done.\nremote: Counting objects: 100% (189/189), done.\nremote: Compressing objects: 100% (103/103), done.\nremote: Total 296 (delta 132), reused 84 (delta 84), pack-reused 107\nReceiving objects: 100% (296/296), 184.46 KiB | 11.53 MiB/s, done.\nResolving deltas: 100% (173/173), completed with 42 local objects.\nFrom https://gitlab.com/gitlab-org/gitaly\n   cfd146b4d..a69cf20ce  master                                                                             -> origin/master\n   3a877b8f3..854f25045  15-11-stable                                                                       -> origin/15-11-stable\n * [new branch]          5316-check-metrics-and-decide-if-need-to-context-cancel-the-running-git-process-in -> origin/5316-check-metrics-and-decide-if-need-to-context-cancel-the-running-git-process-in\n + bdd3c05a2...0bcf6f9d4 blanet_default_branch_opt                                                          -> origin/blanet_default_branch_opt  (forced 
update)\n * [new branch]          jt-object-pool-disconnect-refactor                                                 -> origin/jt-object-pool-disconnect-refactor\n + f2447981c...34e06e106 jt-replicate-repository-alternates                                                 -> origin/jt-replicate-repository-alternates  (forced update)\n * [new branch]          kn-logrus-update                                                                   -> origin/kn-logrus-update\n + 05cea76f3...258543674 kn-smarthttp-docs                                                                  -> origin/kn-smarthttp-docs  (forced update)\n * [new branch]          pks-git-pseudorevision-validation                                                  -> origin/pks-git-pseudorevision-validation\n + 2e8d0ccd5...bf4ed8a52 pks-storage-repository                                                             -> origin/pks-storage-repository  (forced update)\n * [new branch]          qmnguyen0711/expose-another-port-for-pack-rpcs                                     -> origin/qmnguyen0711/expose-another-port-for-pack-rpcs\n + 82473046f...8e23e474c use_head_reference\n```\n\nThe problem with this output is that it's not meant for machines to parse.\n\nBut why would it be useful to make this output parseable by machines? To understand\nthis, we need to back up a little bit and talk about Gitaly Cluster. [Gitaly Cluster](https://docs.gitlab.com/ee/administration/gitaly/#gitaly-cluster)\nis a service at GitLab that provides high availability of Git repositories by\nreplicating repository writes to replica nodes. Each time a write comes in which\nchanges a Git repository (for example, a push that updates a reference) the write goes to\nthe primary node, and to all replica nodes before the write can succeed. A\nvoting mechanism takes place where the nodes vote on what its updated\nvalue for the reference would be. 
This vote succeeds when a quorum of replica\nnodes have successfully written the ref, and the write succeeds.\n\nOne of our remote procedure calls (RPCs) in Gitaly runs `git-fetch(1)` for repository mirroring. By\ndefault, when `git-fetch(1)` is run, it will update any references that are able\nto be fast-forwarded and fail on any reference that has since diverged will not\nbe updated.\n\nAs mentioned above, whenever there is an operation that modifies a repository, there\nis a voting mechanism that ensures the same modification is made to all replica nodes.\nTo dive in even a little deeper, our voting mechanism leverages Git's reference transaction hook,\nwhich runs an executable once per reference transaction. `git-fetch(1)` by default will\nstart a reference transaction per reference it updates. A fetch that updates hundreds or\neven thousand of references would thus vote once per reference that gets updated.\n\nIn the following sequence diagram, we are only showing one Gitaly node, but for a Gitaly Cluster\nwith, let's say, three nodes, what happens with the Gitaly primary also happens in\nthe replicas.\n\n```mermaid\nsequenceDiagram\n    participant user\n    participant GitlabUI as Gitlab UI\n    participant p as Praefect\n    participant g0 as Gitaly (primary)\n    participant git as Git\n    user->>GitlabUI: mirror my repository\n    GitlabUI->>p: FetchRemote\n    activate p\n    p->>g0: FetchRemote\n    activate g0\n    g0->>git: fetch-remote\n    activate git\n    git->>g0: vote on refs/heads/branch1 update\n    g0->>p: vote on refs/heads/branch1 update\n    git->>g0: vote on refs/heads/branch2 update\n    g0->>p: vote on refs/heads/branch2 update\n    git->>g0: vote on refs/heads/branch3 update\n    g0->>p: vote on refs/heads/branch3 update\n    deactivate git\n    note over p: vote succeeds\n    p->>GitlabUI: success\n    deactivate g0\n    deactivate p\n\n```\n\nThis is inefficient. 
Ideally we would want to vote once per batch of references\nupdated from one `git-fetch(1)` call. There is an option `--atomic` in\n`git-fetch(1)` that will open one reference transaction for all references\nupdated by `git-fetch(1)`. However, when `--atomic` is used, a `git-fetch` call will fail if any references have since diverged. This is not how we want repository mirroring to work. We actually want `git-fetch` to update whichever refs it can.\n\nSo, that means we cannot use the `--atomic` flag and are thus stuck voting per reference we update.\n\n### Solution: Handle the reference update ourselves\nThe way we are solving this inefficiency is to handle the reference update\nourselves. Instead of relying on `git-fetch(1)` to both fetch the objects and\nupdate all the references, we can use the `--dry-run` option of `git-fetch(1)`\nto first fetch the objects into a quarantine directory. Then if we can know\nwhich references *would* be updated, we can start a reference transaction\nourselves with `git-update-ref(1)` and update all the refs in one transaction,\nhence triggering a single vote only.\n\n```mermaid\n\nsequenceDiagram\n    participant user\n    participant Gitlab UI\n    participant p as Praefect\n    participant g0 as Gitaly (primary)\n    participant git as Git\n    user->>Gitlab UI: mirror my repository\n    Gitlab UI->>p: FetchRemote\n    activate p\n    p->>g0: FetchRemote\n    g0->>git: fetch-remote --dry-run --porcelain\n    activate git\n    note over git: objects are fetched into a quarantine directory\n    git->>g0: branch1, branch2, branch3 will be updated\n    deactivate git\n    g0->>git: update-ref\n    activate git\n    note over git: update branch1, branch2, branch3 in a single transaction\n    git->>g0: reference transaction hook\n    deactivate git\n    g0->>p: vote on ref updates\n    note over p: vote succeeds\n    p->>Gitlab UI: success\n    deactivate p\n\n```\n\nA requirement for this however, is that we would be able to parse 
the output of\n`git-fetch(1)` to tell which refs will be updated and to what values. Currently\nin `--dry-run`, `git-fetch(1)`'s output cannot be parsed by a machine.\n\n[Patrick Steinhardt](https://gitlab.com/pks-gitlab), Staff Backend Engineer, Gitaly, added a  `--porcelain` [option to git-fetch](https://git-scm.com/docs/git-fetch#Documentation/git-fetch.txt---porcelain)\nthat causes `git-fetch(1)` to gives its output in a machine-parseable format.\n\n```\n> git fetch --porcelain --dry-run --quiet\n* cd7ec0e2505463855d04f0a685d53af604079bdf 023a4cca58ac713090df15015a2efeadc73be522 refs/remotes/origin/master\n* 0000000000000000000000000000000000000000 b4a007671bd331f1c6f5857aa9a6ab95d500b412 refs/remotes/origin/alejguer-improve-readabiliy-geo\n  2314938437eb962dadd6a88f45d463f8ed2c7cec 3d3e36fa40e9b87b90ef31f80c63c767d0ef3638 refs/remotes/origin/ali/document-keyless-container-signing\n+ c8107330f8d5a938f6349743310db030ca5159e6 e155670196e4974659304c79e670b238192bce08 refs/remotes/origin/fc-add-failed-jobs-in-mr-part-2\n+ 9ec873de405b3c5078ad1c073711a222e7734337 eb7947e37d05460a94c988bf1f408f96228dd50d refs/remotes/origin/fc-mvc-details-page\n* 0000000000000000000000000000000000000000 36d214774f39d3c3d0569df8befd2b46d22ea94b refs/remotes/origin/group-runner-docs\n+ b357bfdec53b96e76582ac5dd64deb2d35dbe697 7b85d775b1a46ea94e0b241aa0b6aa37ae2e0b69 refs/remotes/origin/jwanjohi-add-abuse-training-data-table\n+ c9beb0b9c0b933903c12393acaa2c4447bb9035f fd13eda262c67a48495a0695659fea10b32e7e02 refs/remotes/origin/jy-permissions-blueprint\n+ 9ecf5a7fb7ca39a6a4296e569af0ddff1058a830 3341369e650c931c46d9880f3b781dc1e21c9f75 refs/remotes/origin/kassio/spike-pages-review-apps\n```\n\nThis change allows us to be much more efficient when mirroring repositories.\n\nDetails of the patch series, including discussions can be found [here](https://lore.kernel.org/git/cover.1683721293.git.ps@pks.im/).\n\n## 2. 
A new way to read Git attribute files\n[Git attribute](https://docs.gitlab.com/ee/user/project/git_attributes.html) is\na way to define attributes in a Git repository such as syntax highlighting. Until now, Git only read `.gitattribute` files in the wokrtree or the\n`.git/info/attributes` files. On Gitaly servers, we store repositories on disk\nas [bare\nrepositories](https://git-scm.com/docs/git-clone#Documentation/git-clone.txt---bare).\nThis means that on the server we don't keep worktrees around. To\nsupport gitattributes on GitLab then, we use a workaround whereby when the user\nchanges attributes on the default branch, we copy the contents of the blob\n`HEAD:.gitattribute` to the `info/attributes` file.\n\n\u003Cpre class=\"mermaid\">\nflowchart TD\n  A[User A] -->|edit HEAD:.gitattributes\u003Cbr/>git push| B[Gitaly]\n  B --> |copy HEAD:.gitattributes\u003Cbr/>to info/attributes| C[info/attributes file]\n  D[GitLab UI] --> |Display code with syntax highlighting| B\n  B -.->|how should I do syntax highlighting?\u003Cbr/>Read info/attributes file| C   \n\u003C/pre>\n\n### Solution: New git option to read attribute files directly\nTo get rid of this extra step of copying a blob to `info/attributes`,\nI added a new git\n[option](https://git-scm.com/docs/git#Documentation/git.txt---attr-sourcelttree-ishgt)\n`--attr-source=\u003Ctree>` whereby a caller can pass in a tree from which Git will\nread the attributes file directly. This way Git can read the attributes blob directly\nwithout a worktree and without having to copy the contents to `info/attributes` each time it changes.\n\n\u003Cpre class=\"mermaid\">\nflowchart TD\n    A[User A] -->|edit HEAD:.gitattributes\u003Cbr/>git push| B[Gitaly]\n    D[GitLab UI] --> |Display code with syntax highlighting|B\n    B --> |Directly read the HEAD:.gitattributes blob|B\n\u003C/pre>\n\nHaving this feature in Git allows us to simplify this process a lot. 
We no longer\nhave to manually copy over the contents to a separate file. Internally, this\nallows us to delete two RPCs, reducing complexity and improving performance.\n\nDetails of this patch series, including discussions can be found [here](https://lore.kernel.org/git/pull.1470.v6.git.git.1683346530487.gitgitgadget@gmail.com/).\n\n## 3. Bug fix in commit-graph generation numbers\nA regression for truncated commit-graph generation numbers is a bug that we have been hitting for\nspecific repositories, corrupting the commit-graph. The [commit\ngraph](https://git-scm.com/docs/commit-graph) is an important Git optimization\nthat speeds up commit graph walks. Commit graph walks happen whenever Git has to\nwalk through commit history. Any time we display commit history in the UI, for\ninstance, it  will trigger a commit graph walk. Keeping these fast is crucial to a\nsnappy browsing experience.\n\n### Solution: A patch series to fix the bug\nPatrick submitted a patch series to fix the regression for truncated commit-graph generation numbers bug \nDetails of this patch series, including discussions can be found [here](https://lore.kernel.org/git/f8a0a869e8b0882f05cac49d78f49ba3553d3c44.1679904401.git.ps@pks.im/).\n\n## 4. Fix for stale lockfiles in `git-receive-pack`\n`git-receive-pack(1)` is a Git command that handles the server-side of pushes. When `git push` is run\nagainst a GitLab server, Gitaly will handle the `ssh` or `http` request and\nspawn a `git-receive-pack(1)` process behind the scenes to handle the push.\n\n`git-receive-pack(1)` will write a lockfile when processing packfiles in order\nto prevent a race condition where a concurrent garbage-collecting process tries\nto delete the new packfile that is not yet being referenced by anything.\n\nWhen the `git-receive-pack(1)` process dies prematurely for whatever reason, this\nlockfile was being left around instead of being cleaned up. 
Busy repositories\nthat received many pushes a day could grow in size quickly due to the\naccumulation of these lockfiles.\n\n### Solution: A patch series to clean up unused lockfiles\nPatrick fixed this by submitting a patch series that allows `git-receive-pack(1)` to clean up its unused lockfiles. This allows GitLab to save space on its servers from having to keep useless lockfiles around.\n\nDetails of this patch series, including discussions can be found [here](https://lore.kernel.org/git/e1ee1d8026a361bc58d16bc741e2b347ada7a53e.1678431076.git.ps@pks.im/).\n\n## 5. Fixed geometric repacking with alternate object databases\n[Geometric repacking](https://git-scm.com/docs/git-repack#Documentation/git-repack.txt---geometricltfactorgt)\nis a repacking strategy where instead of packing everything into on giant pack\neach time, several packs are kept around according to a geometric progression\nbased on object size.\n\nThis is useful for large and very busy repositories so that housekeeping doesn't\nhave to pack all of its objects into a giant pack each time.\n\nUnfortunately, geometric repacking had various corner case bugs when an\nalternate object database was involved. At GitLab, we leverage the Git\nalternates mechanism to save space in the case of forks. A fork of a repository\nshares most files. Instead of keeping a second copy of all the data, when we\ncreate a fork, we can deduplicate this data by having both the source\nrepository, as well as the fork repository share objects by pointing to a third\nrepository. This means that only one copy of a blob needs to be kept around\nrather than two.\n\nGeometric repacking bugs prevented it from working in an object database that\nwas connected to an alternate object database.\n\n### Solution: A patch series\nThese bugs have been fixed via a patch series from Patrick. 
This\nhelps us as we improve our implementation of object pools in Gitaly.\n\nDetails of this patch series, including discussions can be found [here](https://lore.kernel.org/git/cover.1681452028.git.ps@pks.im/).\n",[1487,837,1488,269],"git","open source",{"slug":1490,"featured":6,"template":683},"contributions-to-latest-git-release","content:en-us:blog:contributions-to-latest-git-release.yml","Contributions To Latest Git Release","en-us/blog/contributions-to-latest-git-release.yml","en-us/blog/contributions-to-latest-git-release",{"_path":1496,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":1497,"content":1502,"config":1508,"_id":1510,"_type":16,"title":1511,"_source":18,"_file":1512,"_stem":1513,"_extension":21},"/en-us/blog/value-stream-total-time-chart",{"title":1498,"description":1499,"ogTitle":1498,"ogDescription":1499,"noIndex":6,"ogImage":1060,"ogUrl":1500,"ogSiteName":697,"ogType":698,"canonicalUrls":1500,"schema":1501},"Value stream optimization with GitLab's Total Time Chart","Learn how this new analytics feature provides immediate insights about the time spent in each stage of your workstream.","https://about.gitlab.com/blog/value-stream-total-time-chart","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Value stream management: Total Time Chart simplifies top-down optimization flow\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Haim Snir\"}],\n        \"datePublished\": \"2023-06-01\",\n      }",{"title":1503,"description":1499,"authors":1504,"heroImage":1060,"date":1505,"body":1506,"category":14,"tags":1507},"Value stream management: Total Time Chart simplifies top-down optimization flow",[833],"2023-06-01","\n\nUnderstanding where time is spent during the development lifecycle is a crucial insight for software leaders when optimizing the value delivery to customers. 
Our new Value Stream Analytics Total Time Chart is a visualization that helps managers uncover how long it actually takes to complete the development process from idea to production. Managers also can learn how much time teams spend in each stage of the workflow.\n \n![The VSA Total Time Chart displays the average time to complete each value stream stage.](https://about.gitlab.com/images/blogimages/2023-05-07-vsa-overview.gif){: .shadow}\nValue Stream Analytics Total Time Chart\n{: .note.text-center}\n\nValue Stream Analytics is available out of the box in the GitLab platform. It surfaces the process and value delivery metrics through the unified data model that stores all the records around development efforts. Value Stream Analytics uses a backend process to collect and aggregate stage-level data into [three core objects](https://docs.gitlab.com/ee/user/group/value_stream_analytics/#how-value-stream-analytics-works):\n\n- Value streams - container objects with stage list \n- Value stream stage - an event pair of start and end events\n- Value stream stage events - the smallest building blocks of the value stream. For example, from Issue created to Issue first added to board. See the [list of available stage events](https://docs.gitlab.com/ee/user/group/value_stream_analytics/#value-stream-stage-events).\n\n> [Register for the GitLab 16 webinar](/sixteen/), where we will unveil the latest innovations in our AI-powered DevSecOps platform.\n\nWe added in the new chart the stages breakdown as a stacked area chart to make it easier to understand how each stage contributes to the total time, and how that changes over time. Each area in the chart represents a stage. By comparing the heights of each area, you can get an idea about how each stage contributes to the total time of the value stream. 
We also added a tooltip with the stages breakdown sorted top to bottom, to help you understand the stages in their correct order.\n\nThe new chart is available in the Value Stream Analytics Overview page (on the left sidebar, select **Analytics > Value stream**). This page includes four sections:\n  1.  Data filter text box - on the top of the Overview page you can use the [Data filters](https://docs.gitlab.com/ee/user/group/value_stream_analytics/#data-filters) to view data that matches specific criteria or date range. \n  2. Stage navigation bar - below the filter text box you can use the stage navigation bar to investigate what happened in the specific stage and to identify the items (issues/MRs) that are slowing down the stage time.\n  3. Key metrics tiles - the summary of the stream performance is displayed, above the chart in the [Key metrics tiles](https://docs.gitlab.com/ee/user/group/value_stream_analytics/#key-metrics). \n  4. Overview charts - the newly added Total Time Chart and the [Task by type](https://docs.gitlab.com/ee/user/group/value_stream_analytics/#view-tasks-by-type) chart. \n\nBut that's not all. The Total Time Chart also simplifies the top-down optimization flow, starting from the Value Streams Dashboard organization-level view to a drill-down into the performance of each project:\n\n\u003Ciframe width=\"560\" height=\"315\" src=\"https://www.youtube.com/embed/EA9Sbks27g4\" frameborder=\"0\" allow=\"accelerometer; autoplay; encrypted-media; gyroscope; picture-in-picture\" allowfullscreen>\u003C/iframe>\n\n\nFrom the Value Stream Analytics overview page, you can drill down from Key metrics tiles into other GitLab analytics pages for deeper investigations. You can also go up to the Value Streams Dashboard, or investigate the [DORA metrics](/solutions/value-stream-management/dora/) that are also available in the new dashboard.\n\nIt's important to note that the chart data is limited to items completed within the selected date range. 
Also, there could be points in time with no [\"stage event\"](https://docs.gitlab.com/ee/user/group/value_stream_analytics/#value-stream-stage-events) actions. In these cases, the chart will display a dashed line to represent the missing data. These gaps can add contextual information about the workstream, and usually do not represent interruptions in the data. When there is \"no data\" for a specific stage, the stage line will be flat.\n\nTo learn more check out the [Value Stream Analytics documentation](https://docs.gitlab.com/ee/user/group/value_stream_analytics/).\n\nWith the Value Stream Analytics Total Time Chart, you get immediate insights about the time spent in each stage over time to determine if progress is being made. Try it out today and see the difference it can make in your workstream!\n",[1466,1128,836,728,727],{"slug":1509,"featured":6,"template":683},"value-stream-total-time-chart","content:en-us:blog:value-stream-total-time-chart.yml","Value Stream Total Time Chart","en-us/blog/value-stream-total-time-chart.yml","en-us/blog/value-stream-total-time-chart",{"_path":1515,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":1516,"content":1522,"config":1529,"_id":1531,"_type":16,"title":1532,"_source":18,"_file":1533,"_stem":1534,"_extension":21},"/en-us/blog/lockheed-martin-aws-gitlab",{"title":1517,"description":1518,"ogTitle":1517,"ogDescription":1518,"noIndex":6,"ogImage":1519,"ogUrl":1520,"ogSiteName":697,"ogType":698,"canonicalUrls":1520,"schema":1521},"GitLab, AWS help strengthen Lockheed Martin’s digital transformation","Lockheed Martin’s software factory selected GitLab’s DevSecOps Platform, along with AWS, to streamline toolchains, increase collaboration, and more.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749668830/Blog/Hero%20Images/lockheed-martin-cover-2.jpg","https://about.gitlab.com/blog/lockheed-martin-aws-gitlab","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": 
\"Article\",\n        \"headline\": \"GitLab, AWS help strengthen Lockheed Martin’s digital transformation\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"GitLab\"}],\n        \"datePublished\": \"2023-05-16\",\n      }",{"title":1517,"description":1518,"authors":1523,"heroImage":1519,"date":1525,"body":1526,"category":14,"tags":1527},[1524],"GitLab","2023-05-16","\nLockheed Martin launched its 1LMX initiative to transform its end-to-end business processes and systems. One focus of the transformation was to pare down the company’s wide variety of DevOps tools – each program or product line at Lockheed Martin had its own toolchain. To mitigate this issue, drive rapid production, and increase collaboration, Lockheed Martin adopted GitLab’s DevSecOps Platform, run on AWS.\n\n“GitLab has strengthened our 1LMX transformation, upgrading the way we collaborate and innovate to develop software. Now, all of our programs have access to a high-quality software development environment,” said Alan Hohn, Lockheed Martin’s Director of Software Strategy.\n\nGitLab’s DevSecOps Platform enables Lockheed Martin to ship software more efficiently and securely for thousands of their programs, ranging from satellite platforms and aerospace systems to ground control software and maritime surface and subsurface software.\n\nHere are some top-level benefits that Lockheed Martin has seen with GitLab’s DevSecOps Platform:\n* Using GitLab’s single platform, Lockheed Martin’s legacy projects are delivered to testing every six days, down from a monthly cadence using distributed toolchains.   \n* Developers experienced a 90% reduction in time spent on system maintenance.\n* The organization has seen 200% annual growth in adoption of The DevSecOps Platform.\n* AWS enabled automated Infrastructure as Code for a scalable and resilient cloud architecture.\n\n## Efficiency gains\n\nIn migrating to GitLab, Lockheed Martin has realized a number of benefits and eliminated obstacles. 
In three and a half years, Lockheed Martin has created 64,000 projects on GitLab, and created 110,000 continuous integration builds daily. \n\nAdditionally, they were able to retire thousands of separately maintained servers thereby reducing time spent on maintenance by 90%. GitLab further enables internal efficiency within the organization by allowing teams to securely share reusable code components in globally accessible environments. Since implementing GitLab, Lockheed Martin teams have added 18 new repositories a day for the past two years. \n\n## How GitLab, AWS, and Lockheed Martin work together\n\nIn 2022, after rapid adoption of GitLab created the need for a more scalable solution, Lockheed Martin, GitLab, and AWS worked together to automate and optimize Lockheed Martin's code deployment across the enterprise. \n\nThe solution started with a well-architected review of the design between Lockheed Martin, AWS, and GitLab. AWS then helped to automate and optimize the Lockheed Martin GitLab deployment for continuous integration and continuous delivery (CI/CD) environment by delivering Infrastructure as Code to deploy the environment in two hours vs. several hours previously. \n\nThe AWS team also established workflows to deliver a fully automated, highly available, disaster recovery-compliant, scalable architecture for GitLab enabling a consistent process that runs without manual intervention.\n\nAWS supported load balancing to auto-scale the deployment process based on developer demand for pipeline runs and user traffic so that developers are not waiting on their deployments to execute. Pre-migration testing was performed to establish baselines, followed by post-migration testing to measure performance and scalability gains in delivering faster deployments. \n\nAdditionally, monitoring and security controls were implemented to comply with Lockheed Martin policies. 
As a result, the team was able to deliver operational efficiencies with the number of build requests waiting to be processed decreasing from 200 to zero, and reduced time for code deployment across the enterprise.\n\nThis effort showcased how large enterprises with thousands of software developers can build and deploy automated, scalable, and resilient code pipelines in the cloud using platforms such as GitLab by leveraging AWS best practices.\n\nGitLab’s Chief Product Officer David DeSanto added, “For more than a century, Lockheed Martin has set the standard for innovation within the public sector, and demonstrates what is possible when organizations invest in digital transformation efforts.”\n\nLockheed Martin has 20,000 GitLab users, and is looking to double that number and migrate even more of their projects over to The DevSecOps Platform in the coming years. To dig deeper into how Lockheed Martin uses GitLab, read [our case study](/customers/lockheed-martin), and to learn more about GitLab for the Public Sector, visit [our site](/solutions/public-sector/).\n",[1528,1149,1428],"customers",{"slug":1530,"featured":6,"template":683},"lockheed-martin-aws-gitlab","content:en-us:blog:lockheed-martin-aws-gitlab.yml","Lockheed Martin Aws Gitlab","en-us/blog/lockheed-martin-aws-gitlab.yml","en-us/blog/lockheed-martin-aws-gitlab",{"_path":1536,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":1537,"content":1543,"config":1549,"_id":1551,"_type":16,"title":1552,"_source":18,"_file":1553,"_stem":1554,"_extension":21},"/en-us/blog/rise-of-protestware",{"title":1538,"description":1539,"ogTitle":1538,"ogDescription":1539,"noIndex":6,"ogImage":1540,"ogUrl":1541,"ogSiteName":697,"ogType":698,"canonicalUrls":1541,"schema":1542},"Protestware threats: How to protect your software supply chain","Some people protest for change by changing code others depend on throughout the software supply chain. 
Learn more about protestware, its impact, and how to protect against it.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749669673/Blog/Hero%20Images/engineering.png","https://about.gitlab.com/blog/rise-of-protestware","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Protestware threats: How to protect your software supply chain\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Abubakar Siddiq Ango\"}],\n        \"datePublished\": \"2023-05-09\",\n      }",{"title":1538,"description":1539,"authors":1544,"heroImage":1540,"date":1545,"body":1546,"category":14,"tags":1547},[1285],"2023-05-09","\n\nIn 2016, the continuous integration (CI) pipelines of millions of projects failed because a developer decided to pull their projects from npm package registry in [protest of a request to take down or rename one of their packages](https://www.theregister.com/2016/03/23/npm_left_pad_chaos/). In January 2022, the maintainer of the widely used 'colors' and 'faker' packages on the npm registry modified [these projects](https://blog.sonatype.com/npm-libraries-colors-and-faker-sabotaged-in-protest-by-their-maintainer-what-to-do-now?hsLang=en-us), adding malicious code that infinitely printed gibberish in protest of corporations who use open source projects without giving back. These are two examples of \"protestware,\" a term that refers to software packages or applications that have been intentionally modified to send a political message. The impacts may range from seeing unexpected messages in a terminal or logs when building an application to serious adverse impacts like data deletion. \n\nWhile protestware remained rare for a long time, recent high-profile incidents have brought it back into the spotlight. 
Similar code injection variants like [typosquatting](https://www.kaspersky.com/resource-center/definitions/what-is-typosquatting) packages (as in the case of the [colors npm](https://www.mend.io/resources/blog/new-typosquating-attack-on-npm-package-colors-using-cross-language-technique-explained/) package, where bad actors created compromised clones of packages with similar names) and compromised packages (as in the case of the [ctx PyPI packages](https://www.theregister.com/2022/05/24/pypi_ctx_package_compromised/)) are usually perpetrated by bad actors looking to cause harm. Protestware is unusual in that the custodians of projects trusted by the community have allowed or made these changes. Regardless of whether the changes' impacts are harmful, such changes raise ethical concerns and can create unwanted distractions. These risks also reinforce the need for open source consumers to adopt a [zero trust security model](/blog/why-devops-and-zero-trust-go-together/) for their software supply chain. Trust, but verify!\n\nThe world is going through unprecedented movements demanding change, and change seekers will find new and often disruptive ways to be heard, as we have seen in the case of everything from climate activism to TikTok challenges. Software supply chains are not exempt and, as we have learned from past incidents, being proactive is key to staying secure.\n\nHere are some steps you can take to protect your software supply chain by ensuring your dependencies are secure.\n\n## Implement dependency scanning\n\n[Dependency scanning](https://docs.gitlab.com/ee/user/application_security/dependency_scanning/) is now an industry standard, and there is no shortage of tools or libraries to scan your packages, containers, or any other binary formats for vulnerabilities. 
Using GitLab CI’s [`rules:exists`](https://docs.gitlab.com/ee/ci/yaml/#rulesexists) rule, GitLab checks for the presence of certain files to determine the appropriate scans to check for vulnerabilities. Coupled with [Vulnerability Reports](https://docs.gitlab.com/ee/user/application_security/vulnerability_report/), [Policy Management](https://docs.gitlab.com/ee/user/application_security/policies/index.html#policy-management), and the [Security Dashboard](https://docs.gitlab.com/ee/user/application_security/security_dashboard/index.html), your security team and organization can stay ahead of vulnerabilities. To include dependency scanning in your CI pipeline, add the following lines to your `.gitlab-ci.yml` file. You can explore the [Dependency Scanning CI template](https://gitlab.com/gitlab-org/gitlab/-/blob/master/lib/gitlab/ci/templates/Jobs/Dependency-Scanning.gitlab-ci.yml) to understand how it works. \n\n```\n\ninclude:\n\n  template: Jobs/Dependency-Scanning.gitlab-ci.yml\n\n```\n\nRunning the CI script against an example [Ruby on Rails project](https://gitlab.com/gitlab-de/playground/ruby-rails-demo) with Ruby 3.0.4, the [Vulnerability Report](https://gitlab.com/gitlab-de/playground/ruby-rails-demo/-/security/vulnerability_report/?scanner=GitLab.DEPENDENCY_SCANNING) shows more than 70 vulnerabilities detected for the dependencies in the project’s [Gemfile](https://gitlab.com/gitlab-de/playground/ruby-rails-demo/-/blob/master/Gemfile).\n\n\n![Vulnerability Report Image](https://about.gitlab.com/images/blogimages/2023-04-rise-of-protestware/vulnerability-report.png \"Vulnerability Report Image\")\n\n\n## Generate provenance validations\n\nUsers of packages can verify they are not downloading a compromised version using [artifact attestation](https://docs.gitlab.com/ee/ci/runners/configure_runners.html#artifact-attestation), which was [introduced in GitLab Runner 15.1](https://gitlab.com/gitlab-org/gitlab-runner/-/issues/28940/). 
Attestation metadata is generated in the [in-toto format](https://github.com/in-toto/attestation); it provides [provenance](https://slsa.dev/provenance/v0.2) attesting to how a binary was built, and you can verify the artifacts against the provenance. This allows you to achieve [Level 2](/blog/achieve-slsa-level-2-compliance-with-gitlab/) of the Supply-chain Levels for Software Artifacts ([SLSA](https://slsa.dev/)) security framework. \n\nThe demo video below shows how to configure your CI script to generate artifact attestation metadata.\n\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/MlIdqrDgI8U\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\n\n## Utilize private registries\n\n[Self-hosting registries](https://docs.gitlab.com/ee/user/packages/) for packages, container images, or your Terraform modules are a more secure way of ensuring secure and vetted packages are used by your team. Security and compliance teams are enabled to ensure total control of the dependencies used in the entire organization and how they are accessed with [package registry permissions](https://docs.gitlab.com/ee/user/packages/package_registry/index.html#package-registry-visibility-permissions). GitLab supports container, infrastructure, and package registries. Package registries supported include Composer (PHP), Conan (C/C++), Generic, Maven (Java), npm (NodeJS), NuGet (Windows packaging), PyPI (Python), and RubyGems (Ruby).\n\n## Enable Dependency Proxy\nThe [Dependency Proxy](https://docs.gitlab.com/ee/user/packages/dependency_proxy/index.html) reduces the number of requests made to upstream dependency registries by acting as a local proxy. This reduces the impact of changes or vulnerabilities in the upstream packages, as a clean version will still be stored in the Dependency Proxy’s cache. 
This offers faster build times, since the cache is most likely closer to the build system that needs the image, and it ensures continuity when an upstream registry is having downtime or enforcing rate limits — as in the case of [Docker Hub](https://docs.docker.com/docker-hub/download-rate-limit/), which has a limit of 100 container image pulls per 6 hours per IP address for anonymous users as of the time of writing this article.\n\nYou can enable Dependency Proxy in the Packages and Registries section of a group’s settings. Only an administrator can enable/disable the Dependency Proxy for a GitLab instance. \n\n![Dependency Proxy setting image](https://about.gitlab.com/images/blogimages/2023-04-rise-of-protestware/dependency-proxy.png \"Dependency Proxy Setting Image\")\n\n\nTo use the Dependency Proxy in your CI script, you can use the `CI_DEPENDENCY_PROXY_GROUP_IMAGE_PREFIX` predefined variable as shown below:\n\n```\n\n# .gitlab-ci.yml\n\nimage: ${CI_DEPENDENCY_PROXY_GROUP_IMAGE_PREFIX}/ubuntu:latest\n\n```\n\nThe GitLab Runner automatically authenticates with the Dependency Proxy, but if your use case requires manual authentication, like building container images, you can use other predefined CI/CD variables as detailed in the [documentation](https://docs.gitlab.com/ee/user/packages/dependency_proxy/index.html).\n\nGitLab is also working on leveraging the Dependency Proxy to give more control to security teams with the [Dependency Firewall](https://about.gitlab.com/direction/package/#dependency-firewall), which will allow for control of how upstream packages are used and how they impact the organization. 
Package validation and version management can be managed from a central location without impacting the workflow of users.\n\nProactively instrumenting your software development lifecycle to ensure continuous review of your application along with controls is critical to keeping your software supply chain secure and preventing production problems due to protestware.\n",[9,1548,750,484],"zero trust",{"slug":1550,"featured":6,"template":683},"rise-of-protestware","content:en-us:blog:rise-of-protestware.yml","Rise Of Protestware","en-us/blog/rise-of-protestware.yml","en-us/blog/rise-of-protestware",{"_path":1556,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":1557,"content":1563,"config":1570,"_id":1572,"_type":16,"title":1573,"_source":18,"_file":1574,"_stem":1575,"_extension":21},"/en-us/blog/gitlab-product-navigation",{"title":1558,"description":1559,"ogTitle":1558,"ogDescription":1559,"noIndex":6,"ogImage":1560,"ogUrl":1561,"ogSiteName":697,"ogType":698,"canonicalUrls":1561,"schema":1562},"Inside the vision for GitLab’s new platform navigation","A peek into what inspired our new navigation design, which is coming soon.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749668078/Blog/Hero%20Images/cover-image-helm-registry.jpg","https://about.gitlab.com/blog/gitlab-product-navigation","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Inside the vision for GitLab’s new platform navigation\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Christen Dybenko\"}],\n        \"datePublished\": \"2023-05-01\",\n      }",{"title":1558,"description":1559,"authors":1564,"heroImage":1560,"date":1566,"body":1567,"category":14,"tags":1568},[1565],"Christen Dybenko","2023-05-01","\n\nSoon, we’ll be launching an entirely redesigned navigation in the GitLab product that is based on feedback from users. 
We’re both excited and a little nervous because navigation is so critical to every user’s workflow. That’s why we made a thoughtful shift in our iteration strategy, taking extra time and intention to develop a new and refined vision. We'd like to share a peek into how we ended up where we did and why we are so excited for our new design!\n\n## We had to invest in the right user experience\n\nBecause it has such an obvious impact on user experience, a navigation overhaul is no small feat. That’s why we fully funded a team to work exclusively on navigation, and provided the time and space to create the best experience possible. During the past year, we put a big focus on design ideation and UX research. It was a lot of work, but we believe this level of user focus has really paid off.\n\nBacked by our amazing design and product leadership team, we put much of our focus on the new navigation for more than nine months while we designed and tested it with end users.\n\nIn this blog post, we’ll share insights on our process, what we learned, and our vision for the future.\n\n![New navigation](https://about.gitlab.com/images/blogimages/2023-04-20-new-navigation-vision/new-navigation-vision.png){: .shadow}\n\n## Predicting what users will need\n\nWhen we first started to think about how to redesign our navigation, the challenge seemed overwhelming. How do we know how to make the best decisions for our navigation? How can anyone know which design or solution is *right*?\n\nWe did not want to make users unhappy for even a short period of time. At GitLab, we have [15 user personas](https://handbook.gitlab.com/handbook/product/personas/#user-personas), incredibly savvy users, and so many different workflows. We had to consider opinions that were not present in our backlog. For example, our power users can be very verbose in issues, but new users are not.\n\nIt is a huge undertaking to get to this kind of understanding and know what is right. 
Time pressure and needing to ship quickly could have made this type of work impossible at this scale.\n\nThankfully, our team dedicated to navigation was amazing. They invested time to reveal our users' key pain points with navigation, which set the litmus test by which we could evaluate every mockup and solution.\n\n## Establishing a north star\n\nBefore we wrote a line of code or started planning, we did a crucial piece of alignment to know our goals. Our design team led us in a north star exercise where we examined every piece of [System Usability Score (SUS)](/handbook/product/ux/performance-indicators/system-usability-scale/) feedback we had received on navigation.\n\nWe coded this feedback and [three themes](/direction/manage/foundations/navigation_settings/#1-year-plan) emerged. We needed to:\n\n- minimize feelings of being overwhelmed\n- orient users across the platform\n- allow users to pick up where they left off easily\n\nThis north star was amazing for understanding the problem and how to proceed. We learned _a lot_ about what our users’ pain points are and what our users struggle with daily.\n\nThankfully, this also helped us remove the dread of trying to ship something with the impossible goal of being all things to all people as we could now test these three themes with any persona.\n\nWe applied the themes to every design validation effort that we conducted with users moving forward. Our UX Research team also conducted interviews to understand how users felt about these specific themes. It felt incredible to have these insights available right from the start. 
It was also empowering to let some of the noise go to focus more clearly on what matters and what would move us forward.\n\n## Shifting our perspective on iteration for the right user experience\n\nGitLab is amazing at [iteration](/handbook/engineering/workflow/iteration/), and lately, we’ve been raising the bar on the quality of our [MVCs](https://handbook.gitlab.com/handbook/product/product-principles/#the-minimal-viable-change-mvc) and [definition of done](https://docs.gitlab.com/ee/development/contributing/merge_request_workflow.html?#ui-changes) with the goal of not degrading the current user experience. For navigation, we took this extra seriously, with the intention of protecting every part of the navigation experience.\n\nAs we reviewed the history of many iterative navigation updates over the past five years, we could see that there was very little overall consistency in the code and in the intention of the updates. This is what happens at fast-moving startups, and it can be ok for a period of time, but at some point, it's necessary to take a pause to strip things back for a meaningful change. The small iterations over time gave us an indication of pain points overall, and we needed a thoughtful plan to proceed.\n\nWe decided that anything we change in this new navigation should not degrade a user’s core workflow. We would first hit a baseline for what currently exists in navigation and then make meaningful updates. We agreed that anything we ship after our Alpha had to be fully usable by our own team. 
We didn’t want users to feel like we’d moved backward or that they had lost functionality in this next phase.\n\nSo, while we have some exciting features planned for the future, we won’t take action on them until we fully refine the core features and address user feedback.\n\n## Iterations now and vision for the next year\n\nWhile holding the baseline promise of no degradation in the new navigation, we did find opportunities to ship small iterations to our current navigation since January. First, we shipped a new navigation called “Your Work” and second, we shipped a new “Explore” menu to all users. Those menus are central to our new navigation vision, but they improved the legacy navigation, too.\n\nAfter launch, we can’t wait to improve even further with more customizable navigation experiences like allowing pins on Your Work and seamless integration with search, command line, and keyboard use. We also have ideas on how to add better landing pages that make life more custom in GitLab, and we couldn’t do that without this new navigation.\n\n## No one likes a navigation re-design\n\nAll that said, we know that no one actually likes a navigation redesign, even if it is best in the long run. Core workflows are ingrained muscle memory that no one wants to mess with if possible.\n\nThat’s why we are releasing our new navigation with a built-in on/off switch. With this approach, you can gradually move to the new navigation by switching back and forth for a little while, as needed.\n\nOur hope is that you’ll take a similar approach and share your feedback along the way, too. We want to hear about your experiences, so please be honest and your feedback will help us iterate.\n\n## What to expect for rollout\n\nWe are proud of our vision for a new navigation! Over the next few months, our new navigation will be available via an opt-in process in the user profile menu, and we'd love your feedback. 
Watch our Twitter, upcoming release posts, and our [direction page](/direction/manage/foundations/navigation_settings/) for more information!\n",[680,1569,836,1210],"design",{"slug":1571,"featured":6,"template":683},"gitlab-product-navigation","content:en-us:blog:gitlab-product-navigation.yml","Gitlab Product Navigation","en-us/blog/gitlab-product-navigation.yml","en-us/blog/gitlab-product-navigation",{"_path":1577,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":1578,"content":1584,"config":1590,"_id":1592,"_type":16,"title":1593,"_source":18,"_file":1594,"_stem":1595,"_extension":21},"/en-us/blog/gitlab-survey-highlights-wins-challenges-as-orgs-adopt-devsecops",{"title":1579,"description":1580,"ogTitle":1579,"ogDescription":1580,"noIndex":6,"ogImage":1581,"ogUrl":1582,"ogSiteName":697,"ogType":698,"canonicalUrls":1582,"schema":1583},"GitLab survey highlights wins, challenges as orgs adopt DevSecOps","This year’s survey findings show that DevSecOps principles, together with a DevSecOps platform, help organizations ship more secure software, faster.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749663908/Blog/Hero%20Images/2023-devsecops-report-blog-banner2.png","https://about.gitlab.com/blog/gitlab-survey-highlights-wins-challenges-as-orgs-adopt-devsecops","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"GitLab survey highlights wins, challenges as orgs adopt DevSecOps\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"David DeSanto, Chief Product Officer, GitLab\"}],\n        \"datePublished\": \"2023-04-20\",\n      }",{"title":1579,"description":1580,"authors":1585,"heroImage":1581,"date":1586,"body":1587,"category":14,"tags":1588},[980],"2023-04-20","\nSecurity is everyone’s responsibility. And when everyone works together and has access to the same tools, you don’t have to sacrifice performance, efficiency, or security. 
That's the message from the respondents of our recent survey of software developers, IT operations, and IT security professionals worldwide. Where there is unity among Development, Security, and Operations in the common goal of securing the software supply chain, there is success.\n\nOur first report from the survey, [Security Without Sacrifices](https://about.gitlab.com/developer-survey/previous/2023/), focuses on this throughline and illuminates where [DevSecOps](/topics/devsecops/) professionals feel positive about their efforts to secure the software development lifecycle and where they feel work still needs to be done. While the results are not surprising — they align with what I hear from customers every day — they reinforce GitLab’s belief that DevSecOps principles, coupled with a DevSecOps platform, help organizations ship more secure software, faster. \n\nFor instance, in last year’s report, a majority of development, security, and operations professionals said they felt individually responsible for security. This year, 53% of respondents said they are responsible for application security *as part of a larger team*. And 71% of security professionals said at least a quarter of all security vulnerabilities are being spotted by developers, up from 53% in 2022.\n\nWhat this tells us is that security is indeed making its way deep into the software development lifecycle and as more innovation is introduced into the daily workflow, including AI-assisted capabilities, the benefits are tangible.\n\nHere’s what the report findings suggest organizations should keep in mind so they can get the most out of DevSecOps.\n\n## AI is now inseparable from DevSecOps\nFor the past several years, we’ve seen AI become more and more established in software development workflows. In this year’s report, nearly two-thirds (65%) of developers said they are using AI in testing efforts or will be in the next three years. 
We also saw an uptick this year in the number of developers who are using AI to check code.\n\nAI represents a tectonic shift in the market that will have profound effects on how organizations deliver value to customers. To take full advantage of AI, it will be critical for organizations to apply AI-assisted workflows across the entire software development lifecycle and make them available to all personas — not just developers but everyone involved in the delivery of software value, from security and compliance teams to product development and marketing.\n\n## Security toolchain expansion is unsustainable\nThis year’s report showed that toolchain sprawl may be a bigger concern for security professionals than for the rest of the team; 57% of security respondents said they use six or more tools, compared to 48% of developers and 50% of operations professionals. We’re also seeing signs that security professionals are using _more_ tools than in past years. This is in line with what security practitioners tell me: They use different tools for each security function, including composition analysis, fuzzing, DAST, and dependency scanning.\n\nThe rise of DevOps and DevSecOps is making it easier for software development teams to consolidate tools, but the increased pressure around software supply chain security means this trend is not holding for security as it is for other roles. Security practitioners select the tools that get the job done and the tools they’re most comfortable with, but as security budgets tighten, that’s no longer going to be a sustainable strategy. 
We should expect to see a bigger push to consolidate security toolchains over the next several years.\n\n## Efficiency and security cannot be mutually exclusive\nThe first wave of budget tightening seems to be here already — 85% of the security professionals we surveyed told us they have the same or less budget this year than they did in 2022, and security professionals were also more likely than both developers and operations professionals to cite macroeconomic forces as a primary factor driving DevOps/DevSecOps to scale at their organizations. In this environment, organizations (and security teams) need to do more with less.\n\nFor many of the organizations I’ve talked to, tighter budgets mean more than just cutting costs. Organizations need to ensure they’re getting a swifter return on their DevSecOps investments. That return on investment could look like increased efficiency, translating into accelerated value delivery for customers, faster innovation, and more revenue. Or it could mean incorporating security and compliance tools earlier in the development lifecycle, reducing risk. Ideally, it’s all of the above. As organizations seek ways to stay ahead of the competition, security and efficiency are both non-negotiable.\n\n## A platform approach: The winning formula for DevSecOps\nHow can organizations foster collaboration, reduce toolchain friction, and boost efficiency without sacrificing security? A platform that puts DevSecOps methodologies into practice. This year’s respondents identified security and efficiency as the top two benefits of adopting a DevSecOps platform, ahead of automation, cost savings, and collaboration.\n\nA DevSecOps platform enables teams to collaborate in a single application, shortening cycle times, reducing risks, and accelerating everyone’s workflows. 
We see proof points in this year’s data: Security professionals who use a DevSecOps platform were significantly more likely than those who don’t use a platform to say developers catch more security vulnerabilities and had a higher opinion of their organization’s security efforts. \n\nIt has become important for organizations to foster collaboration and engagement to keep development, security, and operations teams happy. \n\n## Explore this year’s report\nRead the first report in our 2023 Global DevSecOps Report Series, [Security Without Sacrifices](https://about.gitlab.com/developer-survey/), and stay tuned for more reports on the data in the coming months.\n",[1589,9,750,771],"developer survey",{"slug":1591,"featured":6,"template":683},"gitlab-survey-highlights-wins-challenges-as-orgs-adopt-devsecops","content:en-us:blog:gitlab-survey-highlights-wins-challenges-as-orgs-adopt-devsecops.yml","Gitlab Survey Highlights Wins Challenges As Orgs Adopt Devsecops","en-us/blog/gitlab-survey-highlights-wins-challenges-as-orgs-adopt-devsecops.yml","en-us/blog/gitlab-survey-highlights-wins-challenges-as-orgs-adopt-devsecops",{"_path":1597,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":1598,"content":1604,"config":1609,"_id":1611,"_type":16,"title":1612,"_source":18,"_file":1613,"_stem":1614,"_extension":21},"/en-us/blog/introducing-product-analytics-in-gitlab",{"title":1599,"description":1600,"ogTitle":1599,"ogDescription":1600,"noIndex":6,"ogImage":1601,"ogUrl":1602,"ogSiteName":697,"ogType":698,"canonicalUrls":1602,"schema":1603},"Product Analytics: A sneak peek at our upcoming feature","Our journey to add Product Analytics into the DevSecOps platform.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749667086/Blog/Hero%20Images/blog-compliance.jpg","https://about.gitlab.com/blog/introducing-product-analytics-in-gitlab","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": 
\"Product Analytics: A sneak peek at our upcoming feature\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Sam Kerr\"}],\n        \"datePublished\": \"2023-03-27\",\n      }",{"title":1599,"description":1600,"authors":1605,"heroImage":1601,"date":1606,"body":1607,"category":14,"tags":1608},[1206],"2023-03-27","\n\nProduct analytics are important to understand how your users engage with your application so that you can make data-driven decisions. Identifying features that your users make heavy use of and which they don’t can provide signals to teams on where and how to spend their time most effectively. Without product data, we must use one-off anecdotes or opinions, which can be subject to incorrect assumptions, internal biases, or are missing key details. At the same time, instrumenting applications and processing this data can be challenging, which leads many teams to not do it.\n\nAt GitLab, we view this workflow of instrumenting the app, collecting data, and processing it to gain insights as a key piece of the DevSecOps lifecycle. For this reason, we are working on adding Product Analytics capabilities to our platform so you’ll be able to take advantage of them in your own apps. You will be able to instrument features you have built, see how users engage with them, and make decisions using that data – all within GitLab.\n\nIn this blog, you'll learn more details on what our vision is, what we are working on, our future plans, and how you can contribute and engage with us.\n\n## What is Product Analytics?\n\nWe have a broad vision for what we want to achieve, which we outline in our [product direction page](https://about.gitlab.com/direction/analytics/product-analytics/). 
The short version is that we want to enable developers to easily add instrumentation to their applications, provide infrastructure to receive and process it, run experiments, and enable consumers of the data, such as product managers or developers, to use GitLab to gain insights that will help them make even better products.\n\nOur initial focus is on web applications, primarily those built with JavaScript and Ruby on Rails. Longer term, we want to add functionality like experiments and support for other web frameworks and additional tech stacks.\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/jG42hesT030\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\nWe plan to use several open-source technologies to make this happen: [Snowplow](http://www.snowplow.io), [ClickHouse](https://clickhouse.com/), [Cube.dev](https://cube.dev/), and [ECharts](https://echarts.apache.org/en/index.html) for instrumentation, data storage, and data visualization, respectively. Each of these projects is great at what they do and we are excited to build with them.\n\n## How to configure Product Analytics\n\nOnce publicly available, Product Analytics will need access to a Kubernetes cluster running these applications. GitLab will be able to create and manage this cluster for you or you will be able to provide your own cluster. That cluster will then process, store, and transform your data and display it in relevant GitLab screens. You will then instrument your application’s features with one of our [client-side SDKs](https://gitlab.com/gitlab-org/analytics-section/product-analytics/gl-application-sdk-js). \n\nWith your app instrumented and your Product Analytics cluster set up, you will be able to access reports and dashboards within GitLab to explore the data that is reported. 
You'll be able to identify usage trends and better understand your users so that you can make improvements in future versions of your product.\n\nWe anticipate that one of the unique differentiators GitLab will have with Product Analytics is that all of the dashboard and visualization configurations will be driven by files in your GitLab project. You will be able to collaborate with your team using [merge requests](https://docs.gitlab.com/ee/user/project/merge_requests/), look at previous versions to understand changes, and set up controls over who can make changes – just like you can with code.\n\n![Product Analytics dashboard](https://about.gitlab.com/images/blogimages/productanalyticsingitlab/productanalytics2.png){: .shadow}\n\n## We are customer zero \n\nOne of GitLab’s values is [dogfooding](https://handbook.gitlab.com/handbook/values/#dogfooding) and using our own product. This helps us better understand our users’ pain points and to find where we should make improvements more quickly. We are already dogfooding what we have built in Product Analytics so far.\n\nWe added Product Analytics to our internal handbook several months ago and have learned a lot about Product Analytics and how team members use the internal handbook.\n\nInstrumenting the internal handbook helped us work through the user experience for Product Analytics. We built out a workflow in GitLab to configure the cluster, view the Product Analytics dashboards, and view the content on them. This showed us what it would be like to instrument a real application. The steps we had difficulty doing showed what users would also likely have difficulty doing and, therefore, were an indication to focus on fixing those.\n\nOnce we had instrumented the handbook, we learned a few things we expected, such as people use the internal handbook primarily on weekdays and we see a massive dropoff in usage on weekends. One thing we didn’t expect was understanding which pages were the most viewed from the handbook. 
For example, we have various meetings to review [performance indicators](https://about.gitlab.com/handbook/product/#product-performance-indicators) and we saw large spikes in usage for relevant pages when those meetings occurred.\n\n![Screenshot showing spikes](https://about.gitlab.com/images/blogimages/productanalyticsingitlab/productanalytics1.png){: .shadow}\n\n## Our continued commitment to user privacy\n\nWe know that analytics offerings raise questions about user privacy and how data is being managed. Your data is your data. We want to build Product Analytics from the beginning so that you can respect the privacy of users. We are taking a few steps to accomplish this.\n\n* Product Analytics was designed to honor commonly recognized opt-out signals. That means users browsing the app will not have their activity recorded or analyzed by Product Analytics when an opt-out signal is received. Opt-out signals are becoming more common as a way of respecting privacy and we are excited to use them.\n* We are designing Product Analytics from the beginning to give you full control over the data you collect, rather than requiring it to be sent to a third-party service. Recall how you will have to provide a Kubernetes cluster with Snowplow, ClickHouse, and Cube.dev – you can provide your own cluster and GitLab can connect to it or we can host the cluster for you. In all cases, the data is yours – GitLab will not use this data beyond Product Analytics features and will not sell nor examine it.\n\n## What’s next?\n\nWe’re excited for the future of Product Analytics and to provide a way for you to learn even more about your users. Our near-term plans are to take what we have built so far and learn what improvements are needed to make it production ready for you to use. 
We are also working to give you a variety of options on how to best display Product Analytics data within GitLab so that it is easy for you to get started and to explore your data.\n\n## We’d love to hear from you\n\nAs we move forward with Product Analytics, we would love to hear your thoughts, comments, and questions. We have created [this Product Analytics feedback issue](https://gitlab.com/gitlab-org/gitlab/-/issues/391970) if you want to start a discussion there.\n\nWe plan to release Product Analytics iteratively and will start with a small group of existing customers. If you are interested in previewing Product Analytics before it is generally available, please fill out [our contact form](https://forms.gle/3Q3srimfqpM4WCKM8).\n\n\u003Ci>Disclaimer: This blog contains information related to upcoming products, features, and functionality. It is important to note that the information in this blog post is for informational purposes only. Please do not rely on this information for purchasing or planning purposes. As with all projects, the items mentioned in this blog and linked pages are subject to change or delay. 
The development, release, and timing of any products, features, or functionality remain at the sole discretion of GitLab.\u003C/i>\n",[837,680,836],{"slug":1610,"featured":6,"template":683},"introducing-product-analytics-in-gitlab","content:en-us:blog:introducing-product-analytics-in-gitlab.yml","Introducing Product Analytics In Gitlab","en-us/blog/introducing-product-analytics-in-gitlab.yml","en-us/blog/introducing-product-analytics-in-gitlab",{"_path":1616,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":1617,"content":1623,"config":1629,"_id":1631,"_type":16,"title":1632,"_source":18,"_file":1633,"_stem":1634,"_extension":21},"/en-us/blog/gitlab-and-google-cloud",{"title":1618,"description":1619,"ogTitle":1618,"ogDescription":1619,"noIndex":6,"ogImage":1620,"ogUrl":1621,"ogSiteName":697,"ogType":698,"canonicalUrls":1621,"schema":1622},"How GitLab and Google Cloud drive innovation and efficiency for retailers","Learn how pairing DevSecOps with multicloud environments eases the development burden on retailers.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749667457/Blog/Hero%20Images/open_source_program_blog_image.jpg","https://about.gitlab.com/blog/gitlab-and-google-cloud","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How GitLab and Google Cloud drive innovation and efficiency for retailers\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Regnard Raquedan\"}],\n        \"datePublished\": \"2023-03-08\",\n      }",{"title":1618,"description":1619,"authors":1624,"heroImage":1620,"date":1626,"body":1627,"category":14,"tags":1628},[1625],"Regnard Raquedan","2023-03-08","\nInnovation and growth can sometimes be at odds in the world of retail, especially when trying to develop, deploy, and manage modern applications across multicloud environments. 
GitLab and Google Cloud together help retailers create and secure software that scales along with their business.\n\nGitLab’s comprehensive [DevSecOps Platform](/the-source/platform/devops-teams-want-to-shake-off-diy-toolchains-a-platform-is-the-answer/) connects with Google Cloud’s [Distributed Cloud Edge](https://cloud.google.com/distributed-cloud/edge/latest/docs/overview) edge networking environment and [Anthos](https://cloud.google.com/anthos/docs/concepts/overview) cloud-centric container platform to provide retailers with enterprise-class features such as collaboration and planning, continuous integration ([CI](https://docs.gitlab.com/ee/ci/)), configuration management, and built-in security and compliance.\n\nGitLab enables development teams to streamline management of their distributed, hybrid environments right out of the gate. Retailers can utilize the following capabilities:\n\n* Agile planning and collaboration to ensure Anthos cloud container cluster configurations and policies are up to date and compliant with company standards.\n* Continuous integration to help develop quality code and configurations while simultaneously reducing code errors.\n* Configuration management to roll back to previously working Anthos states or configurations.\n* Native integration with Google Cloud to deploy software faster and more securely.\n\n## Why GitLab for retail?\n\nMulticloud environments are beneficial to retailers because they enable them to easily deploy and manage applications across a vast network of stores, warehouses, and the like. In addition, developing and hosting applications in the cloud provides more choice, faster delivery (i.e. time to market), real-time data access, and the ability to automatically scale resources (up or down). As more retailers look to cloud platforms to achieve these goals, GitLab is uniquely positioned to help them manage these environments in a way that keeps them agile, secure, and able to meet customer demands. 
\n\nGitLab’s DevSecOps Platform is geared toward helping retailers gain operational efficiencies throughout the software development lifecycle and across [multicloud environments](https://about.gitlab.com/topics/multicloud/) like Google Cloud. \n\nDevelopers at retailers can leverage the full range of features in GitLab’s DevSecOps Platform to build, test, deploy, and secure high-performance, low-latency business-critical applications, such as point of sale. \n\n[Google Distributed Cloud Edge](https://cloud.google.com/distributed-cloud/edge/latest/docs/overview) retail customers can use GitLab to manage their hybrid cloud policies, manage configurations, and administer [Anthos](https://cloud.google.com/anthos/docs) clusters. GitLab’s industry-leading DevSecOps Platform helps developers streamline in-store technology management processes and makes it easier for DevSecOps teams to collaborate. GitLab’s DevSecOps Platform also has built-in security and compliance to meet the unique auditing and reporting needs of retailers. \n\n## Use case: Automated deployment at scale\n\nRetail companies with multiple locations need technology that enables them to manage sprawling resources and maintain smooth operations, even when major changes are introduced. With GitLab’s DevSecOps Platform, retailers can automatically sync configurations and data across their Google Cloud, as well as other cloud and on-premises environments. This is critical for large retailers looking to scale hybrid Anthos clusters vertically across their network with Google Distributed Cloud Edge machines.\n\nGitLab also lets developers easily collaborate and make changes using Agile tools, Merge Requests, and requirements-based workflows. This creates a streamlined, audit-ready process that helps team members make decisions quickly.\n\nConfigurations are stored as YAML files in GitLab repositories, where teams can use a different repository per configuration state. 
Anthos Configuration Management then retrieves the appropriate configurations when network access is available, allowing for specific regional changes to be made.\n\nOnce changes are reconciled across regions, the new configurations are automatically applied and propagated to the correct Google Distributed Cloud Edge nodes. This secure, scalable process can be used at thousands of locations, decreasing the company's time to value and increasing ROI.\n\nHaving the right technology is a key driver of growth and innovation for retailers. Investing in technology and utilizing platforms like GitLab and Google Cloud can be a game changer for retailers looking to thrive in today's competitive market.\n",[1368,793,750,233],{"slug":1630,"featured":6,"template":683},"gitlab-and-google-cloud","content:en-us:blog:gitlab-and-google-cloud.yml","Gitlab And Google Cloud","en-us/blog/gitlab-and-google-cloud.yml","en-us/blog/gitlab-and-google-cloud",{"_path":1636,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":1637,"content":1643,"config":1649,"_id":1651,"_type":16,"title":1652,"_source":18,"_file":1653,"_stem":1654,"_extension":21},"/en-us/blog/oidc",{"title":1638,"description":1639,"ogTitle":1638,"ogDescription":1639,"noIndex":6,"ogImage":1640,"ogUrl":1641,"ogSiteName":697,"ogType":698,"canonicalUrls":1641,"schema":1642},"Secure GitLab CI/CD workflows using OIDC JWT on a DevSecOps platform","Learn a new method to authenticate using JWT to increase the security of CI/CD workflows.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749667094/Blog/Hero%20Images/container-security.jpg","https://about.gitlab.com/blog/oidc","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Secure GitLab CI/CD workflows using OIDC JWT on a DevSecOps platform\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Dov Hershkovitch\"}],\n        \"datePublished\": \"2023-02-28\",\n      
}",{"title":1638,"description":1639,"authors":1644,"heroImage":1640,"date":1646,"body":1647,"category":14,"tags":1648},[1645],"Dov Hershkovitch","2023-02-28","\n\nSecuring CI/CD workflows can be challenging. This blog post walks you through the problem validation, explores the JWT token technology and how it can be used with OIDC authentication, and discusses implementation challenges with authorization realms. You will learn about the current possibilities and future plans with GitLab 16.0. \n\n### Variables vs. secrets\nVariables are an efficient way to control and inject parameters into your jobs and pipelines, making managing and configuring the CI/CD workflows easier. You can read more about [how to use CI/CD variables](https://about.gitlab.com/blog/demystifying-ci-cd-variables/). An extra layer of security on top of variables to mask and protect, for now, is our “best-effort” to prevent sensitive variables from being accidentally revealed. However, variables are not a drop-in replacement for secrets. [Securing secrets natively](https://gitlab.com/gitlab-org/gitlab/-/issues/217355) is a solution that GitLab aspires to provide. Meanwhile, we recommend storing sensitive information in a dedicated secrets management solution. As a company, we will provide you abilities to integrate and retrieve secrets as part of your CI/CD workflows. \n\n## Security shifting left\nSensitive information like passwords, secret tokens, or shared IDs required to access tools and platforms need to be securely stored. They must also be highly available to their owners and the teams who use them. There are various secrets management solutions and frameworks available. They have addressed one problem but created new problems. 
For example: \"Which tool is right for our needs?\" More importantly, in software development: \"What's the best way to integrate this into our DevOps processes so that we're secure but still operating as efficiently as possible?\" Ignoring the security protocols in your organization is not an option. However, sensitive information should be stored as securely as possible. Something as simple as an access token stored in plain text can lead to security leaks and business incidents in the worst-case scenarios.\n\n## Initial support for JWT\nThe [JSON Web Token (JWT)](https://en.wikipedia.org/wiki/JSON_Web_Token) aims to build the integration bridge as an open standard for security claims exchange. It is a signed, short-lived, contextualized token that allows everyone to implement authentication between different products securely. The JWT consists of three parts: a header, a payload, and a signature.\n\n- The header represents the type of the token and the encryption algorithm.\n- The signature ensures that the token hasn't been altered.\n- The payload comprises a series of claims representing the information exchanged between two parties, which includes information about a GitLab user (ID, email, login) and the pipeline information (pipeline ID, job ID, environment, and more).\n\n_Example of GitLab JWT payload_\n\n```\n{\n  \"jti\": \"c82eeb0c-5c6f-4a33-abf5-4c474b92b558\",\n  \"iss\": \"gitlab.example.com\",\n  \"iat\": 1585710286,\n  \"nbf\": 1585798372,\n  \"exp\": 1585713886,\n  \"sub\": \"job_1212\",\n  \"namespace_id\": \"1\",\n  \"namespace_path\": \"mygroup\",\n  \"project_id\": \"22\",\n  \"project_path\": \"mygroup/myproject\",\n  \"user_id\": \"42\",\n  \"user_login\": \"myuser\",\n  \"user_email\": \"myuser@example.com\",\n  \"pipeline_id\": \"1212\",\n  \"pipeline_source\": \"web\",\n  \"job_id\": \"1212\",\n  \"ref\": \"auto-deploy-2020-04-01\",\n  \"ref_type\": \"branch\",\n  \"ref_protected\": \"true\",\n  \"environment\": \"production\",\n  
\"environment_protected\": \"true\"\n}\n```\nUsing this information (called \"claims\"), you can implement an authentication condition where the token will get rejected if one of those claims does not match. You can use this to restrict access to only the authorized users and jobs in your pipelines.\n\nGitLab 12.10 added [initial support for JWT token-based connections](https://about.gitlab.com/releases/2020/04/22/gitlab-12-10-released/#retrieve-cicd-secrets-from-hashicorp-vault), which was later [enhanced](https://about.gitlab.com/releases/2020/09/22/gitlab-13-4-released/#use-hashicorp-vault-secrets-in-ci-jobs) with the `secrets:` keyword, as well as the `CI_JOB_JWT` predefined CI/CD variable, which is automatically injected into every job in a pipeline. This implementation was restricted to Hashicorp Vault, and users can use it to read secrets directly from the vault as part of their CI/CD workflow.\n \n### OIDC (JWT Version 2)\nThe logic we used to build the initial support for JWT opened up the possibility of connecting to other providers as well, but the first iteration was still restricted to Hashicorp Vault users.\n\nThis problem was addressed in GitLab 14.7 when we [released](https://about.gitlab.com/releases/2022/01/22/gitlab-14-7-released/#openid-connect-support-for-gitlab-cicd) the first \"Alpha\" version of JWT V2, which provided [Open ID Connect (OIDC)](https://openid.net/connect/) support for CI/CD.\n\nOIDC is an identity layer implemented on top of the JSON web token. You can securely authenticate against many products and services that implement OIDC, including AWS, GCP, and many more, making better use of the token's potential. 
Similar to our first JWT iteration, we added another [predefined CI/CD variable](https://docs.gitlab.com/ee/ci/variables/predefined_variables.html) `CI_JOB_JWT_V2` which is also automatically injected into every job in a CI/CD pipeline.\n\n### Securely store your secrets \nYour software supply chain should include everything needed to deliver and run your software. Securing your supply chain means you need to secure your software and the surrounding (cloud-native) infrastructure. In [GitLab 15.9](https://about.gitlab.com/releases/2023/02/22/gitlab-15-9-released/), we've added additional layers of protection to move our OIDC token from an Experiment to General Availability, increasing the security of your CI/CD workflows. \n\n\n#### Opt-in JWT token\nJSON web tokens (V1 and V2) are stored in CI/CD variables, which are injected automatically into all jobs in a CI/CD pipeline. However, it is likely most jobs in your pipeline do not need the token. In addition to the inefficiency of injecting unused tokens into all jobs in a pipeline, there is a potential security vulnerability. All it takes is one compromised job for this token to be leaked and used by an attacker to retrieve sensitive information from your organization. To minimize this risk, we've added the ability to restrict the token variable from all jobs in your pipeline and expose it only to the specific jobs that need it.\n\nTo declare the JSON web token in a job that needs it, configure the job in the `.gitlab-ci.yml` configuration file following this example:\n\n```yaml\njob_name:\n  id_token:\n    MY_JOB_JWT: # or any other variable name\n  ...\n```\n\nYou can minimize the token exposure across your pipeline, but ensure it is available to the jobs that require it.\n\n#### Audience claim (`aud:`)\nClaims constitute the payload part of a JSON web token and represent a set of information exchanged between two parties. 
The JWT standard distinguishes between reserved, public, and private claims.\n\nThe audience (`aud:`) claim is a reserved claim, which identifies the audience that the JWT is intended for (the target of the token). In other words, which services, APIs, or products should accept this token. If the audience claim does not match, the token is rejected, so the audience claim is an essential part of software supply chain security.\n\nThe option to configure the audience claim is done in the CI/CD configuration when [declaring the usage of the JWT token](https://docs.gitlab.com/ee/ci/secrets/id_token_authentication.html#id-tokens), if we'll continue from the previous example:\n\n```yaml\njob_name:\n  id_token:\n    MY_JOB_JWT: # or any other variable name\n        aud: \"...\" # mandatory field\n  script:\n    - my-authentication-script.sh MY_JOB_JWT….. # use the declared variables in a script\n  ```\n\nConfiguring the audience claim is mandatory for Vault users that leverage the [GitLab/Vault native integration](https://docs.gitlab.com/ee/ci/secrets/#use-vault-secrets-in-a-ci-job) (using the 'secrets:' keyword).\n\n```yaml\njob_name:\n  secrets:\n    VAULT_JWT_1: # or any other variable name\n      id_token:\n        aud: 'devs' # audience claim configuration\n    STAGING_DATABASE_PASSWORD: # VAULT_JWT_1 is the token to be used\n      vault: staging/db/password@ops\n```\n\n### Breaking changes and backward compatibility \nWe understand the increasing demand to secure your software supply chain. We recognize that many of our current users already use the JWT in what will soon be the \"old JWT method\" (V1). To mitigate this conflict, we've decided that moving to the new (OIDC) JWT method is optional until the next major release (GitLab 16.0). To use the new (OIDC) token, users must opt-in to this change from the UI settings and update the pipeline configuration, as explained in the previous sections. 
Users can continue using the Experiment or the \"old method\" until GitLab 16.0. (At that point, only the \"new\" (OIDC) JWT token and method will be available.)\n\nSeveral breaking changes were announced for both [Vault users](https://docs.gitlab.com/ee/update/deprecations.html#hashicorp-vault-integration-will-no-longer-use-ci_job_jwt-by-default) and [users of the JWT \"old\" methods](https://docs.gitlab.com/ee/update/deprecations.html#old-versions-of-json-web-tokens-are-deprecated). Those changes are scheduled for GitLab 16.0.\n\n## Three ways to use the JWT token\nThere are three ways to use a JWT to authenticate against different products in your CI/CD pipeline:\n- The \"old\" method, using the `secrets:` keyword and the `CI_JOB_JWT` variable, which is mainly used to integrate with Hashicorp Vault.\n- An \"Alpha\" version that uses the `CI_JOB_JWT_V2` OIDC token to integrate with different cloud providers.\n- A production-ready OIDC token, which is a secured version of the `CI_JOB_JWT_V2` token, used to authenticate with a variety of different products, like Vault, GCP, AWS, and so on.\n\nAll three methods are available until the next major version (GitLab 16.0). At that point, only the secured OIDC token will be available.\n\nTo prepare for this change, you should:\n\n1. Configure your pipelines to use the fully configurable and more secure [id_token](https://docs.gitlab.com/ee/ci/yaml/index.html#id_tokens) keyword.\n2. Enable the [Limit JSON Web Token (JWT) access setting](https://docs.gitlab.com/ee/ci/secrets/id_token_authentication.html#enable-automatic-id-token-authentication), which prevents the old tokens from being exposed to any jobs. (This setting will be permanently enabled for all projects in GitLab 16.0).\n3. 
If you use GitLab/Hashicorp native integration (using the [secrets:vault](https://docs.gitlab.com/ee/ci/yaml/#secretsvault) keyword), ensure the bound audience is prefixed with `https://`.\n\nThis should ensure a smooth transition to [GitLab 16.0](/upcoming-releases/) without breaking your existing workflows.\n\n\n",[749,9,793,1369],{"slug":1650,"featured":6,"template":683},"oidc","content:en-us:blog:oidc.yml","Oidc","en-us/blog/oidc.yml","en-us/blog/oidc",{"_path":1656,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":1657,"content":1663,"config":1669,"_id":1671,"_type":16,"title":1672,"_source":18,"_file":1673,"_stem":1674,"_extension":21},"/en-us/blog/machine-learning-and-devsecops",{"title":1658,"description":1659,"ogTitle":1658,"ogDescription":1659,"noIndex":6,"ogImage":1660,"ogUrl":1661,"ogSiteName":697,"ogType":698,"canonicalUrls":1661,"schema":1662},"Machine learning and DevSecOps: Inside the OctoML/GitLab integration","MLOps and DevSecOps teams can unify their workflows and gain automation and cost efficiencies.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749666915/Blog/Hero%20Images/autodevops.jpg","https://about.gitlab.com/blog/machine-learning-and-devsecops","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Machine learning and DevSecOps: Inside the OctoML/GitLab integration\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Sameer Farooqui, OctoML\"}],\n        \"datePublished\": \"2023-02-23\",\n      }",{"title":1658,"description":1659,"authors":1664,"heroImage":1660,"date":1666,"body":1667,"category":14,"tags":1668},[1665],"Sameer Farooqui, OctoML","2023-02-23","\n\nMachine learning can be a powerful tool in software development, but not if it has to live apart from existing engineering workflows. 
DevSecOps teams, including MLOps, can now integrate [OctoML CLI](https://gitlab.com/octoml/octoml-public/octoml-cli-tutorials) into [GitLab’s CI/CD Pipelines](https://docs.gitlab.com/ee/ci/pipelines/) to unify workflows and leverage existing deployment and monitoring infrastructure. This integration makes it easier to catch bugs and model performance degradations early in the ML development cycle. \n\nThe OctoML Platform is a machine learning model optimization and deployment service powered by octoml.ai. [Machine learning has grown in popularity](/blog/top-10-ways-machine-learning-may-help-devops/) in DevSecOps, along with AI, because of its ability to learn and model how to perform complex tasks as a human would and then automate those tasks.\n\n## How does CI/CD apply to machine learning?\n\nOnce a machine learning model has been successfully deployed, it can get stale over time and its accuracy could degrade, a situation called “data drift”. Data drift causes newer inferencing data to drift away from the data used to train the model. In the retail industry, this can happen because of seasonality, as an example.\n\nProduction models must be regularly refreshed by retraining their weights with the latest data. Applying CI/CD concepts borrowed from software engineering, the OctoML CI integration makes the deployment process for trained/re-trained models automated and repeatable.\n\n## How OctoML CLI and GitLab work together\n\nNew commits to your inference code repository can run [OctoML CLI](https://github.com/octoml/octoml-cli-tutorials#readme) in your GitLab pipeline to automatically optimize machine learning models for lowest cost per inference and lowest latency, and then deploy the optimized model to your cloud registry. 
For customers looking for more granular packaging formats that integrate with existing containerization systems, OctoML offers [Python wheel packaging](https://app.octoml.ai/docs/deploy.html#python-wheel-deployment) and will soon offer YAML configuration files. To reduce model latency and serving costs, OctoML searches through multiple acceleration engines such as Apache TVM, ONNX Runtime, and TensorRT and then suggests the ideal CPU or GPU hardware type on AWS, Azure, or GCP.\n\n## Choice in cloud deployment targets\n\nUsing OctoML CLI, developers can send any trained model to OctoML’s SaaS platform for cost efficiency and cloud hardware benchmarking. By adapting and optimizing the trained model to leverage hardware intrinsics in CPU and GPUs, OctoML makes inferences run faster in production, thus saving users on cost per inference and improving the user experience of ML applications.\n\n![Cloud workflow](https://about.gitlab.com/images/blogimages/octomlintegration/image1.png){: .shadow}\n\nThe cloud workflow is designed for enterprise and production deployments. 
Here’s how it works:\n\n* The initial push from a developer to the GitLab repository launches a local, shared, or remote runner.\n* The runner will send the updated, trained model first to OctoML’s platform for acceleration and hardware adaptation.\n* Then, the pipeline pushes the accelerated model container to the GitLab Container Registry.\n* Finally, it deploys the container to a managed Kubernetes service in any of the major cloud providers.\n\nModels deployed via the accelerated cloud workflow not only provide end users the lowest latency user experience but also save the organization compute costs at inference time, which can amount to \\[90% of a production machine learning application’s compute costs](https://aws.amazon.com/blogs/machine-learning/reduce-ml-inference-costs-on-amazon-sagemaker-with-hardware-and-software-acceleration/).\n\n## Four required stages for every pipeline\n\nEach pipeline has four stages: setup, package, deploy, and test. Here’s the logical flow:\n\n![Logical flow](https://about.gitlab.com/images/blogimages/octomlintegration/image2.png){: .shadow}\n\n1. common:setup - produces OctoML CLI binary artifact and passes it on to local:package\n2. cloud:package - packages the incoming model into a Docker tarball using the OctoML CLI binary and passes the tarball to the next stage\n3. cloud:deploy - builds a Docker image from the Tarball and deploys the docker container to a remote registry (in our example, we deploy it to AWS via GitLab Container Registry using Flux, but there can be other mechanisms)\n4. cloud:test - run the user-provided test script\n\nWhen a cloud pipeline is executed, the GitLab Pipeline UI will display a corresponding workflow:\n\n![GitLab Pipeline UI](https://about.gitlab.com/images/blogimages/octomlintegration/image3.png){: .shadow}\n\nSimilar to any other GitLab CI/CD job, our [example repository](https://gitlab.com/octoml/octoml-public/octoml-cli-tutorials) has YAML files that define how each stage will execute. 
You can easily clone the repository or code and adapt it to your custom model and inference code:\n\n![example repository](https://about.gitlab.com/images/blogimages/octomlintegration/image4.png){: .shadow}\n\nIn addition to the stage YAML files, OctoML CLI also has its own `octoml.yaml` configuration, which defines the path to your model, hardware type the model should be accelerated for, and the model’s input shapes:\n\n![octoml.yaml config](https://about.gitlab.com/images/blogimages/octomlintegration/image5.png){: .shadow}\n\n## Get started with OctoML CLI and GitLab CI/CD\n\nOctoML CLI and GitLab CI/CD unify your software engineering and machine learning pipelines by allowing ML models to be deployed using the same infrastructure and processes you’re currently using for software applications. Further, our integration makes it seamless to start with local model deployments to test end-to-end inference and move to accelerated cloud deployments with minimal changes to your workflow.\n\n**We’ve [published tutorials](https://gitlab.com/octoml/octoml-public/octoml-cli-tutorials) with an NLP (Bertsquad) and Vision (YOLOv5) model for end-to-end examples. 
So, to get started, download the [OctoML CLI](https://try.octoml.ai/cli/) and [request an acceleration consultation](https://try.octoml.ai/cli/#lp-pom-block-105) to receive a token to OctoML’s SaaS platform.**\n",[233,771,9],{"slug":1670,"featured":6,"template":683},"machine-learning-and-devsecops","content:en-us:blog:machine-learning-and-devsecops.yml","Machine Learning And Devsecops","en-us/blog/machine-learning-and-devsecops.yml","en-us/blog/machine-learning-and-devsecops",{"_path":1676,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":1677,"content":1683,"config":1689,"_id":1691,"_type":16,"title":1692,"_source":18,"_file":1693,"_stem":1694,"_extension":21},"/en-us/blog/its-time-to-put-the-sec-in-devsecops",{"title":1678,"description":1679,"ogTitle":1678,"ogDescription":1679,"noIndex":6,"ogImage":1680,"ogUrl":1681,"ogSiteName":697,"ogType":698,"canonicalUrls":1681,"schema":1682},"It’s time to really put the Sec in DevSecOps","Organizations may tack on security to DevOps but unless they wholly integrate it, they will miss out on DevSecOps benefits.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749671091/Blog/Hero%20Images/lock.jpg","https://about.gitlab.com/blog/its-time-to-put-the-sec-in-devsecops","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"It’s time to really put the Sec in DevSecOps\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Francis Ofungwu\"}],\n        \"datePublished\": \"2023-02-02\",\n      }",{"title":1678,"description":1679,"authors":1684,"heroImage":1680,"date":1686,"body":1687,"category":14,"tags":1688},[1685],"Francis Ofungwu","2023-02-02","\nWe all know that DevOps and security are intertwined. And a lot of lip service is paid to surface integrations between the two. 
But until your organization goes [all-in on a DevSecOps strategy](/blog/why-the-market-is-moving-to-a-platform-approach-to-devsecops/#understanding-devops-pain-points) – where Sec is wholly embedded with Dev and Ops, you will miss out on the benefits a holistic approach brings.\n\nToday, the friction between DevOps and security teams comes from objectives that, at first glance, seem diametrically opposed (spoiler alert: they aren’t). Developers want to create great products at the velocity the business requires, and security teams want to effectively manage risks using methodical frameworks that require some level of structure. Day-to-day collaboration between the two groups can be challenging because their workflows and incentives differ.\n\nIn [GitLab’s 2022 Global DevSecOps Survey](/developer-survey/), we found that developers are seeing security scanning increasing across all categories (SAST, DAST, container scanning, dependency scanning, and license compliance), but this uplift is not translating into vulnerability reduction, as 56% of respondents said it was difficult to get developers to actually prioritize fixing code.\n\nAnd so they stay in silos.\n\n## Separation between security and DevOps doesn’t work\n\nWe know through our experiences that security and DevOps often only come together in emergencies. When there is a high-risk incident, such as a breach, security and DevOps teams are forced together on endless incident calls that function more like a “get to know you” exercise driven by rudimentary questions: What does that app do? Why are you using that library with a vulnerability from 2010? What do you mean it’s not exploitable?\n\nWe can – and should – agree that emergencies are not the best time for this level of discovery. You wouldn’t want a firefighter asking if your building is up to code before they start putting out a fire. 
But due to the lack of frequent collaboration, development and security teams use incidents as the time to play catchup and really dig into the basics of the development lifecycle.\n\n## Sec is more than just a few letters between Dev and Ops\n\nConfusion in the industry hasn’t helped. The industry has come to recognize – and in some cases, exploit – the frustration of these silos. They will plop the “Sec” in between Dev and Ops and market a laundry list of point solutions that solve only a small portion of the problem, and leave DevOps and security teams with a [complex toolchain](/the-source/platform/devops-teams-want-to-shake-off-diy-toolchains-a-platform-is-the-answer/) to manage and maintain. The alarming rate of cyber attacks and breaches in the headlines makes it obvious this approach is not working. So what’s the issue?\n\nI liken where we are now to the challenges that the healthcare industry faced a decade ago in trying to convince physicians of the benefits of hand hygiene. At the time, in the U.S., healthcare-associated infections affected more than 2 million people every year, while compliance with required hygiene standards by healthcare workers was below 40%, [an article from that time period](https://www.hcinnovationgroup.com/home/blog/13020327/the-freakonomics-of-behavior-change-in-healthcare) states. A Los Angeles hospital, aiming to solve this problem, was requiring a 100% hygiene compliance rate among its physicians – should have been a simple task among a population that understands the poor outcomes related to noncompliant behavior, right? No. 
Several carrot-and-stick approaches to changing behavior of the physicians yielded mixed results.\n\nRelying on humans to change their behavior can be fruitless, the researchers found, according to the article: “Organizations should focus instead on innovations through technology or design.” In other words, we should not rely on behavior change from individuals to drive meaningful, long-lasting transformation. We need to use technology as the invisible hand that reinforces the right behavior and enacts course correction when we deviate from expected actions.\n\nThe corollary is that in the tech industry, we have evangelized for [security and DevOps to be together](https://about.gitlab.com/solutions/security-compliance/) and have talked about why it makes sense (improved software supply chain security, management of threat vectors, and adherence to compliance requirements, for example). When we share the [vision of DevSecOps](/topics/devsecops/) there are head nods and agreements that this unification is the right thing to do for the good of the business, but when it comes down to it, the actual implementation is lacking.\n\n## What it means to be fully DevSecOps\n\nDevSecOps has to be a practice in every sense of the word. It can’t be theory or an academic exercise. DevSecOps should be an implementation of cultural, organizational, and technical changes designed to optimize delivery and maintenance of software. 
Characteristics of DevSecOps will include:\n- Reducing the time required to deliver quality software.\n- Automating processes required to identify, categorize, and remediate software bugs.\n- Designing the culture and operations of dev, sec, and ops and unifying these functions through values and workflows.\n\nFor DevSecOps as a practice to work, all stakeholders involved in the design, development, and maintenance of software need to commit to transparent collaboration at scale.\n\nWhat this means in action:\n\n- Eliminating one-way communication of security requirements: controls should be programmatically enforced and consumable via APIs.\n- Implementing policy as code: For adoption and consistency, the desired cultural shift and expectations have to be programmatically enforced.\n- Creating a unified view of threats at every level of the development lifecycle: All stakeholders should have insight to the same information that details the quality of the code. Having separate security scanners only operated by the security team does not drive collaboration.\n- Supporting in-context training inside of the development process: Build better developers by offering near real time evidence of vulnerabilities in their environment and code.\n- Reducing the amount of time developers spend in painful audits by investing in immutable development artifacts that evidence use of controls throughout the lifecycle.\n\nAt GitLab, we believe we are strongly positioned to accelerate your organization’s DevSecOps transformation. 
Our platform helps unify DevSecOps teams and drive the cultural, process, and governance programs required to deliver value to organizations seeking a more effective and sustainable way to develop better, more secure software faster.\n\nCover image by [Georg Bommeli](https://unsplash.com/@calina?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText) on [Unsplash](https://unsplash.com/photos/ybtUqjybcjE?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText)\n{: .note}\n",[750,1128,484],{"slug":1690,"featured":6,"template":683},"its-time-to-put-the-sec-in-devsecops","content:en-us:blog:its-time-to-put-the-sec-in-devsecops.yml","Its Time To Put The Sec In Devsecops","en-us/blog/its-time-to-put-the-sec-in-devsecops.yml","en-us/blog/its-time-to-put-the-sec-in-devsecops",{"_path":1696,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":1697,"content":1703,"config":1709,"_id":1711,"_type":16,"title":1712,"_source":18,"_file":1713,"_stem":1714,"_extension":21},"/en-us/blog/four-approaches-to-gitlab-integrations",{"title":1698,"description":1699,"ogTitle":1698,"ogDescription":1699,"noIndex":6,"ogImage":1700,"ogUrl":1701,"ogSiteName":697,"ogType":698,"canonicalUrls":1701,"schema":1702},"4 approaches to GitLab integrations","Learn about use cases that help extract even more value from a DevSecOps platform.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749667946/Blog/Hero%20Images/4-facets-of-gitlab-integration.png","https://about.gitlab.com/blog/four-approaches-to-gitlab-integrations","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"4 approaches to GitLab integrations\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Kurt Dusek\"}],\n        \"datePublished\": \"2023-01-26\",\n      }",{"title":1698,"description":1699,"authors":1704,"heroImage":1700,"date":1706,"body":1707,"category":14,"tags":1708},[1705],"Kurt Dusek","2023-01-26","\n\nThe 
benefit of a DevSecOps platform is to create a foundation upon which an organization can build its entire development process. Rather than having to log onto several different systems to manage, observe, and advance through the software development lifecycle, DevSecOps teams have one application to serve as their system of record. To augment the platform and create even more business value, organizations can create integrations with third-party software and systems, while still maintaining a unified experience for stakeholders, developers, and operators.\n\nLet's look at what integrations are possible and the use cases that drive them.\n\n## What can be integrated with GitLab\n\nAs a senior solutions architect for Alliances here at GitLab, I often get asked, \"How can I integrate GitLab with X?\" My response: That depends on what's being integrated. X could be a cloud provider, point tool, legacy application or web service that might be used in the development cycle. \n\n## How to integrate with GitLab\n\nThere are four approaches to GitLab integrations:\n\n1. Use GitLab to deploy client applications to X / Host GitLab runners on X\n2. Host GitLab Server on X\n3. Integrate with the development cycle\n4. Deep GitLab application integration\n\nLet's dig deeper into each one.\n\n### 1. Use GitLab to deploy client applications to `X` or Host GitLab runners on `X`\nA very common use case and typically the easiest to achieve. For instance, platform providers, who want to make it easy for their users to run apps built with GitLab on their infrastructure or application server, are often asked for this option. The path is to have GitLab Server be able to authenticate to the hosting platform, and deploy the (ideally containerized) application to the platform.\n\nA close cousin of this is the need to deploy [GitLab runners](https://docs.gitlab.com/runner/) to the infrastructure and register them with a GitLab instance, be it GitLab.com or a self-managed instance. 
Runners are easy to setup and register, and can be [configured and scaled in many different ways](https://docs.gitlab.com/runner/fleet_scaling/). \n\n### 2. Host GitLab Server on `X`\nPlatform providers are also asked to host GitLab Server on their infrastructure. What makes this easy is GitLab runs almost anywhere; if you've got Linux, you can run GitLab Server (even on a Raspberry Pi). The work has already been done for the major cloud providers, including [GCP](https://docs.gitlab.com/ee/install/google_cloud_platform/), [AWS](https://docs.gitlab.com/ee/install/aws/), [Azure](https://docs.gitlab.com/ee/install/azure/), and [Oracle Cloud](https://docs.oracle.com/en/solutions/deploy-gitlab-ci-cd-oci/index.html). If you want to run on your own infrastructure, the [Omnibus](https://docs.gitlab.com/omnibus/) installer does most of the heavy lifting for you; it's the easiest way to self-host GitLab.  \n\n### 3. Integrate with the development cycle\nHere's where it starts to get a bit more involved. The good news is that GitLab has extensive [APIs](https://docs.gitlab.com/ee/api/) and [webhooks](https://docs.gitlab.com/ee/user/project/integrations/webhooks.html) that allow for listening for events and pushing and pulling data.\n\nIf the goal is to integrate with the [CI/CD pipeline](https://docs.gitlab.com/ee/ci/index.html), this can be done by creating a container image that encapsulates the application or scripts necessary and defining a job within the pipeline that uses this image to run the integration. It's likely the integrated app produces some output that **someone** needs to review. Displaying this output directly within the Merge Request elevates third-party data rather than something that has to be searched for in another system.  
Depending on the nature of the tool being integrated, it's possible to show results and a [security report](https://docs.gitlab.com/ee/development/integrations/secure.html#report), [metrics report](https://docs.gitlab.com/ee/ci/testing/metrics_reports.html), or [artifact](https://docs.gitlab.com/ee/ci/pipelines/job_artifacts.html#expose-job-artifacts-in-the-merge-request-ui) that can contain almost any type of data.\n\n### 4. Deep GitLab application integration\nThis is the most complex since it requires an understanding of the [architecture of the GitLab application](https://docs.gitlab.com/ee/development/architecture.html#simplified-component-overview), and how an outside service will interact with and support this architecture. An example of this would be a managed PostgresSQL or Redis service. There's a potential risk of downtime if this type of integration goes wrong, so it's important to test thoroughly in a production-like environment before considering it production-ready. Fortunately GitLab publishes several tools to do this. [GitLab Performance Tool (GPT)](/handbook/support/workflows/gpt_quick_start.html) provides an excellent way to measure and report on the performance of a GitLab instance under various usage scenarios. Its counterpart [GitLab Browser Performance Tool](https://gitlab.com/gitlab-org/quality/performance-sitespeed) tests the browser performance of various GitLab pages.  
\n\nRead more on [Kurt Dusek's blog](https://blog.scientifik.org/).\n",[233,284,1128],{"slug":1710,"featured":6,"template":683},"four-approaches-to-gitlab-integrations","content:en-us:blog:four-approaches-to-gitlab-integrations.yml","Four Approaches To Gitlab Integrations","en-us/blog/four-approaches-to-gitlab-integrations.yml","en-us/blog/four-approaches-to-gitlab-integrations",{"_path":1716,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":1717,"content":1723,"config":1727,"_id":1729,"_type":16,"title":1730,"_source":18,"_file":1731,"_stem":1732,"_extension":21},"/en-us/blog/whats-next-for-devsecops",{"title":1718,"description":1719,"ogTitle":1718,"ogDescription":1719,"noIndex":6,"ogImage":1720,"ogUrl":1721,"ogSiteName":697,"ogType":698,"canonicalUrls":1721,"schema":1722},"GitLab’s 2023 predictions: What’s next for DevSecOps?","Check out insights on securing the supply chain, new uses for AI/ML, and more.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749663820/Blog/Hero%20Images/prediction.jpg","https://about.gitlab.com/blog/whats-next-for-devsecops","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"GitLab’s 2023 predictions: What’s next for DevSecOps?\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Sandra Gittlen\"}],\n        \"datePublished\": \"2023-01-26\",\n      }",{"title":1718,"description":1719,"authors":1724,"heroImage":1720,"date":1706,"body":1725,"category":14,"tags":1726},[746],"\nIn 2023, organizations will focus their time and resources on the continued shift left of security, completing the evolution from DevOps to [DevSecOps](/topics/devsecops/). GitLab Chief Marketing and Strategy Officer [Ashley Kramer](https://gitlab.com/akramer) says that every company will need to have security tightly integrated into DevOps to combat the increased threats throughout the software development lifecycle. 
In addition, DevSecOps teams will have to continue to focus on supply chain security, make optimal use of artificial intelligence and machine learning, and expand their use of value stream analytics. GitLab leaders from across disciplines share these predictions and more about how the industry will change this year.\n\n## Prediction 1: Protecting the supply chain will be the top priority\n\nSecurity will continue to be an organization-wide responsibility, shifting further left and spanning from [the IDE](/blog/get-ready-for-new-gitlab-web-ide/) to applications running in production, according to  [David DeSanto](https://gitlab.com/david), Chief Product Officer.\n\nIn our [2022 Global DevSecOps survey](https://about.gitlab.com/developer-survey/previous/2022/), 57% of sec team members said their orgs have either shifted security left or are planning to this year. Half of security professionals report that developers are failing to identify security issues – to the tune of 75% of vulnerabilities.\n\nThe shift left will be driven in part by the need for [tighter security for software supply chains](/blog/the-ultimate-guide-to-software-supply-chain-security/). “As remote development becomes more and more commonplace, software supply chain security will play a more expansive role across the software development lifecycle,” DeSanto says.\n\n[Francis Ofungwu](https://gitlab.com/fofungwu), Global Field CISO, predicts this supply chain security evolution will happen in three key ways:\n\n- The engineering frontlines will take on more ownership of managing threats in their day-to-day operations. 
In order to accomplish this, developers will need real-time context on vulnerabilities and remediation strategies in each phase of the software development lifecycle (SDLC), consequently reducing the likelihood of painful incidents in production environments.\n\n- Security and compliance teams will invest in transcribing their software assurance expectations into policy-as-code to reduce the manual and time-consuming security review processes that reduce development velocity.\n\n- As a result of headline-grabbing incidents highlighting enterprise risks in modern software development, organizations will build audit programs to better assess and report SDLC risks. This will require organizations to design how to deliver artifacts that prove the immutability of the controls deployed across all aspects of their development toolchain. \n\nOrganizations should also expect that “what have been best practices for supply chain security for many years, will now become regulatory requirements,” says [Corey Oas](https://gitlab.com/corey-oas), Manager, Security Compliance (Dedicated Markets). He points to [artifact attestation and software bill of materials (SBOM) generation](/blog/the-ultimate-guide-to-sboms/) as examples of best practices that will soon become federal government or industry mandates. “Both of these are integral to developer workflows.” \n\n[Sam White](https://gitlab.com/sam.white), Group Manager, Product - Govern, doubles down on the SBOM and artifact attestation prediction, saying both SBOMs and attestations will need ongoing attention from DevSecOps teams. “Expect to see a shift from looking at these as one-time events to them becoming part of a continuous evaluation process,” he says, adding that organizations will need deeper visibility into software dependencies (e.g. 
open source packages) and more centralization of software build information.\n\nAnother element of software supply chain security is [zero trust](/blog/why-devops-and-zero-trust-go-together/). “Organizations have considered zero trust strategies for a while, and it will be an implementation focus for them going forward,” predicts [Joel Krooswyk](https://gitlab.com/jkrooswyk), GitLab Federal CTO. “One reason for this movement, at least among federal agencies and their suppliers, is the recent release of the Department of Defense zero trust architecture strategy and roadmap and the inclusion of zero trust principles in several National Institute of Standards and Technology publications such as [800-207](https://csrc.nist.gov/publications/detail/sp/800-207/final).”\n\n> Get more public sector predictions with our webcast [“2022 Lookback & 2023 Predictions in Cybersecurity & Zero Trust with GitLab”](https://page.gitlab.com/2022_devsecopsusecase_Lookback_Predictions_PubSec_RegistrationPage.html)\n\n## Prediction 2: Security will burrow deep into DevOps education\n\nTo mirror the transformation of DevOps to DevSecOps, [DevOps training and education](/blog/5-ways-to-bring-devops-to-your-campus/) will include security as a key part of the curricula, White says. “Organizations will have to provide access to the training that developers need to get a baseline security knowledge, including why certain vulnerabilities are important and should be addressed right away,” he says.\n\n[Pj Metz](https://gitlab.com/PjMetz), Education Evangelist, believes 2023 will be the year that “Shift Left principles will show up in university classrooms.”\n\n“Already, the GitLab for Education team has seen more and more requests for information on DevSecOps, and not just in computer science and programming. Information systems students are looking to learn more about DevSecOps as well,” he says. 
”Integrating security education directly into DevOps curricula will ensure that future professionals will be prepared for all aspects of DevSecOps.”\n\nAnd he encourages DevOps students to [ask for security to be added into their education](https://about.gitlab.com/the-source/security/the-future-of-devops-education-needs-to-include-security/) so they will be properly prepared for the workforce. \n\n## Prediction 3: AI/ML will be used throughout the SDLC\n\n“AI will become essential for productivity,” Kramer says. “For example, DevOps teams will integrate AI/ML to automate repetitive and difficult tasks. Ideally, this would ease the burden on developers by removing their cognitive load, decreasing the amount of context-switching they have to do, and enabling them to stay in the flow of development.\"\n\nAccording to our 2022 Global DevSecOps survey, 62% of respondents practice ModelOps, while 51% use AI/ML to check code.\n\n“Combining digital transformation with business analytics and AI - real transformations are possible,” says [Christina Hupy](https://gitlab.com/c_hupy), Sr. Manager, Community Programs. “As more of their data is input, businesses can draw actual insights and use AI to continuously improve their systems.”\n\nDeSanto agrees and predicts that [AI-assisted workflows will gain popularity](/blog/why-ai-in-devops-is-here-to-stay/) in application development. 
“AI/ML will further enable rapid development, security remediation, improved test automation, and better observability,” he says.\n\n[Taylor McCaslin](https://gitlab.com/tmccaslin), Group Manager of Product for Data Science, says that while AI/ML certainly isn’t new, making technologies such as open-ended AI accessible to consumers, set an expectation to figure out how it could be better used in software development (think code completion and other such tasks).\n\nHe predicts that while AI/ML will be used all along the SDLC, organizations will grapple with privacy concerns, preserving intellectual property (such as AI-generated code ownership) and permissiveness of licenses for training data sets and algorithms.\n\nAt the same time, he says to look for “more rapid development in the MLOps and DataOps spaces to help developers manage, maintain, and iterate on production software systems that leverage ML and AI.” (Note: GitLab is investing in our ModelOps stage to help support the development of data science-enriched software within the GitLab platform.)\n\n## Prediction 4: Value stream analytics will take on a greater role in organizations\n\nThe digital transformation that organizations will undergo this year will require a deeper commitment to [examining value streams](/blog/the-gitlab-quarterly-how-our-latest-beta-releases-support-developers/). “Value stream analytics will extend past development workflows to provide a more holistic view of the value organizations deliver to their users (both internal and external),” DeSanto says.\n\nExecutive leadership will seek out metrics that give insight into how digital transformation and technological investments are delivering value and driving business results. This is a shift from solely focusing on development efficiencies. 
The 2022 Global DevSecOps survey found that 75% of respondents are either using a DevOps platform or plan to move to one within a year with one of the drivers of this change being metrics and observability.\n\n## Prediction 5: Observability will shift left for efficient DevSecOps \n\n[Observability](/direction/monitor/platform-insights/) will also move further left in the SDLC, according to [Michael Friedrich](https://gitlab.com/dnsmichi), Senior Developer Evangelist. “Observability-driven development will enable everyone to become more efficient and inspire innovation,\" he says.\n\nNew observability-enabling technologies like [eBPF](https://ebpf.io/what-is-ebpf) will help developers with automated code instrumentation instead of adding more workload with manual code instrumentation. eBPF also supports better observability and security workflows in cloud-native environments.\n\nObservability will play a critical role in improving the efficiency of DevSecOps workflows, including CI/CD, infrastructure cost analysis, and trending/forecasting for better capacity planning.\n\n_What do you think will be the big DevSecOps technology advancements this year? Let us know your predictions in the comments below._\n\n## Engage with DevSecOps experts\n\nWant to dig deeper into how to innovate while still keeping an eye on cost efficiencies? Sign up for our webcast [“GitLab’s DevSecOps Innovations and Predictions for 2023”](https://page.gitlab.com/webcast-gitlab-devsecops-innovations-predictions-2023.html?utm_medium=blog&utm_source=gitlab&utm_campaign=devopsgtm&utm_content=fy23q4release) on Jan. 31 to get expert advice and insights about this era of DevSecOps transformation and the tools and strategies you’ll need to meet this challenge. 
\n[Register](https://page.gitlab.com/webcast-gitlab-devsecops-innovations-predictions-2023.html?utm_medium=blog&utm_source=gitlab&utm_campaign=devopsgtm&utm_content=fy23q4release) today!\n\nCover image by [Drew Beamer](https://unsplash.com/@dbeamer_jpg?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText) on [Unsplash](https://www.unsplash.com/)\n{: .note}\n",[1128,750,771,836],{"slug":1728,"featured":6,"template":683},"whats-next-for-devsecops","content:en-us:blog:whats-next-for-devsecops.yml","Whats Next For Devsecops","en-us/blog/whats-next-for-devsecops.yml","en-us/blog/whats-next-for-devsecops",{"_path":1734,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":1735,"content":1741,"config":1746,"_id":1748,"_type":16,"title":1749,"_source":18,"_file":1750,"_stem":1751,"_extension":21},"/en-us/blog/the-gitlab-quarterly-how-our-latest-beta-releases-support-developers",{"title":1736,"description":1737,"ogTitle":1736,"ogDescription":1737,"noIndex":6,"ogImage":1738,"ogUrl":1739,"ogSiteName":697,"ogType":698,"canonicalUrls":1739,"schema":1740},"The GitLab Quarterly: How our latest beta releases support developers","The Value Streams Dashboard and Remote Development provide the capabilities needed to support DevSecOps teams and stay competitive.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749668367/Blog/Hero%20Images/innovation-unsplash.jpg","https://about.gitlab.com/blog/the-gitlab-quarterly-how-our-latest-beta-releases-support-developers","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"The GitLab Quarterly: How our latest beta releases support developers\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Dave Steer\"}],\n        \"datePublished\": \"2023-01-24\",\n      }",{"title":1736,"description":1737,"authors":1742,"heroImage":1738,"date":1743,"body":1744,"category":14,"tags":1745},[1305],"2023-01-24","\nIt’s easy to say that 2023 will be 
the year of innovation, but with the macroeconomic environment requiring an obsessive eye on cost efficiencies, and in some cases, cost-cutting, exactly how are organizations supposed to stay competitive when it comes to software development and delivery? The answer is clear: Stay focused on supporting your developers. Our two new beta releases help you do just that.\n\nThe GitLab Value Streams Dashboard, now available in private beta, ensures that all stakeholders have visibility, early and in real time, into the progress and value delivery metrics associated with software development and delivery. With everyone on the same page, discussions can be had and adjustments made before developers face obstacles or stall out waiting for decision-makers to get up to speed. Developers can also see, at-a-glance, their impact on the idea-to-customer value chain. The goal: Reduce idle time so that developers can spend more time developing and IT leaders can better unlock their transformation results. Keeping the creativity flowing can boost developer happiness and help provide a glide path for software to make its way into the market and add value. \n\nOur other beta release, GitLab Remote Development, can enable organizations to directly support developers by letting them establish an environment that best suits their needs, including where, when, and how they prefer to work. GitLab Remote Development doesn’t require developers to set up and manage local development environments, which keeps workflow distractions to a minimum. 
Stripping away location, device, and complex toolchain barriers can maximize developer satisfaction, which can lead to increased ingenuity and productivity.\n\nAn overarching aspect of this developer support is that it is available on a single DevSecOps platform so you don’t have to tack on something special to achieve these goals — the tools are all there and ready to be used to create better software faster.\n\nNow, let’s dig deeper into these capabilities and how they will help you support your developers and deliver value to your customers.\n\n## GitLab Value Streams Dashboard\n\nIn many conversations we have with customers, lack of visibility into metrics for software development value streams comes up as a pain point. Value streams – the process from idea to delivering customer value – should be the epicenter for understanding the progress, blockers, timelines, and costs associated with your development projects. Without this insight, innovation with an eye to cost efficiencies is virtually impossible. It is also difficult to properly support developers through fast, informed decision-making if everyone doesn’t have access to the same real-time data. \n\nThe GitLab Value Streams Dashboard gives stakeholders a bird's-eye view of their teams’ software delivery metrics (such as [DORA metrics](https://docs.gitlab.com/ee/user/analytics/dora_metrics.html) and [flow metrics](https://docs.gitlab.com/ee/user/analytics/value_stream_analytics.html)) for continuous improvement. DevSecOps teams can identify and fix inefficiencies and bottlenecks in their software delivery workflows, which can improve the overall productivity and stability of their development environment. \n\n> \"Our team is excited to try out the DORA metrics capabilities available in the private beta for the new Value Streams Dashboard. 
We look forward to using other widgets as the Value Streams Dashboard matures, which we hope will greatly improve our productivity and efficiency.\"  \n> _**Rob Fulwell, Staff Engineer, Conversica**_\n\nThe first iteration of the GitLab Value Streams Dashboard enables teams to continuously improve software delivery workflows by benchmarking key DevOps metrics to help improve productivity, efficiency, scalability, and performance. Tracking and comparing these metrics over a period of time helps teams catch downward trends early, drill down into individual projects/metrics, take remedial actions to maintain their software delivery performance, and track progress of their innovation investments.\n\nLeadership can support developers by using information from the dashboard to cross-pollinate and promote best practices, add resources to projects based on metrics, and eliminate common bottlenecks across projects. \n\n\n\n### Roadmap for Value Streams Dashboard\n\nWe are just getting started with delivering capabilities in our Value Streams Dashboard. The roadmap includes planned features and functionality that will continue to improve decision-making and operational efficiencies.\n\nHere are some of the capabilities we plan to focus on next:\n\n1. New visualizations such as overview widgets, [top view treemap](https://gitlab.com/gitlab-org/gitlab/-/issues/381306), and [DORA performance score chart](https://gitlab.com/gitlab-org/gitlab/-/issues/386843)\n2. Security and vulnerability benchmarking  to enable executives to better understand an organization’s security exposure \n3. A new [data warehouse](https://gitlab.com/groups/gitlab-org/-/epics/9318?_gl=1*1orel9k*_ga*ODExMTUxMDcwLjE2Njk3MDM3Njk.*_ga_ENFH3X7M5Y*MTY3MjkxMTgxMC43Ny4xLjE2NzI5MTI0MTIuMC4wLjA.) that supports fast analytical queries and deep data analysis\n4. 
Additional business value metrics such as adoption, OKRs, revenue, costs, CSAT that align technical and business goals\n\n[Learn more on our direction page](/direction/plan/value_stream_management/).\n\n### Join the beta: We welcome your contributions\n\nAs we iterate on this new offering, GitLab Premium and Ultimate customers are invited to [join our private beta](https://about.gitlab.com/value-streams-dashboard).\n\nWe also invite you to learn more about [Value Streams Dashboard](https://docs.gitlab.com/ee/user/analytics/value_streams_dashboard.html) and [follow along](https://gitlab.com/groups/gitlab-org/-/epics/9317) on the timeline to General Availability.\n\n## GitLab Remote Development\n\nThe increasing adoption of reproducible, ephemeral, cloud-based development environments has accelerated software development. But for developers, frequent context-switching between different environments, navigating complex and extensive toolchains, and managing a local development environment can create friction. GitLab Remote Development helps organizations better support developers by enabling them to spend less time managing their development environment and more time contributing high-quality code.\n\n> \"While a number of stakeholders are critical to successful DevOps, software developers are key for a successful DevOps implementation. Thus, organizations must adequately support developers. This means providing good developer experiences that are not disruptive or intrusive, but that are nonetheless sanctioned by the company, and that remain secure and compliant through automation and abstraction.\"  \n> _**Jay Lyman, 451 Research, a part of S&P Global Market Intelligence, \"Traditional IT teams, leadership stand out as additional DevOps stakeholders – Highlights from VotE: DevOps,\" January 4, 2023**_ \n\nThe centerpiece of GitLab Remote Development is our newly released Web IDE Beta, now the default web IDE experience on GitLab. 
The Web IDE makes it possible to securely connect to a remote development environment, run commands in an interactive terminal panel, and get real-time feedback from right inside the Web IDE. Understanding that developer familiarity is important, the Web IDE Beta uses a more powerful VS code interface and is able to handle many of the most frequently performed tasks on the existing Web IDE, including committing changes to multiple files and reviewing merge request diffs.\n\nGitLab Remote Development also creates a more secure development experience by enabling organizations to implement a [zero-trust policy](/blog/why-devops-and-zero-trust-go-together/) that prevents source code and sensitive data from being stored locally across numerous developer devices. In addition, organizations can adhere to compliance requirements by ensuring developers are working with approved environments, libraries, and dependencies. \n\nIt’s interesting to note that we deployed the Web IDE beta turned on as default and currently 99.9% of users have kept it toggled on. I encourage you to learn more about the [new Web IDE functionality](/blog/get-ready-for-new-gitlab-web-ide/) in our recent blog post. \n\n### Roadmap for Remote Development\n\nAs iteration continues on the GitLab remote development experience, the roadmap currently focuses on the following functionality next: \n\n1. Provision instances of remote development environments on demand in the customer’s choice of cloud provider.\n2. Allow teams to share complex, multi-repo environments.\n3. Connect from a variety of IDEs, including VS Code, JetBrains, Vim, or the Web IDE.\n4. 
Ensure an organization’s remote environments conform to its software supply chain security requirements with advanced security tools, authorization, reports, and audit logs.\n\n[Learn more on our direction page](/direction/create/ide/remote_development/).\n\n## Engage with DevSecOps experts\n\nWant to dig deeper into how to innovate while still keeping an eye on cost efficiencies? Join me for our webcast “[GitLab’s DevSecOps Innovations and Predictions for 2023](https://page.gitlab.com/webcast-gitlab-devsecops-innovations-predictions-2023.html?utm_medium=blog&utm_source=gitlab&utm_campaign=devopsgtm&utm_content=fy23q4release)” on Jan. 31 to get expert advice and insights about this era of DevSecOps transformation and the tools and strategies you’ll need to meet this challenge. \n\n[Register today](https://page.gitlab.com/webcast-gitlab-devsecops-innovations-predictions-2023.html?utm_medium=blog&utm_source=gitlab&utm_campaign=devopsgtm&utm_content=fy23q4release)!\n\n**Disclaimer**: This blog contains information related to upcoming products, features, and functionality. It is important to note that the information in this blog post is for informational purposes only. Please do not rely on this information for purchasing or planning purposes. As with all projects, the items mentioned in this blog and linked pages are subject to change or delay. 
The development, release, and timing of any products, features, or functionality remain at the sole discretion of GitLab.\n\n\n_Cover image by [Skye Studios](https://unsplash.com/@skyestudios?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText) on [Unsplash](https://unsplash.com)_\n  \n",[1128,9,728,836],{"slug":1747,"featured":6,"template":683},"the-gitlab-quarterly-how-our-latest-beta-releases-support-developers","content:en-us:blog:the-gitlab-quarterly-how-our-latest-beta-releases-support-developers.yml","The Gitlab Quarterly How Our Latest Beta Releases Support Developers","en-us/blog/the-gitlab-quarterly-how-our-latest-beta-releases-support-developers.yml","en-us/blog/the-gitlab-quarterly-how-our-latest-beta-releases-support-developers",{"_path":1753,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":1754,"content":1760,"config":1766,"_id":1768,"_type":16,"title":1769,"_source":18,"_file":1770,"_stem":1771,"_extension":21},"/en-us/blog/try-out-new-way-to-migrate-projects",{"title":1755,"description":1756,"ogTitle":1755,"ogDescription":1756,"noIndex":6,"ogImage":1757,"ogUrl":1758,"ogSiteName":697,"ogType":698,"canonicalUrls":1758,"schema":1759},"Moving projects easily: GitLab migration automation benefits","Learn how our new direct transfer feature, in beta, is speeding migrations.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749668857/Blog/Hero%20Images/migration.jpg","https://about.gitlab.com/blog/try-out-new-way-to-migrate-projects","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"GitLab project migration and automation - a perfect pair for faster, easier transfers\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Magdalena Frankiewicz\"}],\n        \"datePublished\": \"2023-01-18\",\n      }",{"title":1761,"description":1756,"authors":1762,"heroImage":1757,"date":1763,"body":1764,"category":14,"tags":1765},"GitLab project migration 
and automation - a perfect pair for faster, easier transfers",[1227],"2023-01-18","\n\nSince Version 14.3, GitLab has supported [migrating GitLab groups by direct transfer](https://docs.gitlab.com/ee/user/group/import/#migrate-groups-by-direct-transfer-recommended), where, rather than manually uploading export files, data is transferred directly from the source instance to the destination instance. We have been working to extend this functionality to projects and are including the ability to migrate projects by direct transfer as a beta in GitLab 15.8.\n\nThis beta feature is **available to everyone**, enabled by default on GitLab.com and with [some configuration](#availability-of-the-feature)\non self-managed GitLab instances.\n\n## Benefits of the direct transfer method\n\nMigrating by direct transfer enables you to easily migrate GitLab group and project resources between GitLab instances and within the same GitLab\ninstance, using either the UI or API.\n\nThis is a major improvement from migrating [groups](https://docs.gitlab.com/ee/user/group/import/#migrate-groups-by-uploading-an-export-file-deprecated) and [projects using file exports](https://docs.gitlab.com/ee/user/project/settings/import_export.html) because:\n\n- You don't need to manually export each individual group and project to a file and then import all those export files to a new location. Now any top-level group you have the Owner role for (plus subgroups when using API) and all its projects can be migrated automatically, making your work more efficient.\n- When migrating from GitLab Self-Managed to GitLab.com, user associations (such as comment author) previously were linked to the user who ran the import. 
Migration using direct transfer maps users and their contributions correctly, provided [a few conditions are met](https://docs.gitlab.com/ee/user/group/import/#preparation).\n\n## Availability of the feature\n\nThe beta release for migrating GitLab projects with top-level groups by direct transfer is available on GitLab.com. You can migrate from a self-managed GitLab instance to GitLab.com or within GitLab.com right now!\n\nGitLab Self-Managed users have access to migrating projects by direct transfer beta, too. Administrators need to enable:\n\n- an [application setting](https://docs.gitlab.com/ee/administration/settings/visibility_and_access_controls.html#enable-migration-of-groups-and-projects-by-direct-transfer) for migrating groups\n~~- the `bulk_import_projects` [feature flag](https://docs.gitlab.com/ee/administration/feature_flags.html), for migrating projects in the groups~~\n\nWe have removed that feature flag in GitLab 15.10, so only the application setting needs to be enabled.\n\nThis change enables GitLab Dedicated instances to take advantage of the feature.\n\nWe recommend upgrading self-managed instances to the latest version possible before migrating groups and projects.\n\n## Trying the new feature out\n\nTo get started with the new feature, you can either [read the documentation](https://docs.gitlab.com/ee/user/group/import/#migrate-groups-by-direct-transfer-recommended) or follow the\nsteps below.\n\n1. Make sure the [feature is available](#availability-of-the-feature) to you.\n1. Generate or copy a [personal access token](https://docs.gitlab.com/ee/user/profile/personal_access_tokens.html) with the `api` scope on your source GitLab instance. Both `api` and `read_repository` scopes are required when migrating from GitLab 15.0 and earlier.\n1. On the top navigation, select **+**, then **New group**, and then **Import group**.\n1. Enter the URL of your source GitLab instance.\n1. 
Enter the personal access token for your source GitLab instance and select **Connect instance**.\n  ![Screenshot of connecting the source instance](https://about.gitlab.com/images/blogimages/migrate-gitlab-projects-images/connect-source-instance.png){: .shadow}\n1. Select the groups to import from the top-level groups on the connected source instance you have the Owner role for. All the projects within chosen groups can be migrated too! Choose from the dropdown the group you want to migrate to for each group you have selected. Adjust the newly created group name, if needed.\n  ![Screenshot of choosing groups to import](https://about.gitlab.com/images/blogimages/migrate-gitlab-projects-images/choose-groups-to-import.png){: .shadow}\n1. Next to the groups you want to import, select **Import with projects**. The **Status** column shows the import status of each group. If you leave the page open, it updates in real time.\n1. After a group has been imported, select its GitLab path to open the imported group.\n\nFor more information about migrating by direct transfer (for example, what resources are migrated and [group import history](https://docs.gitlab.com/ee/user/group/import/index.html#group-import-history)), see our [documentation](https://docs.gitlab.com/ee/user/group/import/index.html).\n\n## What about migrating projects using file exports? \n\nOnce the migrating projects by direct transfer feature is ready for production use at any scale, migrating groups and projects using file exports\nwill be disabled by a feature flag and only migrating groups and projects by direct transfer will be available in the UI and API.\n\nBecause migrating by direct transfer requires network connection between instances or GitLab.com, customers that are using air-gapped networks with no\nnetwork connectivity between their GitLab instances will need to reenable migrating using file exports. 
They will be able to use migrating groups and\nprojects by direct transfer after we extend this solution to [also support offline instances](https://gitlab.com/groups/gitlab-org/-/epics/8985).\n\nWe will not fully remove migrating using file exports until we support all our customers with a new solution.\n\n## What's next for migrating by direct transfer method \n\nOf course, we're not done yet! We will be improving the direct transfer method before we come out of beta. We're working on:\n\n- Making the migration [efficient](https://gitlab.com/groups/gitlab-org/-/epics/8983) and [reliable](https://gitlab.com/groups/gitlab-org/-/epics/8927)\n  for large projects.\n- Improving [feedback during migration and when migration is finished](https://gitlab.com/groups/gitlab-org/-/epics/8984).\n\nNext, we will be focusing on:\n\n- Enabling more granular imports, where you'll be able to:\n  - Migrate any group in the UI, not only top-level ones. Migrating subgroups is currently limited to the API.\n  - Choose which projects within a group you want to migrate.\n- Importing [project relations not yet included in migration](https://gitlab.com/groups/gitlab-org/-/epics/9319).\n- Automatically [migrating users](https://gitlab.com/groups/gitlab-org/-/epics/4616).\n\nDetails about the migrating by direct transfer roadmap can be found on our [direction page](https://about.gitlab.com/direction/manage/import_and_integrate/importers/).\n\nWe are excited about this roadmap and hope you are too! We want to hear from you. What's the most important missing piece for you? What else can we improve? Let us know in the [feedback issue](https://gitlab.com/gitlab-org/gitlab/-/issues/284495) and we'll keep iterating!\n\n**Disclaimer:** This blog contains information related to upcoming products, features, and functionality. It is important to note that the information in this blog post is for informational purposes only. Please do not rely on this information for purchasing or planning purposes. 
As with all projects, the items mentioned in this blog and linked pages are subject to change or delay. The development, release, and timing of any products, features, or functionality remain at the sole discretion of GitLab.\n\n_Cover photo by [Chris Briggs](https://unsplash.com/@cgbriggs19?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText) on [Unsplash](https://www.unsplash.com)_\n",[836,837,680,727],{"slug":1767,"featured":6,"template":683},"try-out-new-way-to-migrate-projects","content:en-us:blog:try-out-new-way-to-migrate-projects.yml","Try Out New Way To Migrate Projects","en-us/blog/try-out-new-way-to-migrate-projects.yml","en-us/blog/try-out-new-way-to-migrate-projects",{"_path":1773,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":1774,"content":1780,"config":1787,"_id":1789,"_type":16,"title":1790,"_source":18,"_file":1791,"_stem":1792,"_extension":21},"/en-us/blog/devsecops-platforms-help-smbs-scale-as-they-grow",{"title":1775,"description":1776,"ogTitle":1775,"ogDescription":1776,"noIndex":6,"ogImage":1777,"ogUrl":1778,"ogSiteName":697,"ogType":698,"canonicalUrls":1778,"schema":1779},"DevSecOps platforms help SMBs scale as they grow","Adopting a comprehensive platform early lets smaller businesses mature with best practices.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749668641/Blog/Hero%20Images/smbscale.jpg","https://about.gitlab.com/blog/devsecops-platforms-help-smbs-scale-as-they-grow","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"DevSecOps platforms help SMBs scale as they grow\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Sharon Gaudin\"}],\n        \"datePublished\": \"2023-01-17\",\n      }",{"title":1775,"description":1776,"authors":1781,"heroImage":1777,"date":1782,"body":1783,"category":14,"tags":1784},[1364],"2023-01-17","\nFor startups and small to medium-sized businesses (SMBs) working to expand their 
customer base, revenue, and standing in their industries, adopting a [DevSecOps](/topics/devsecops/) platform is one move that can help make all of that growth happen. \n\nThe trick is to migrate to a single, end-to-end platform when the organization is small, so bad habits are avoided early on and constructive processes can be built in and scale as the business grows. A DevSecOps platform enables small businesses to set up an environment and work processes that help them avoid common pitfalls that can come with growth.\n\n## How DevSecOps platforms help SMBs scale\n\nHere are a few ways a DevSecOps platform can help smaller businesses and startups scale:\n\n### Reducing complexity\n\nWhen someone is on a small IT team, the last thing they need is something complicating their job and taking up their precious time. And if they are stitching together multiple tools, they end up creating a [clumsy, ad-hoc toolchain](/blog/battling-toolchain-technical-debt/). That by its very nature forces DevOps professionals to wrestle with a chaotic environment that leads to bottlenecks and requires constant management, tweaking, updating, and switching between interfaces. All of that toolchain care and feeding comes at the expense of simply focusing on delivering code that drives the organization’s bottom line. \n\n### Avoiding silos\n\nMaybe a company is small enough that silos aren’t a problem... right now. But as the business grows, silos likely will grow along with it, causing problems. Silos mean people are heads down working on their own project, or even worse, their own part of a project, without any visibility into the rest of it, or the ability to comment or share their work. It’s easy to create silos if you’re not using a DevSecOps platform because people often naturally separate off into single-minded groups that do not communicate with or understand each other. DevSecOps platforms foster collaboration, making it easier to keep silos from forming in the first place. 
They create a working environment open to communication and collaboration. A platform will give people the ability to work together, and that collective effort will produce better software. \n\n### Increasing collaboration\n\nAdopting a single, end-to-end platform when a company is small or when a startup is just getting off the ground will enable and encourage everyone in the business (from IT to finance, marketing, and sales) to work together. And it’s easier to create [a collaborative culture](/blog/why-devops-collaboration-continues-to-be-important/) from the very beginning, when working together can become a habit – a normal means of operation. Instilling an environment of communication also is less disruptive and easier to manage in a company of 10, 25, or even 100 employees than in a much larger and complex business. Collaboration also will encourage innovation by bringing in ideas from people in a range of demographics and business interests. Innovative ideas will help businesses grow into more successful and larger companies.\n\n### Decreasing hands-on work\n\nBecause startups and SMBs have fewer IT people, let alone teams of DevOps professionals, the [automation](/blog/how-automation-is-making-devops-pros-jobs-easier/) that is an integral part of a DevSecOps platform eases their burden by decreasing the amount of hands-on work they have to do. With automation for jobs like backup, installation, and security testing built in, people spend less of their already-limited time needlessly repeating time-consuming tasks, or going back in the software lifecycle to find where a security bug was introduced. Automating tasks required for everything from design to build, test, and deployment also can reduce the potential for human error and provide consistency throughout the software lifecycle. By taking those jobs off DevSecOps teams' plates, they have more time to actually build and deploy innovative software and support the business. 
\n\nLet’s be clear: A startup or SMB isn’t too small for a DevSecOps platform. If an organization is building software, it needs a platform. Business executives don’t want to struggle to grow and look back regretfully and think, “Why didn’t I adopt a DevSecOps platform earlier?”\n\n“If you’re on a small team or even just a team of one, migrating could seem like a lot to take on,” says [Fatima Sarah Khalid](/company/team/#sugaroverflow), a developer evangelist at GitLab. “But it’s worth the effort to set yourself up for growth. With a platform, everyone in the company is able to work in the same environment on the same projects. That means a collaborative environment without silos is formed early and the business can grow with that culture, instead of trying to adopt it years down the road when bad work habits have already formed.”\n\nWith GitLab’s single, end-to-end DevSecOps platform, automation is a system feature and not something that has to be added in. It also helps organizations eliminate or even keep silos from forming, increases collaboration and communication, and decreases the complexities that are born of DIY toolchains.\n\n**Download our [ebook](https://page.gitlab.com/resources-ebook-trading-diy-devops-for-a-single-platform-smb.html)** to learn about the benefits of migrating from a toolchain to GitLab’s DevSecOps platform. 
\n\n_Cover image by [Markus Spiske](https://unsplash.com/de/@markusspiske?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText) on [Unsplash](https://www.unsplash.com)_\n",[1128,1231,1785,1786],"demo","growth",{"slug":1788,"featured":6,"template":683},"devsecops-platforms-help-smbs-scale-as-they-grow","content:en-us:blog:devsecops-platforms-help-smbs-scale-as-they-grow.yml","Devsecops Platforms Help Smbs Scale As They Grow","en-us/blog/devsecops-platforms-help-smbs-scale-as-they-grow.yml","en-us/blog/devsecops-platforms-help-smbs-scale-as-they-grow",{"_path":1794,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":1795,"content":1800,"config":1805,"_id":1807,"_type":16,"title":1808,"_source":18,"_file":1809,"_stem":1810,"_extension":21},"/en-us/blog/devsecops-platforms-give-smbs-security-muscle",{"title":1796,"description":1797,"ogTitle":1796,"ogDescription":1797,"noIndex":6,"ogImage":1640,"ogUrl":1798,"ogSiteName":697,"ogType":698,"canonicalUrls":1798,"schema":1799},"DevSecOps platforms give SMBs security muscle","A single platform enables teams to build, test, and deploy secure software with fewer resources.","https://about.gitlab.com/blog/devsecops-platforms-give-smbs-security-muscle","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"DevSecOps platforms give SMBs security muscle\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Sharon Gaudin\"}],\n        \"datePublished\": \"2023-01-10\",\n      }",{"title":1796,"description":1797,"authors":1801,"heroImage":1640,"date":1802,"body":1803,"category":14,"tags":1804},[1364],"2023-01-10","\nDevOps professionals with both security training and experience come at a high price and can be hard to find. 
That makes it especially difficult for startups and small and medium-sized businesses (SMBs), which generally don’t have deep pockets, to get the security professionals they need.\n\nSmaller businesses often end up with no security team, so they have to hire consultants. Even worse, they might end up having little to no security help at all, which will cause problems for their customers as well as their own business.\n\nOne efficient [way to deal with that](/blog/6-ways-smbs-can-leverage-the-power-of-a-devops-platform/) is to adopt a DevSecOps platform, which enables organizations to build, test, and deploy secure software with fewer hands and [expenses](/blog/how-smbs-can-save-with-gitlabs-devops-platform/).\n\n“Someone in an SMB likely doesn’t have enough expertise, or even enough people, in-house to handle every part of DevOps, so they end up having to hire a contractor or consultant to take on things like security and monitoring, and that strains their budget,” says [Fatima Sarah Khalid](https://gitlab.com/sugaroverflow), a developer evangelist at GitLab. “By adopting the GitLab DevSecOps Platform, they can more easily handle this work, despite limited resources.”\n \n## Four benefits for SMBs\n\nSo how does a complete DevSecOps platform add security muscle to a small business? \n\n### 1. Finding vulnerabilities early\n \nWith a single, end-to-end platform, [security is integrated throughout](/stages-devops-lifecycle/secure/), and not just bolted on as an afterthought. With capabilities like dynamic and static application security testing, vulnerability management, and dependency and container scanning, developers can find vulnerabilities earlier in the process when they often can be more easily and quickly fixed. By shifting security left this way, teams can perform threat and vulnerability analysis as developers create the code - not when it’s about to be deployed. 
Shifting security left also creates more secure software, and decreases the time it would have taken to track down a problem created much earlier in the process.\n \n### 2. Easing work with automation\n \nAutomation, which is built into a single DevSecOps platform, is critical because it brings consistency and repeatability to the entire software lifecycle, reducing the potential for human error and minimizing the introduction of bugs and risks. And that enables SMBs to produce more secure software for their own organizations, as well as for their customers.\n \nAnother major advantage of automation is that it minimizes the need for a lot of extra hands-on and time-consuming work, like code reviews and testing. Startups and small businesses, by nature, have smaller DevOps teams. They might even have an IT team of one or two people, who do everything from building software to serving as the help desk. Saving them from having to do repetitive manual work gives them back precious time they can spend on more innovative and productive jobs.\n \nAll of that automated testing is automatically logged and documented, helping organizations create easily searchable and useful best practices that will help speed future software builds.\n \n### 3. Ensuring compliance\n \nSMBs and companies just getting off the ground don’t want to get tripped up by tricky and costly compliance issues. Luckily, the same end-to-end platform enables teams to verify the compliance of their code without leaving their workflow. In GitLab, for example, compliance confirmation lives within the platform and is automated. Developers don't have to context-switch among different point solutions, boosting their productivity and efficiency. Automating compliance also removes one more task from [developers’ already busy schedules](/blog/ease-pressure-on-smb-developers-with-a-devops-platform/).\n \n### 4. 
Establishing security imperatives\n \nA DevSecOps platform gives SMBs speed and efficiency, without requiring them to string together various security tools or hire security consultants. With a platform, because security practices and automation are integrated from the very start, an SMB’s DevOps environment has a solid security foundation. One solution. One answer to security needs.\n\n## Meeting the security need\n \nIn today’s environment, security and compliance are business imperatives. There’s no getting around it.\n\nSo having a strategic, end-to-end platform approach, where security and compliance are embedded from planning to production, provides efficiency and value unmatched by traditional, third-party application security vendors. Companies that may be using DevOps but are only tacking together different tools simply aren’t getting the security advantages that come from a single DevSecOps application.\n\nStartups and SMBs have a steep hill to climb just to survive. Between March 2020 and March 2021, 1 million small businesses opened in the U.S., but 833,458 closed, according to the U.S. Small Business Administration. And in a volatile economic climate, survival gets even tougher. Today’s high inflation rates and market instability have small businesses bracing for economic uncertainties, according to the [MetLife & U.S. Chamber of Commerce Small Business Index](https://www.uschamber.com/sbindex/summary).\n\nAnd those numbers are just about sheer survival. That’s not to mention actually gaining a solid foothold in an organization’s industry, attracting loyal customers, and successfully taking on bigger competitors, which just makes the hill small businesses are climbing even steeper.\n\nPrepare to make that climb easier by migrating to a single, end-to-end platform. 
[Download our SMB-focused ebook](https://page.gitlab.com/resources-ebook-trading-diy-devops-for-a-single-platform-smb.html) to learn the advantages of moving from a DIY DevOps toolchain to GitLab’s platform.\n",[1128,9,750,771],{"slug":1806,"featured":6,"template":683},"devsecops-platforms-give-smbs-security-muscle","content:en-us:blog:devsecops-platforms-give-smbs-security-muscle.yml","Devsecops Platforms Give Smbs Security Muscle","en-us/blog/devsecops-platforms-give-smbs-security-muscle.yml","en-us/blog/devsecops-platforms-give-smbs-security-muscle",{"_path":1812,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":1813,"content":1818,"config":1824,"_id":1826,"_type":16,"title":1827,"_source":18,"_file":1828,"_stem":1829,"_extension":21},"/en-us/blog/visualizing-incident-management-metrics",{"title":1814,"description":1815,"ogTitle":1814,"ogDescription":1815,"noIndex":6,"ogImage":1060,"ogUrl":1816,"ogSiteName":697,"ogType":698,"canonicalUrls":1816,"schema":1817},"Visual guide to incident metrics","Learn what incident metrics are and how they contribute to better performance and response times.","https://about.gitlab.com/blog/visualizing-incident-management-metrics","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Visual guide to incident metrics\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Alana Bellucci\"}],\n        \"datePublished\": \"2023-01-09\",\n      }",{"title":1814,"description":1815,"authors":1819,"heroImage":1060,"date":1821,"body":1822,"category":14,"tags":1823},[1820],"Alana Bellucci","2023-01-09","\n\nIncident metrics are a set of standard, quantifiable measurements used to track the incident response process. Accurately tracking these metrics will help DevSecOps teams understand how they are performing and whether responses to unplanned outages are getting better or worse. 
Decreasing the time to detect, respond, mitigate, and recover from an incident decreases the impact of an incident on customers as well as the cost of the incident to the business overall. \n\nHow are these metrics captured and recorded? Let's backtrack a bit and define five key timestamps to capture during an incident that will enable teams to measure the relevant incident metrics.\n\n1. First product impact (`Start time`): When a service starts degrading or metrics begin deviating from the norm.\n2. Time to detection (`Impact detected`): When the operator becomes aware of the problem.\n3. Time to respond (`Response initiated`): When the operator starts to address the problem. \n4. Time to mitigate (`Impact mitigated`): When there is no longer severe product impact. The system may still be degraded in some way.\n5. Time to recovery (`End time`): When the system has fully recovered and is operating normally. Note: Sometimes recovery and mitigation are the same, but sometimes they are different. Time to recovery is the same as the DORA metric [time to restore service](https://docs.gitlab.com/ee/user/analytics/dora_metrics.html#time-to-restore-service): time an incident was open in a production environment over the given time period.\n\nIt is important to create a single source of truth for what actually happened during a given incident, and GitLab enables users to do that easily by creating incident timelines. Throughout an incident, there are many conversations, meetings, and investigations to determine what's going on and how to recover. However, only some key pieces of what happened during the incident need to be identified to build an incident timeline, and these timeline items are the building blocks of the important incident metrics.  
\n\nLet's walk through an example of how that could work:\n\n- When an alert is triggered, Sally, the engineer on call, gets paged.\n- Sally is in the middle of breakfast and doesn't hear the first page.\n- When she gets paged again, she picks up her phone (`impact detected`) and starts to investigate.\n- After taking a closer look, it looks like something isn't working as expected and she declares an incident.\n- After reviewing the metrics that triggered the alert, she notices this has been happening for 8 minutes.\n- Sally determines that the true `start time` of the incident was 8 minutes before the first alert.\n\nSo far, two important timeline events have happened. By applying the `start time` and `impact detected` tags to your incident timeline, you can measure the time to detection, which is the difference between these two timestamps.\n\n![time_to_detection](https://about.gitlab.com/images/blogimages/incident-mgmt/time_to_detection.png)\n\nAfter Sally has had a chance to start investigating the alert, she reaches out to team members that can help, sets up a video conference call, and starts to determine the root cause. The `response is initiated`. \n\nThe response team is seeing multiple reports from customers and internal users that traffic is absent. After taking a closer look at the alert and recent changes, the team is able to determine that this incident originates from a recent deployment. Sally coordinates with the Release Manager to roll back the deployment. Once the rollback is complete, the incident has been mitigated. So far we've captured two key metrics, time to detect and time to mitigate.\n\n![time_to_mitigation](https://about.gitlab.com/images/blogimages/incident-mgmt/time_to_mitigation.png)\n\nOnce the changes from the bad deployment have been reverted, Sally continues to monitor for any additional alerts, irregular logs, or reports that traffic is absent. 
Things continue to look like they are working as expected, marking the `end time` of the incident and declaring the incident _resolved_. (In this example, the time to mitigate is the same as the time to recover since the rolled back deployment restored services.) Sally starts working on creating/determining corrective actions and investigating the total impact that users experienced during the incident. Once these closing tasks are complete, the incident is closed.\n\n![time_to_recovery](https://about.gitlab.com/images/blogimages/incident-mgmt/time_to_recovery.png)\n\nAt GitLab we've built [incident timelines](https://docs.gitlab.com/ee/operations/incident_management/incident_timeline_events.html#view-the-timeline) on the [incident](https://docs.gitlab.com/ee/operations/incident_management/incidents.html) issue type as a first step towards tracking important incident metrics.  We are currently building an MVC for [incident tags](https://gitlab.com/groups/gitlab-org/-/epics/8741) so incident response teams can capture relevant incident timestamps during an incident and add them to the incident timeline.  
To learn more about how incident timelines can help your team during an incident, check out [How to leverage GitLab incident timelines](https://about.gitlab.com/blog/gitlab-incident-timelines/).\n\nSpecial thanks to GitLab's talented Product Designer [Amelia Bauerly](https://gitlab.com/ameliabauerly) who illustrated the examples in this blog post.\n",[680,1428,836],{"slug":1825,"featured":6,"template":683},"visualizing-incident-management-metrics","content:en-us:blog:visualizing-incident-management-metrics.yml","Visualizing Incident Management Metrics","en-us/blog/visualizing-incident-management-metrics.yml","en-us/blog/visualizing-incident-management-metrics",{"_path":1831,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":1832,"content":1838,"config":1843,"_id":1845,"_type":16,"title":1846,"_source":18,"_file":1847,"_stem":1848,"_extension":21},"/en-us/blog/how-automation-is-making-devops-pros-jobs-easier",{"title":1833,"description":1834,"ogTitle":1833,"ogDescription":1834,"noIndex":6,"ogImage":1835,"ogUrl":1836,"ogSiteName":697,"ogType":698,"canonicalUrls":1836,"schema":1837},"How automation is making DevOps pros’ jobs easier","Six ways automation in a DevSecOps platform aids security, monitoring, compliance, and CI/CD.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749662504/Blog/Hero%20Images/devsecops-automated-security.jpg","https://about.gitlab.com/blog/how-automation-is-making-devops-pros-jobs-easier","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How automation is making DevOps pros’ jobs easier\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Sharon Gaudin\"}],\n        \"datePublished\": \"2022-12-12\",\n      }",{"title":1833,"description":1834,"authors":1839,"heroImage":1835,"date":1840,"body":1841,"category":14,"tags":1842},[1364],"2022-12-12","\nAs DevOps professionals look for ways to save time, money, and tech muscle as they work to 
push better and more secure software out the door, they’re increasingly seeing the advantages of automation — and that those advantages seamlessly come with adopting an end-to-end [DevSecOps](/topics/devsecops/) platform. \n\nIn a 2022 GitLab quiz, more than 82% of respondents said automation plays a “vital” role in developing and deploying safer and faster releases. \n\nIt’s clear that DevOps professionals are realizing that automation minimizes the need for a lot of extra hands-on and time-consuming work, like backup, installation, and maintenance. It also can reduce the potential for human error and provide consistency. A DevSecOps platform, unlike a cobbled-together [DIY toolchain](https://page.gitlab.com/resources-ebook-trading-diy-devops-for-a-single-platform.html), offers many advantages, like visibility and collaboration. Another major benefit is that it offers automation for everything from alerts to [testing](/blog/want-faster-releases-your-answer-lies-in-automated-software-testing/) and monitoring.\n\n## Benefits of DevSecOps automation\n\nHere is how automation throughout the software lifecycle could help DevOps teams cut time and money spent on repetitive tasks, eliminate human errors, and streamline the whole DevOps process:\n\n1. Security – A critical benefit of migrating to a full DevSecOps platform is that software won’t simply get a security test at the end of the pipeline – an inefficient, and often costly, feedback system. When [security is shifted left](/blog/efficient-devsecops-nine-tips-shift-left/), if a vulnerability or compliance issue is introduced into the code, it’s identified almost immediately thanks to automated and consistent testing. Automation built into a DevOps platform leads to better software and reduces the time between designing new, higher-quality features and rolling them out into production. And that maximizes the overall return on software development.\n\n2. 
Compliance – With a single DevSecOps application, [compliance confirmation](/stages-devops-lifecycle/govern/) lives within the platform and is automated. That means professionals can verify the compliance of their code without leaving their workflow, removing the need for compliance managers to require developers to context switch among different point solutions in a DIY toolchain, which can lead to the loss of productivity and efficiency. \n\n3. Configuration – It’s a complicated job to set up, manage, and maintain application environments. [Automated configuration management](/stages-devops-lifecycle/configure/) is designed to handle these complex environments across servers, networks, and storage systems.\n\n4. Continuous integration (CI) – This is the step that enables the DevOps practice of iteration by committing changes to a shared source code repository early and often – often several times a day. [CI](/blog/basics-of-gitlab-ci-updated/) is all about efficiency. By automating manual work and testing code more frequently, teams can iterate faster and deploy new features with fewer bugs more often.\n\n5. Continuous delivery (CD) – This is a software development process that works in conjunction with continuous integration to automate the application release process. When [deployments are handled automatically](/blog/cd-automated-integrated/), software release [processes are low-risk, consistent, and repeatable](/blog/boring-solutions-faster-iteration/). \n\n6. Monitoring – This is a proactive, automated part of the process, focused on tracking software, infrastructure, and networks to trace status and raise alerts to problems. [Monitoring](/stages-devops-lifecycle/monitor/) increases security, reliability, and agility. 
\n\n## Automation by the numbers\n\nIn fact, the [GitLab 2022 Global DevSecOps Survey](https://learn.gitlab.com/dev-survey-22/2022-devsecops-report), which polled more than 5,000 DevSecOps professionals, showed that automation is becoming increasingly critical to all DevOps teams.\n\nThe survey found that 47% of teams report their testing is fully automated today, up from 25% last year. Another 21% plan to roll out test automation at some point in 2022, and 15% hope to do so in the next two or more years. And three-quarters of respondents told us their teams use a DevSecOps platform or plan to use one this year. \n\nWhy are they using a platform? Well, security professionals called out easier automation and more streamlined deployments.\n\n## Fewer repetitive and unnecessary tasks\n\nSo what is all of this automation enabling DevOps professionals to do? They’re able to let go of a lot of work. \n \nAccording to the DevSecOps Survey, respondents said they’ve been able to reduce a lot of repetitive tasks. For instance, they say they no longer have to do as much infrastructure “handholding” — they’re not manually testing their code, writing messy code, and ignoring code quality. \n \nWith automation, each task is performed identically and with consistency, reliability, and accuracy. This promotes speed and increases deliveries, and, ultimately, deployments. While it doesn’t remove humans from the picture, automation minimizes dependency on humans for managing recurring tasks. \n\nAnd with GitLab’s single, end-to-end DevSecOps platform, automation is a system feature and not something that has to be added in. Automation with the GitLab platform is ready to go. 
Check out the [“Ditching DIY DevOps for GitLab’s Single Platform”](https://page.gitlab.com/resources-ebook-trading-diy-devops-for-a-single-platform.html) to learn more ways a platform can help DevOps teams.\n",[1128,1428,793,1369],{"slug":1844,"featured":6,"template":683},"how-automation-is-making-devops-pros-jobs-easier","content:en-us:blog:how-automation-is-making-devops-pros-jobs-easier.yml","How Automation Is Making Devops Pros Jobs Easier","en-us/blog/how-automation-is-making-devops-pros-jobs-easier.yml","en-us/blog/how-automation-is-making-devops-pros-jobs-easier",{"_path":1850,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":1851,"content":1857,"config":1863,"_id":1865,"_type":16,"title":1866,"_source":18,"_file":1867,"_stem":1868,"_extension":21},"/en-us/blog/a-snapshot-of-modern-devops-practices-today",{"title":1852,"description":1853,"ogTitle":1852,"ogDescription":1853,"noIndex":6,"ogImage":1854,"ogUrl":1855,"ogSiteName":697,"ogType":698,"canonicalUrls":1855,"schema":1856},"A snapshot of modern DevOps practices today","We consulted three market research firms for their take on DevOps today and in the future. 
Here's what they said about modern DevOps practices.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749668495/Blog/Hero%20Images/how-you-can-help-shape-the-future-of-securing-applications-at-gitlab.jpg","https://about.gitlab.com/blog/a-snapshot-of-modern-devops-practices-today","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"A snapshot of modern DevOps practices today\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Valerie Silverthorne\"}],\n        \"datePublished\": \"2022-10-31\",\n      }",{"title":1852,"description":1853,"authors":1858,"heroImage":1854,"date":1860,"body":1861,"category":14,"tags":1862},[1859],"Valerie Silverthorne","2022-10-31","At almost 15 years old, DevOps has been around long enough to settle in and take shape at organizations around the world. But what do “modern” DevOps practices look like today, and how are they likely to change? Three market research firms gave us their take on the current generation of DevOps, and what’s coming next. \n\n## BizDevOps anyone?\n\nIf there’s one clear sign of DevOps maturity, it’s the fact that the business side has seamlessly inserted itself into what was forever a bastion of technologies and tech-driven practices. With some of the [bigger DevOps battles well in hand](/developer-survey/) (broader adoption of automation, more frequent deployments, and increased software testing), teams are able to bring in new metrics, including user experience, customer satisfaction, and other business drivers. 451 Research found business objectives and outcomes are the leading priority (51%) for enterprises as they refine, improve, and expand their DevOps implementations. 
In fact, 451 said business metrics are now almost as important a measure of DevOps success as technical achievements like application performance and quality.(1)\n\nFurther underscoring the way modern DevOps practices have broadened their focus is the increasing interest in value stream management, which looks at the software development lifecycle from idea generation to customer delivery and satisfaction level. Research firm IDC, in its \"Accelerated App Delivery Survey 2021\" (U.S. Results)(3), published January 2022, said value stream management is going to be one of the top investment priorities for DevOps teams this year. \n\n## DevOps and security\n\nBut the focus on business objectives doesn’t mean that work on the tech side of DevOps is done; in fact, DevSecOps and security in DevOps in general continue to be a tricky balance for many teams. IDC, also in its \"Accelerated App Delivery Survey 2021\" (referenced above), notes the cognitive dissonance of DevOps teams saying security is a top priority and feeling confident about their security posture while at the same time acknowledging DevSecOps is only in use for 25% or less of application development on average. \n\nForrester Research, in its \"State of Application Security, 2022\" (May 9, 2022), said: “Savvy security professionals know that to drive application security tool adoption, they must engage developers in the technology decision-making process. With both tooling\ndecision-making and budget moving to development, security pros must redefine\ntheir role in application security and take advantage of the opportunity to become\nmore strategic.”\n\n## The role of DevOps platforms\n\nSecurity is an ongoing substantive issue on DevOps teams but there are also a number of smaller, but still significant, problems teams need to solve, including [toolchain debt](/blog/battling-toolchain-technical-debt/), the challenge of scaling, and the need for a product and platform structure. 
DevOps platforms can help with all of those challenges.\n\nFor starters, it’s nearly impossible to scale DevOps throughout an enterprise without a DevOps platform supporting the effort. A platform provides a single source of truth for all teams, eliminates handoffs, and allows visibility into every stage of the process. A DevOps platform also helps eliminate the inefficiencies caused by too many tools and toolchains. Our [2022 Global DevSecOps Survey](/developer-survey/) found 69% of teams want to streamline their toolchains to reduce time spent on maintenance/integration and improve developer quality of life.\n\nWhat does a DevOps platform look like in 2022? Forrester Research, in \"The Forrester Guide to DevOps 2022\" (September 14, 2022) said modern DevOps platforms are “integrated and automated; create a software automation abstraction layer; use SLAs to drive continuous improvement; and have cloud platforms as deployment targets of choice.”\n\n## Culture (still) matters\n\nIn the early days of DevOps the talk was **all** about the culture challenge of bringing the vastly different dev and ops teams together. Somewhat surprisingly, market research firms are still talking about culture today, perhaps because the definition of DevOps has expanded to include more than just dev, ops, and even sec: BizDevSecUXTestPlatformLowCodeOps... ad infinitum, apparently.\n\nOrganizations wanting DevOps success must continue to push the importance of culture, collaboration, and communication, IDC reported in its \"Accelerated App Delivery Survey 2021.\" Forrester Research offered a stark assessment in \"The State of DevOps, 2022\" (June 27, 2022): “Never underestimate the importance of cultural transformation. Laggard organizations punish the bearers of bad tidings and don’t understand failure as a learning opportunity. 
Exorcizing these toxic attitudes is far easier said than done.”\n\n## Modern DevOps means modern technologies\n\nModern DevOps teams continue to incorporate new technologies into their practices. Two standouts: [AI/ML](/blog/why-ai-in-devops-is-here-to-stay/) and [GitOps](/blog/the-ultimate-guide-to-gitops-with-gitlab/). 451 points to rising interest in AIOps specifically to address the “too much information” problem with logs and metrics.(2) \n\n## Looking forward\n\nChange is of course a given and it’s safe to say that DevOps teams will face new organizational structures, new teammates, and complicated technology adoption challenges.\n\n### Cross-functional teams organized around products\n\nAfter years of bringing dev and ops together, some believe it’s time to reach out further. Forrester, in \"The Future of DevOps\" (June 8, 2022), said: “In the future, cross-functional teams, from business stakeholders to operational site reliability engineers (SREs), will organize around products, delivering business value via DevOps platforms.”\n\n### Wider and deeper platforms\n\nAnd those DevOps platforms “will consolidate, extend and deepen,” Forrester predicts in \"The Future of DevOps,\" cited above.\n\n### Introducing new teammates\n\nRoughly 66% of our 2022 DevSecOps Survey respondents told us their DevOps practices include a low code/no code tool. And that’s going to spread to all teams in the coming years. 
“Citizen development is a logical evolution of how enterprises deliver apps and enable digital business,” Forrester Research said in \"The Future of DevOps.\"\n\n### DevOps on the edge\t\n\nWith the Internet of Things and 5G becoming larger on the horizon, it’s not much of a stretch to predict modern DevOps teams will need to be able to support products with data literally [“on the edge.”](https://www.techtarget.com/searchdatacenter/definition/edge-computing) \n\n- [1] 451 Research, a part of S&P Global Market Intelligence, Mature DevOps Means Business, Jay Lyman, Senior Research Analyst, June 2022\n- [2] 451 Research, a part of S&P Global Market Intelligence, Business Objectives and Benefits Become Top Priority - Highlights from VotE DevOps, Jay Lyman, Senior Research Analyst, April 2022\n- [3] IDC, U.S. Accelerated Application Delivery Survey, Doc #US47924622, Jan 2022\n",[1589,1428,1128],{"slug":1864,"featured":6,"template":683},"a-snapshot-of-modern-devops-practices-today","content:en-us:blog:a-snapshot-of-modern-devops-practices-today.yml","A Snapshot Of Modern Devops Practices Today","en-us/blog/a-snapshot-of-modern-devops-practices-today.yml","en-us/blog/a-snapshot-of-modern-devops-practices-today",{"_path":1870,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":1871,"content":1877,"config":1882,"_id":1884,"_type":16,"title":1885,"_source":18,"_file":1886,"_stem":1887,"_extension":21},"/en-us/blog/why-devops-collaboration-continues-to-be-important",{"title":1872,"description":1873,"ogTitle":1872,"ogDescription":1873,"noIndex":6,"ogImage":1874,"ogUrl":1875,"ogSiteName":697,"ogType":698,"canonicalUrls":1875,"schema":1876},"Why DevOps collaboration continues to be important","Modern DevOps isn't just about tech adoption and new processes. DevOps collaboration is going to play a key role. 
Here's why.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749663397/Blog/Hero%20Images/logoforblogpost.jpg","https://about.gitlab.com/blog/why-devops-collaboration-continues-to-be-important","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Why DevOps collaboration continues to be important\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Valerie Silverthorne\"}],\n        \"datePublished\": \"2022-10-25\",\n      }",{"title":1872,"description":1873,"authors":1878,"heroImage":1874,"date":1879,"body":1880,"category":14,"tags":1881},[1859],"2022-10-25","\nIt’s tempting to think the concept of DevOps collaboration is something no one needs to talk about anymore. After all, the methodology has been around for nearly 15 years, is in widespread use, and has clearly proven to be successful at getting safer software out the door faster. Haven’t we figured out DevOps collaboration by now?\n\nThe answer is no, at least according to our [2022 Global DevSecOps Survey](https://about.gitlab.com/developer-survey/previous/2022/) and to industry experts looking at the future of DevOps.\n\nFor starters, dev and ops respondents to our survey told us programming languages and soft skills like collaboration are going to be most important for their careers going forward. DevOps collaboration was the second most important skill for sec pros surveyed. These results were far from a one-off: In our [2020 survey](/images/developer-survey/gitlab-devsecops-2021-survey-results.pdf), dev, sec, and ops were unanimous that “soft skills,” including DevOps collaboration, were the most critical for future careers. In [2021](/images/developer-survey/gitlab-devsecops-2021-survey-results.pdf), sec and ops continued to prioritize DevOps collaboration for the future, while devs opted for AI/ML. 
\n\nThis year, we asked over 5,000 survey takers what would be most important to their careers, but we didn’t ask *why* it would be so important. A look at some recent thought leadership around DevOps collaboration sheds some light.\n\nAccording to [an article in SDX Central](https://www.sdxcentral.com/articles/analysis/devops-its-about-the-people/2022/07/), pundits think collaboration is “critical for DevOps success” today and in the future. An [article in Tech Beacon](https://techbeacon.com/app-dev-testing/future-devops) goes further, suggesting DevOps will embrace business metrics as a measure of success going forward, and, as such, will require levels of cross-functional collaboration not seen before. \n\nIn other words, as DevOps expands beyond a technology goal (develop software) to a business goal (ensure customer satisfaction or business profitability), more teams will be seated at the table. The more people involved, the more DevOps collaboration will be critical to the future.\n\nWe’d like to know how DevOps collaboration works on _your_ team. Our 12-question survey will take you less than four minutes! 
[Take the survey!](/blog/take-our-survey-on-collaborative-software-development/)\n",[1128,1589,1428],{"slug":1883,"featured":6,"template":683},"why-devops-collaboration-continues-to-be-important","content:en-us:blog:why-devops-collaboration-continues-to-be-important.yml","Why Devops Collaboration Continues To Be Important","en-us/blog/why-devops-collaboration-continues-to-be-important.yml","en-us/blog/why-devops-collaboration-continues-to-be-important",{"_path":1889,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":1890,"content":1896,"config":1902,"_id":1904,"_type":16,"title":1905,"_source":18,"_file":1906,"_stem":1907,"_extension":21},"/en-us/blog/why-the-market-is-moving-to-a-platform-approach-to-devsecops",{"title":1891,"description":1892,"ogTitle":1891,"ogDescription":1892,"noIndex":6,"ogImage":1893,"ogUrl":1894,"ogSiteName":697,"ogType":698,"canonicalUrls":1894,"schema":1895},"Why the market is moving to a platform approach to DevSecOps","A single DevOps platform improves ROI, the developer experience, and customer retention and satisfaction.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749667886/Blog/Hero%20Images/cobolshortage.jpg","https://about.gitlab.com/blog/why-the-market-is-moving-to-a-platform-approach-to-devsecops","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Why the market is moving to a platform approach to DevSecOps\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"GitLab\"}],\n        \"datePublished\": \"2022-10-24\",\n      }",{"title":1891,"description":1892,"authors":1897,"heroImage":1893,"date":1898,"body":1899,"category":14,"tags":1900},[1524],"2022-10-24","The market is moving to a platform approach to [DevSecOps](/topics/devsecops/). 
What had previously been a process that let different engineering teams adopt their own tools for different stages of the software development lifecycle – what we call “DIY DevOps” – is being replaced by a method that leverages a single application.\n\nWhy is this happening? First, IT managers are coming to grips with the inefficiencies and cost of toolchain sprawl. Second, executives are relying on digital transformation to solve significant business-level problems: improving developer onboarding and productivity, building high-performing teams, securing the software supply chain, and creating a secure on-ramp to the public cloud. Finally, there’s the impact of [the potential recession](https://www.worldbank.org/en/news/press-release/2022/09/15/risk-of-global-recession-in-2023-rises-amid-simultaneous-rate-hikes), which has accelerated the above trends.\n\nWe recently commissioned a [Forrester Consulting “Total Economic Impact™ of GitLab’s Ultimate Plan” study](https://page.gitlab.com/resources-study-forrester-tei-gitlab-ultimate.html) to better understand how companies save on costs and achieve business and technology goals with GitLab. We focused on our Ultimate tier, which is the fastest growing part of the business. We believe the results align with the business requirements needed to endure economic headwinds and position companies for success: strong return on technology investment, cost savings through technical tool consolidation, a faster pace of application releases to acquire and retain customers, greater development and delivery efficiency, increased and simplified security, and a rapid payback period. \n\nGitLab’s DevOps platform enables source code management, continuous integration/continuous delivery, advanced security capabilities, and more in a single application. 
The Forrester study found that combination led to:\n\n* Three-year ROI of 427%\n* 12x increase in the number of annual releases for revenue generation applications\n* 87% improvement in development and delivery efficiency time\n* Less than six-month payback period\n\n## Understanding DevOps pain points\n\nTo realize the benefits of a single DevOps platform, organizations have to assess their pain points. Here are some common development lifecycle obstacles that affect organizations of all sizes:\n\n* Complex toolchains and processes\n* Inefficient development environments\n* Lack of security skills\n* Rushed development cycles\n* No single source of truth or single code repository\n* Poor software testing practices\n\nAll of these pain points can impede an organization’s ability to manage through a recession and recovery. \n\n## The benefits of a DevOps platform\n\nThe Forrester study found that GitLab Ultimate provided a composite organization, based on interviewed customers, 10 key quantified benefits over a three-year period. While each benefit on its own could have a positive impact on a business’s ability to stay steady and even thrive during difficult economic times, together they are a powerhouse that can eliminate many pain points.\n\nHere are five of those benefits of the GitLab Ultimate Plan:\n\n### Vulnerability management\n\nAs GitLab’s 2022 Global DevSecOps Survey found, [security is top of mind](/blog/gitlabs-2022-global-devsecops-survey-security-is-the-top-concern-investment/) for all DevOps organizations. Yet security at scale can be challenging, especially finding and hiring professionals with the right skills.\n\nA benefit of GitLab Ultimate, according to the Forrester study, is greater efficiency in managing vulnerabilities. The DevOps platform [integrates and automates vulnerability management](/direction/govern/threat_insights/vulnerability_management/) within the development lifecycle. 
Issues can be identified, logged, triaged, tracked, and remediated – all in the same DevOps application. Developers can address vulnerabilities in real time, avoiding release delays or software defects and bugs. According to Forrester, the composite organization realized savings of “hours a week because developers have access to better context about the vulnerabilities. This in turn means less back and forth between development and QA/security on an issue.”\n\n### Less homegrown tool development/open source solution management\n\nDevOps teams often spend a considerable amount of time creating tools they need from scratch or finding and managing open source options. GitLab reduces [toolchain complexity (a.k.a. debt)](/blog/battling-toolchain-technical-debt/) by building into the platform the tools and features developers need, enabling them to manage their environment as a single application. GitLab Ultimate enabled the Forrester study’s composite organization to shift “from manually intensive tasks requiring the full attention of the developer, security, and operations teams to an environment where they now spend no more than a few hours per day per person on the same tasks.”\n\n### Efficient development\n\nA highly efficient development process impacts the developer experience, which improves retention. GitLab Ultimate enabled the composite organization to develop code faster, deliver higher quality code, enable better collaboration, and improve the ability to monitor applications, according to the Forrester study. Other advantages include: more streamlined processes, better efficiency among developers and non-technical teammates, and improved visibility and collaboration across the SDLC.\n\n### Better code quality\n\nPoor code quality directly affects a company’s ability to attract and retain customers. 
GitLab enabled the composite organization to have “a single application that streamlines processes to ensure code is tested, scanned, and verified before it is released,” according to the Forrester study. The result is high-quality code (with reduced defects and bugs) that meets security standards.\n\n### More releases, faster\n\nOrganizations want to be able to address customer needs for newer applications, updates, and enhanced feature sets in a timely fashion. With GitLab, the composite organization can “increase the velocity of updates and releases, allowing it to meet customers’ rising digital demands.”\n\nDevOps brought about the following unquantified benefits for the composite organization, according to the Forrester study: more satisfied employees because they are more productive and collaborative; more satisfied customers because of a smoother project workflow, improved release quality, and a faster release frequency; and improved market innovation and competitiveness due to faster development lifecycle and time to market.\n\nWhile DevOps platform benefits are applicable to any economic environment, they are even more so in this time of economic uncertainty. 
GitLab enables organizations to extract the most out of their DevOps environment and achieve faster, higher quality, and more secure development and release cycles.\n\n> Download the full [Forrester Consulting “Total Economic Impact of GitLab’s Ultimate Plan” study](https://page.gitlab.com/resources-study-forrester-tei-gitlab-ultimate.html) for:\n\n* Additional benefits of GitLab Ultimate Plan\n* Testimonials from GitLab customers Forrester interviewed\n* Assumptions and risks to calculate ROI",[1128,1901,728],"research",{"slug":1903,"featured":6,"template":683},"why-the-market-is-moving-to-a-platform-approach-to-devsecops","content:en-us:blog:why-the-market-is-moving-to-a-platform-approach-to-devsecops.yml","Why The Market Is Moving To A Platform Approach To Devsecops","en-us/blog/why-the-market-is-moving-to-a-platform-approach-to-devsecops.yml","en-us/blog/why-the-market-is-moving-to-a-platform-approach-to-devsecops",{"_path":1909,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":1910,"content":1916,"config":1924,"_id":1926,"_type":16,"title":1927,"_source":18,"_file":1928,"_stem":1929,"_extension":21},"/en-us/blog/gitlab-and-oracle-partner-for-a-cloud-native-approach-to-modern-application-development",{"title":1911,"description":1912,"ogTitle":1911,"ogDescription":1912,"noIndex":6,"ogImage":1913,"ogUrl":1914,"ogSiteName":697,"ogType":698,"canonicalUrls":1914,"schema":1915},"Oracle and GitLab partner for cloud-native app development","Learn the benefits of deploying the DevOps platform on Oracle Cloud Infrastructure.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749668514/Blog/Hero%20Images/multi-cloud-future.jpg","https://about.gitlab.com/blog/gitlab-and-oracle-partner-for-a-cloud-native-approach-to-modern-application-development","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"GitLab and Oracle partner for a cloud native approach to modern application 
development\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Creighton Swank\"},{\"@type\":\"Person\",\"name\":\"Vick Kelkar\"}],\n        \"datePublished\": \"2022-10-20\",\n      }",{"title":1917,"description":1912,"authors":1918,"heroImage":1913,"date":1921,"body":1922,"category":14,"tags":1923},"GitLab and Oracle partner for a cloud native approach to modern application development",[1919,1920],"Creighton Swank","Vick Kelkar","2022-10-20","\nModern application development requires a cloud native platform that can operate in and across multiple cloud providers. GitLab has partnered with Oracle to enable customers to run GitLab’s DevOps platform on Oracle Cloud Infrastructure (OCI).\n\nWith OCI, organizations can accelerate migrations of existing enterprise workloads, deliver better reliability and performance for all applications, and offer the complete services customers need to build innovative cloud applications. With GitLab’s DevOps platform and OCI, businesses can create a resilient, high-performance DevOps environment. OCI also supports automatic operating system patching and zero trust architecture, which aligns with GitLab’s focus on [application security](/stages-devops-lifecycle/secure/).\n\n## The benefits of pairing GitLab and OCI\n\nPairing GitLab’s DevOps platform and OCI provides many benefits, including the following:\n\n- performance\n- platform breadth\n- security\n- value\n- hybrid and multi-cloud environments\n- GovCloud regions\n\n### Performance\n\nOCI provides a high-performance, resilient foundation for cloud services. Customers can quickly provision instances that feature the latest-generation processors via API, SDK, command line, Terraform, or the console. Workloads can scale up and/or out based on their requirements and compute-intensive workloads can leverage GPU shapes for hardware acceleration of AI/ML workloads. 
At the same time, GitLab runners can be configured to [leverage Nvidia GPUs](https://docs.gitlab.com/runner/configuration/gpus.html) for various executors to take advantage of GPUs and AI/ML workloads. \n\n### Platform breadth\n\nGitLab’s DevOps platform has the ability to integrate with Kubernetes service like OKE via GitLab Kubernetes agent. Leveraging GitLab’s Kubernetes agent will unlock [GitOps workflow](https://docs.gitlab.com/ee/user/clusters/agent/gitops.html) and [CI/CD workflow](https://docs.gitlab.com/ee/user/clusters/agent/ci_cd_workflow.html) for cloud native development. And the Oracle Cloud Infrastructure also offers a wide variety of platform services that allow customers to run workloads without having to manage infrastructure. Customers can run workloads on compute instances, in containers with Oracle Kubernetes Engine (OKE), or even as serverless functions. Services like object storage and events can be leveraged to build applications without managing infrastructure at all. For a complete list of these services, please click [here](https://docs.oracle.com/en-us/iaas/Content/services.htm). \n\n### Security\n\nThe second generation of OCI has been redesigned from the ground up to be a secure cloud. Oracle designed OCI architecture for security of the platform through isolated network virtualization, highly secure firmware installation, a controlled physical network, and network segmentation. GitLab’s DevOps platform is not only an ODIC provider but the platform integrates with other identity providers to support single sign-on capabilities. The platform’s [permission model](https://docs.gitlab.com/ee/user/permissions.html#instance-wide-user-permissions) follows similar approaches used by OCI around separation of concerns and role-based access to resources. \n\n### Value\n\nMission-critical and revenue-generating applications demand more than just availability from their cloud infrastructure. 
Mission-critical workloads also require consistent performance and the ability to manage, monitor, and modify resources running in the cloud at any time. OCI offers end-to-end SLAs covering performance, availability, and manageability of services. \n\nGitLab’s DevOps platform uses the same code base for the SaaS offering as well as self-managed instances. Having the same code base allows customers to adopt the mission-critical DevOps platform in heavily regulated industries such as financial services and healthcare.\n\n### Support for hybrid and multi-cloud environments\n\nEven though many enterprises are moving workloads to the cloud, the reality is this is a multi-cloud world, and many enterprises still maintain infrastructure locally. Oracle has entered into strategic partnerships designed to make it easier for customers to operate in a hybrid and multi-cloud environment. \n\nOracle has partnered with VMware to create the Oracle Cloud VMware solution that allows customers the ability to use their existing tools and processes to manage a VMware environment in OCI. This allows enterprises to accelerate cloud adoption without having to re-architect their applications.\n\nGitLab’s DevOps platform can be deployed on vSphere infrastructure using the GitLab [omnibus install](https://docs.gitlab.com/omnibus/) method. The platform can be installed on-premises or in the cloud. GitLab can be deployed on VMs and the GitLab runners can extend CI capabilities into other cloud environments and [cloud-native hybrid](https://docs.gitlab.com/ee/administration/reference_architectures/#cloud-native-hybrid) deployments.\n\n### GovCloud regions\n\nOCI can provide government customers with the stringent security standards necessary to protect the federal government's data. Oracle has obtained a P-ATO from the Joint Authorization Board for FedRAMP High in its U.S. Government Cloud regions. Varying levels of DISA authorizations are also available but vary by services. 
Find an up-to-date list [here](https://www.oracle.com/industries/government/federal/fedramp/). Meanwhile, GitLab is pursuing a FedRAMP moderate certification and working on activities related to FedRAMP-ready designation. \n\n## Get started with the GitLab DevOps platform and OCI\nOrganizations looking to run GitLab’s DevOps platform on OCI can leverage the supported [Oracle Linux](/install/) package for the platform install. Alternatively, they can leverage the helm chart or GitLab Operator to deploy to Oracle Kubernetes Engine (OKE), which will provide a [cloud-native hybrid approach](https://docs.gitlab.com/ee/administration/reference_architectures/25k_users.html#cloud-native-hybrid-reference-architecture-with-helm-charts-alternative) of the GitLab DevOps platform on OCI.\n\nGitLab’s DevOps platform, delivered as a single application, can run on multiple clouds and has the capability of supporting various official [Linux packages](/install/). Besides Linux packages, GitLab’s platform also supports deployments on Kubernetes using [helm charts](https://docs.gitlab.com/charts/) and Kubernetes [GitLab Operator](https://docs.gitlab.com/operator/). \n\nIf you would like to learn more about the GitLab DevOps platform and OCI, please access the [LiveLabs](https://apexapps.oracle.com/pls/apex/dbpm/r/livelabs/home).\n\n_[Kelkar](https://gitlab.com/vkelkar) is GitLab's Director of Alliances. 
Swank is Distinguished Cloud Architect and Cloud CTO at Oracle._\n",[1128,1488,1368,284],{"slug":1925,"featured":6,"template":683},"gitlab-and-oracle-partner-for-a-cloud-native-approach-to-modern-application-development","content:en-us:blog:gitlab-and-oracle-partner-for-a-cloud-native-approach-to-modern-application-development.yml","Gitlab And Oracle Partner For A Cloud Native Approach To Modern Application Development","en-us/blog/gitlab-and-oracle-partner-for-a-cloud-native-approach-to-modern-application-development.yml","en-us/blog/gitlab-and-oracle-partner-for-a-cloud-native-approach-to-modern-application-development",{"_path":1931,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":1932,"content":1938,"config":1944,"_id":1946,"_type":16,"title":1947,"_source":18,"_file":1948,"_stem":1949,"_extension":21},"/en-us/blog/how-modern-devops-practices-are-changing-the-operations-role",{"title":1933,"description":1934,"ogTitle":1933,"ogDescription":1934,"noIndex":6,"ogImage":1935,"ogUrl":1936,"ogSiteName":697,"ogType":698,"canonicalUrls":1936,"schema":1937},"How modern DevOps practices are changing the operations role","Today, the ops role is about far more than just keeping the lights on. 
Here's how modern DevOps practices are expanding ops' responsibilities.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749663982/Blog/Hero%20Images/2022-devsecops-survey-blog-header.png","https://about.gitlab.com/blog/how-modern-devops-practices-are-changing-the-operations-role","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How modern DevOps practices are changing the operations role\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Valerie Silverthorne\"}],\n        \"datePublished\": \"2022-10-19\",\n      }",{"title":1933,"description":1934,"authors":1939,"heroImage":1935,"date":1940,"body":1941,"category":14,"tags":1942},[1859],"2022-10-19","\nRemember NoOps, the idea that automation would eliminate the operations role completely? Fast forward a few years and the idea of NoOps today seems almost laughable. In today’s modern DevOps teams it’s safe to say it’s really _“AlltheOps_,” at least based on the results of our [2022 Global DevSecOps Survey](/developer-survey/).\n\n## An expanding role\n\n[No DevOps job is static](/blog/the-changing-roles-in-devsecops/), but ops pros are experiencing truly dramatic changes to their work lives. In fact, ops pros reported seven areas of responsibility now added to their plates thanks to modern DevOps practices:\n\n- Managing the cloud\n- Managing the hardware/infrastructure\n- Maintaining the toolchain\n- DevOps coaching\n- Responsibility for automation\n- Overseeing all compliance and audits\n- Platform engineering\n\nManaging the cloud and hardware/infrastructure were the two tasks most frequently named, and they were split nearly evenly down the middle, with roughly 50% of ops pros focusing on one or the other task primarily. 
Another area – [maintaining the toolchain](/blog/too-many-toolchains-a-devops-platform-migration-is-the-answer/) – is apparently now a job shared with developers, as devs also told us they were spending more time on toolchain maintenance and integration than ever before. That’s not surprising: 44% of teams reported they use between two and five tools, while 41% use between six and 10 tools. That’s a lot of tools, which is clearly one reason for the added ops support. \n\nCompliance and audits are another “new to Ops” area of focus, and this added emphasis comes at a time when organizations everywhere are trying to avoid security breaches with [an increased focus on compliance](/blog/the-importance-of-compliance-in-devops/). It’s a time-consuming process: The majority of ops pros told us they spend between one-quarter and half their time [on audits and compliance](/blog/what-you-need-to-know-about-devops-audits/), a 15% increase since 2021. Almost 25% of ops pros spend between half and three-quarters of their time on these tasks. \n\n## Keeping the balls in the air\n\nThe rising use of [DevOps platforms](/topics/devops-platform/) (75% of our respondents said their organizations already use a DevOps platform or plan to add one this year) is driving operations team members toward [platform engineering](/topics/devops/what-is-a-devops-platform-engineer/). Operations pros are also doubling down on tasks that were likely more informal in the past: DevOps coaching and responsibility for automation. The focus on automation is clearly paying off: In 2022, just shy of 25% of ops pros said their modern DevOps practices were fully automated, up 5 points from 2021 and nearly 17 points from 2020. 
All told, 68% of ops pros said their DevOps teams were “completely” or “mostly” automated.\n\nAnd while ops is adding new responsibilities thanks to modern DevOps, developers are picking up tasks that have traditionally belonged to operations:\n\n- Nearly 77% of devs can provision their own environments.\n- Roughly 38% of developers instrument the code.\n- Another 38% monitor and respond to the infrastructure. \n- 36% of devs said they’re on-call for in-app production alerts.\n\nThe role-swapping doesn't stop there: Nearly 50% of ops pros said they're solely responsible for security on their DevOps teams, up 20% from last year. To put that into perspective, 53% of security respondents told us they felt security was *everyone's* responsiblity.\n\n## Ops, modern DevOps, and TMI\n\nOps pros’ new roles have created some surprising by-products, namely loads of data that teams aren’t necessarily set up to manage effectively. In fact, many of today’s operations teams have a “too much information” problem. A full 39% of ops pros said the DevOps data they need exists but accessing and managing it is difficult. Another 27% said they’re “overwhelmed” by the amount and scope of the data while 14% don’t know what data they need or say their organizations don’t track it. Less than 20% of ops pros say they have the data they need and it’s easy to work with.\n\nHow do you see the ops role changing in the modern DevOps world? 
Let us know in the comments.\n",[1128,1589,1943],"contributors",{"slug":1945,"featured":6,"template":683},"how-modern-devops-practices-are-changing-the-operations-role","content:en-us:blog:how-modern-devops-practices-are-changing-the-operations-role.yml","How Modern Devops Practices Are Changing The Operations Role","en-us/blog/how-modern-devops-practices-are-changing-the-operations-role.yml","en-us/blog/how-modern-devops-practices-are-changing-the-operations-role",{"_path":1951,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":1952,"content":1957,"config":1962,"_id":1964,"_type":16,"title":1965,"_source":18,"_file":1966,"_stem":1967,"_extension":21},"/en-us/blog/gitlab-incident-timelines",{"title":1953,"description":1954,"ogTitle":1953,"ogDescription":1954,"noIndex":6,"ogImage":1874,"ogUrl":1955,"ogSiteName":697,"ogType":698,"canonicalUrls":1955,"schema":1956},"How to leverage GitLab incident timelines","What actually happened before, during, and after the incident? Now it's easier to keep track.","https://about.gitlab.com/blog/gitlab-incident-timelines","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How to leverage GitLab incident timelines\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Alana Bellucci\"}],\n        \"datePublished\": \"2022-10-18\",\n      }",{"title":1953,"description":1954,"authors":1958,"heroImage":1874,"date":1959,"body":1960,"category":14,"tags":1961},[1820],"2022-10-18","\n\n_When you're working on an incident, every second counts._ Team members and leadership are looking for updates. Any interruption can make you lose track of where you were. Finding the root cause or working on a code change to resolve the incident requires time and focus. After the incident is resolved, you'll need to provide a summary of what happened during the post-incident review. 
How can you provide updates and keep track of important events while working on the incident?\n\n## Incident timelines with GitLab\n\nGitLab recently launched [incident timelines](https://docs.gitlab.com/ee/operations/incident_management/incident_timeline_events.html).  Incident timelines are the single source of truth (SSoT) for key updates and events that happen during an incident. They typically include things like when the incident was declared, who is actively working on the incident, and other important events during the incident; i.e. \"Disabling Canary to test a hot fix.\"\n\nUpdating the timeline needs to be done quickly and efficiently. Use GitLab quick actions to add multiple timeline items programmatically.\n\n![quick_action](https://about.gitlab.com/images/blogimages/incident-mgmt/incident_timeline_quick_actions.png)\n\nOr [add any comment from the incident to the timeline](https://docs.gitlab.com/ee/operations/incident_management/incident_timeline_events.html#from-a-comment-on-the-incident) by clicking on the clock icon. This helps avoid the unnecessary shoulder taping for updates so users can focus on firefighting.  \n\nWhen you're at the end of your on-call shift, you can share the timeline as you hand off the incident to summarize what's happened so far. If you've missed adding something important to the timeline, you can always add the event retroactively and post-date it to the correct time. When you wake up for your next shift, you can review what happened while you were away.\n\n## Keeping a record with incident timelines\n\nOnce an incident has been resolved, it can be hard to piece together what actually happened. Sometimes, post-incident reviews don't happen until days after you've worked on the incident. 
_Did the incident originate from an alert or was it from a customer email?_ _Did we meet our Service Level Agreement (SLA)?_ Since you've kept track along the way, incident timelines can be a quick way to refresh your memory on what happened during the incident.  \n\nEstablishing incident timelines as a SSoT minimizes the time spent on incident \"paperwork.\" This gives you time to focus on resolving the incident. Once the incident resolves you can review with team members to minimize the chance of the same incident occurring again.\n\nThe [GitLab Infrastructure Team](/handbook/engineering/infrastructure/#dogfooding) has been testing [dogfooding](https://www.urbandictionary.com/define.php?term=Dog%20fooding) and using incident timelines. We'd love to hear about how you are constructing and recording what happens during an incident. You can also take a look at [Improving the Incident Timeline](https://gitlab.com/groups/gitlab-org/-/epics/8256) and help influence what we build next.\n",[680,1428,836],{"slug":1963,"featured":6,"template":683},"gitlab-incident-timelines","content:en-us:blog:gitlab-incident-timelines.yml","Gitlab Incident Timelines","en-us/blog/gitlab-incident-timelines.yml","en-us/blog/gitlab-incident-timelines",{"_path":1969,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":1970,"content":1975,"config":1980,"_id":1982,"_type":16,"title":1983,"_source":18,"_file":1984,"_stem":1985,"_extension":21},"/en-us/blog/take-our-survey-on-collaborative-software-development",{"title":1971,"description":1972,"ogTitle":1971,"ogDescription":1972,"noIndex":6,"ogImage":1243,"ogUrl":1973,"ogSiteName":697,"ogType":698,"canonicalUrls":1973,"schema":1974},"Take our survey on collaborative software development!","If you have 4 minutes, we'd love to hear about how your organization collaboratively develops software.","https://about.gitlab.com/blog/take-our-survey-on-collaborative-software-development","\n                        {\n        \"@context\": 
\"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Take our survey on collaborative software development!\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"GitLab\"}],\n        \"datePublished\": \"2022-10-12\",\n      }",{"title":1971,"description":1972,"authors":1976,"heroImage":1243,"date":1977,"body":1978,"category":14,"tags":1979},[1524],"2022-10-12","\u003Cdiv data-tf-widget=\"hYFvwmA0\" data-tf-iframe-props=\"title=How collaborative is your software organization?\" data-tf-medium=\"snippet\" style=\"width:100%;height:400px;\">\u003C/div>\u003Cscript src=\"//embed.typeform.com/next/embed.js\">\u003C/script>\n",[1128],{"slug":1981,"featured":6,"template":683},"take-our-survey-on-collaborative-software-development","content:en-us:blog:take-our-survey-on-collaborative-software-development.yml","Take Our Survey On Collaborative Software Development","en-us/blog/take-our-survey-on-collaborative-software-development.yml","en-us/blog/take-our-survey-on-collaborative-software-development",{"_path":1987,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":1988,"content":1994,"config":1999,"_id":2001,"_type":16,"title":2002,"_source":18,"_file":2003,"_stem":2004,"_extension":21},"/en-us/blog/the-top-software-developer-challenges-in-2022",{"title":1989,"description":1990,"ogTitle":1989,"ogDescription":1990,"noIndex":6,"ogImage":1991,"ogUrl":1992,"ogSiteName":697,"ogType":698,"canonicalUrls":1992,"schema":1993},"The top software developer challenges in 2022","From AI to hiring, security breaches and Covid, our 2022 Global DevSecOps Survey uncovered the top software developer challenges.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749668107/Blog/Hero%20Images/global-developer-survey.png","https://about.gitlab.com/blog/the-top-software-developer-challenges-in-2022","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"The top software 
developer challenges in 2022\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Valerie Silverthorne\"}],\n        \"datePublished\": \"2022-10-05\",\n      }",{"title":1989,"description":1990,"authors":1995,"heroImage":1991,"date":1996,"body":1997,"category":14,"tags":1998},[1859],"2022-10-05","\nIn our 2022 Global DevSecOps Survey we asked developers about the most difficult parts of their jobs, a question that’s been answered in previous years with comments about tricky toolchain integrations, complex programming languages and business-side folks who \"just don’t get it.”\n\nBut apparently this year *we* didn’t get it: [More than 5,000 respondents](/developer-survey/) told us they were worried about the inability to hire and retain talent, the economy and the post-Covid world they’re expected to work in. They are also concerned about adoption of complex technologies such as artificial intelligence, 5G and edge computing, and the fear of/responsibility for security breaches and what that would mean to their organizations.\n\n(That sound you hear in the background is the shattering of the “devs are oblivious to business” stereotype.)\n\nObviously a tectonic shift in the developer role is underway.\n\n“Two massive waves are crashing against each other right now,” explains [Brendan O’Leary](/company/team/#brendan), staff developer evangelist at GitLab. “One wave is developers as kingmakers. We were ‘brought into the palace’ because every company needed to have software as its core competency and the pendulum swung toward developers. But the other wave is the massive correction in the market. 
These two things happening at the same time are putting a huge squeeze on businesses and developers.”\n\nA [longstanding shortage of software developers](https://www.forbes.com/sites/forbestechcouncil/2021/06/08/is-there-a-developer-shortage-yes-but-the-problem-is-more-complicated-than-it-looks/?sh=215d08f33b8e) has been made worse by macroeconomic conditions, but demand for software isn’t decreasing despite the market upheaval, O'Leary adds. The result is devs at the center of nearly all the most difficult challenges today, from [hiring](/blog/6-tips-to-make-software-developer-hiring-easier/) to [security breaches](/blog/gitlabs-2022-global-devsecops-survey-security-is-the-top-concern-investment/) and new technologies. \n\nTo put it another way: “We can’t be flippant about any part of the job anymore,” he adds.\n\nHere’s a look at what is keeping developers up at night.\n\n### Security\n\nMore than 1,000 respondents said all of the issues around security make their jobs infinitely more difficult and complicated.\n\n- “(The hardest thing is to) keep it secure and keep it updated.”\n- “My challenge is keeping up with the latest tools and security for optimal performance and privacy.”\t\t\t\n- “I am trying to build applications that are secure and stable.”\n- “Cybersecurity attacks are the biggest challenge facing us today.”\n- “The hardest part of my job? Data security, data security, I repeat, data security.”\n\n### “The Covid effect”\n\nHundreds of survey takers pointed to the changes brought about by Covid, including remote/hybrid work, economic forces, \"The Great Resignation,” and a number of other things. 
One respondent called it “the Covid effect” and many stressed that this new way of working has made their fast-paced jobs harder.\n\n### Staffing\n\nHard to hire, hard to keep, hard to even find...that’s what survey takers said about the issue of staffing.\n\n- “The biggest challenge is finding sufficient coding staff.”\n- “The biggest challenge is to find people to fill the jobs.”\n- “We have experienced significant difficulty in finding and retaining qualified staff.”\n\n### New technologies\n\nWith all the other pressures on developers, even exciting new technologies can seem daunting. One respondent put it this way:\n\n_“4G, 5G, AI, Metaverse, virtual space - developers have to support all of this.”_\n\nMany, many others simply said: “Technology is rapidly changing.”\n\n## Bold new challenges\n\nThis is all a long way of saying there has perhaps never been more on developers’ plates. Two developer respondents summed it up well:\n\n_“We have a development capacity challenge, a recruiting challenge and a knowledge-sharing challenge.”_\n\n_“For me, these are the eight biggest challenges we are facing as software developers: 1) Keeping pace with innovation. 2) Cultural change. 3) Customer experience. 4) Data privacy. 5) Cybersecurity. 6) AI and automation. 7) Data literacy. 8) Cross-platform functionality.”_\n\nWhat do you see as the biggest challenges facing developers? 
Let us know in the comments field below.\n",[1589,1128,269],{"slug":2000,"featured":6,"template":683},"the-top-software-developer-challenges-in-2022","content:en-us:blog:the-top-software-developer-challenges-in-2022.yml","The Top Software Developer Challenges In 2022","en-us/blog/the-top-software-developer-challenges-in-2022.yml","en-us/blog/the-top-software-developer-challenges-in-2022",{"_path":2006,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":2007,"content":2013,"config":2020,"_id":2022,"_type":16,"title":2023,"_source":18,"_file":2024,"_stem":2025,"_extension":21},"/en-us/blog/mobile-devops-with-gitlab-part-3-code-signing-for-ios-with-gitlab-and-fastlane",{"title":2008,"description":2009,"ogTitle":2008,"ogDescription":2009,"noIndex":6,"ogImage":2010,"ogUrl":2011,"ogSiteName":697,"ogType":698,"canonicalUrls":2011,"schema":2012},"Mobile DevOps: iOS code signing with GitLab CI & Fastlane","Learn how to use Project-level Secure Files with Fastlane Match to sign an iOS app.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749668568/Blog/Hero%20Images/vinicius-amnx-amano-IPemgbj9aDY-unsplash.jpg","https://about.gitlab.com/blog/mobile-devops-with-gitlab-part-3-code-signing-for-ios-with-gitlab-and-fastlane","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Mobile DevOps with GitLab, Part 3 - Code signing for iOS with GitLab CI and Fastlane\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Darby Frey\"}],\n        \"datePublished\": \"2022-10-03\",\n      }",{"title":2014,"description":2009,"authors":2015,"heroImage":2010,"date":2017,"body":2018,"category":14,"tags":2019},"Mobile DevOps with GitLab, Part 3 - Code signing for iOS with GitLab CI and Fastlane",[2016],"Darby Frey","2022-10-03","\n\nThis post is the third in a series of three blog posts showing how GitLab makes code signing easier using a new feature called Project-level Secure Files.\n\n- [Part 
1](/blog/mobile-devops-with-gitlab-part-1/) introduces the Project-level Secure Files feature and the basics of getting started.\n- [Part 2](/blog/mobile-devops-with-gitlab-part-2/) shows an example of how to use Project-level Secure Files to sign an Android app.\n- This post shows how to use the integration with Fastlane Match to sign an iOS app.\n\nCode signing for iOS projects is [notoriously](https://twitter.com/davidcrawshaw/status/1159083791232765953) [difficult](https://twitter.com/bc3tech/status/692778139517255680) and can lead to a lot of time spent debugging errors, but a tool called Fastlane makes it much easier. [Fastlane](https://fastlane.tools/) is an open source tool that greatly simplifies the complexity of the code signing process for iOS development.\n\nIn [Fastlane 2.207.2](https://github.com/fastlane/fastlane/pull/20386) we released support for Project-level Secure Files as a storage backend for Fastlane Match, making it even easier for mobile projects to manage their signing certificates and provisioning profiles within GitLab. Now, we will cover a couple of ways to get started using Project-level Secure Files in a Fastlane project.\n\n## Set up Fastlane Match\n\nIf your project doesn't have a Fastlane Matchfile yet, you can generate one by running the following:\n\n```\nbundle exec fastlane match init\n```\n\nThis command will prompt you to choose which storage backend you want to use (select `gitlab_secure_files`) and to input your project path (for example: `gitlab-org/gitlab`). It will then generate a Fastlane Matchfile configured to use your project's secure files for Fastlane Match.\n\n![Initialize Fastlane Match](https://about.gitlab.com/images/blogimages/2022-09-19-mobile-devops-with-gitlab-part-3-code-signing-for-ios-with-gitlab-and-fastlane/match-init.png)\n\n## Generate a Personal Access Token\n\nNext, you'll need a GitLab Personal Access Token to use Fastlane Match from your local machine. 
To create a Personal Access Token, visit the Access Tokens section in your GitLab profile (for example: [https://gitlab.com/-/profile/personal_access_tokens](https://gitlab.com/-/profile/personal_access_tokens)). Create a new token with the “api” scope. Take note of the token you just created, we'll be using it later.\n\n## Generate and upload \n\nIf you have not created signing certificates or provisioning profiles yet for your project, running Fastlane Match will do all of the work for you. Run the command below with your Personal Access Token:\n\n```\nPRIVATE_TOKEN=YOUR-TOKEN bundle exec fastlane match \n```\n\nYou may be prompted to log in with your Apple developer account. Once authenticated, this command will generate development certificates and profiles in the Apple Developer portal and upload those files to GitLab. You'll be able to view the files in your project's CI/CD settings as soon as the command completes.\n\nYou can also generate other certificate types by specifying the type in the command, for example:\n\n```\nPRIVATE_TOKEN=YOUR-TOKEN bundle exec fastlane match appstore\n```\n\n## Upload-only\n\nIf you have already created signing certificates and provisioning profiles for your project, you can use Fastlane Match Import to load your existing files into Project-level Secure Files. Simply run:\n\n```\nPRIVATE_TOKEN=YOUR-TOKEN bundle exec fastlane match import\n```\n\nYou'll be prompted to input the path to your files. Once those options are provided, your files will be uploaded and visible in your project's CI/CD settings. 
(Note: If you are prompted for the git_url during the import, it is safe to leave it blank and hit enter.)\n\n![Fastlane Match Import](https://about.gitlab.com/images/blogimages/2022-09-19-mobile-devops-with-gitlab-part-3-code-signing-for-ios-with-gitlab-and-fastlane/match-import.png)\n\n## CI/CD pipelines\n\nWith your signing certificates and provisioning profiles loaded in Project-level Secure Files, it's now easy to use those files in your [CI/CD pipelines](/topics/ci-cd/). No access tokens are needed when running jobs in GitLab, so you can load your files into a CI/CD job by adding the fastlane command to a CI job script. For example:\n\n```\ntest:\n  stage: test\n  script:\n    bundle exec fastlane match –readonly\n```\n\nUsing the –readonly flag on CI is suggested to prevent any unintended changes to signing certificates by Fastlane. The Fastlane Match command will sync the certificates to the machine, but does not build the application. To run match and build, configure a lane in your project's Fastfile to do both steps. For example:\n\n**Fastfile**\n\n```\ndefault_platform(:ios)\n\nplatform :ios do\n  desc \"Build the App\"\n  lane :build do\n    setup_ci\n    match(type: 'appstore', readonly: is_ci)\n    build_app(\n      clean: true,\n      project: \"ios_demo.xcodeproj\", \n      scheme: \"ios_demo\"\n    )\n  end\nend\n```\n\n**Matchfile**\n\n```\ngitlab_project(\"gitlab-org/incubation-engineering/mobile-devops/ios_demo\")\nstorage_mode(\"gitlab_secure_files\")\ntype(\"appstore\")\n```\n\n**.gitlab-ci.yml File**\n\n```\nbuild:\n  stage: build\n  script:\n    - bundle exec fastlane build\n```\n\nWith all of that in place, you'll have a CI pipeline that runs a single build job. That job will use the `:build` lane from fastlane to run `setup_ci`, `match`, and `build_app`. The result from that job will be a build of your app, signed with the certificates stored in your project with Project-level Secure Files. 
You could then extend fastlane to push that build to Test Flight or the App Store.\n\nFastlane does a good job of handling the complexity associated with certificate management, so you don't have to worry about it, but there is a bit of a learning curve to getting used to Fastlane. Take a look at [this branch](https://gitlab.com/gitlab-org/incubation-engineering/mobile-devops/ios_demo/-/tree/fastlane_build) in the ios_demo project to for a full working example. Please add any feedback you have in the [feedback issue](https://gitlab.com/gitlab-org/gitlab/-/issues/362407).\n\n## Better Mobile DevOps\n\nWith Project-level Secure Files, you no longer need to rely on hacks or workarounds to automate code signing, and it can be easily added to new or existing [CI/CD pipelines](/topics/ci-cd/).\n\nFor more about how we are working to make better Mobile DevOps at GitLab, check out the [Mobile DevOps Docs](https://docs.gitlab.com/ee/ci/mobile_devops.html), [SaaS runners on macOS](https://docs.gitlab.com/ee/ci/runners/saas/macos_saas_runner.html), and the [Mobile DevOps Playlist](https://www.youtube.com/playlist?list=PL05JrBw4t0KoVEdembEIySgiciCuZj7Zl) on GitLab Unfiltered.\n\nCover image by \u003Ca href=\"https://unsplash.com/@viniciusamano?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText\">Vinicius \"amnx\" Amano\u003C/a> on \u003Ca href=\"https://unsplash.com/s/photos/complex-to-simple?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText\">Unsplash\u003C/a>\n",[1466,1128,750],{"slug":2021,"featured":6,"template":683},"mobile-devops-with-gitlab-part-3-code-signing-for-ios-with-gitlab-and-fastlane","content:en-us:blog:mobile-devops-with-gitlab-part-3-code-signing-for-ios-with-gitlab-and-fastlane.yml","Mobile Devops With Gitlab Part 3 Code Signing For Ios With Gitlab And 
Fastlane","en-us/blog/mobile-devops-with-gitlab-part-3-code-signing-for-ios-with-gitlab-and-fastlane.yml","en-us/blog/mobile-devops-with-gitlab-part-3-code-signing-for-ios-with-gitlab-and-fastlane",{"_path":2027,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":2028,"content":2034,"config":2039,"_id":2041,"_type":16,"title":2042,"_source":18,"_file":2043,"_stem":2044,"_extension":21},"/en-us/blog/what-are-the-benefits-of-a-microservices-architecture",{"title":2029,"description":2030,"ogTitle":2029,"ogDescription":2030,"noIndex":6,"ogImage":2031,"ogUrl":2032,"ogSiteName":697,"ogType":698,"canonicalUrls":2032,"schema":2033},"What are the benefits of a microservices architecture?","On the fence about what a microservices architecture can bring to your team? Here's what you need to know.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749662898/Blog/Hero%20Images/microservices-explosion.jpg","https://about.gitlab.com/blog/what-are-the-benefits-of-a-microservices-architecture","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"What are the benefits of a microservices architecture?\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"GitLab\"}],\n        \"datePublished\": \"2022-09-29\",\n      }",{"title":2029,"description":2030,"authors":2035,"heroImage":2031,"date":2036,"body":2037,"category":14,"tags":2038},[1524],"2022-09-29","\n[Microservices architecture](/topics/microservices/) is a framework where an application is separated into smaller services and each of those services typically runs a unique process and manages its own database. There are many pros and cons to microservices. Let's explore them.\n\n## Advantages of microservices architecture\n\n### Scalability improvements\n\nSince each microservice runs independently, it is easier to add, remove, update or scale each cloud microservice. 
Developers can perform these tasks without disrupting any other microservice in the system. Companies can scale each microservice as needed. For instance, if a particular microservice experiences increased demand because of seasonal buying periods, more resources can be efficiently devoted to it. If demand drops as the season changes, the microservice can be scaled back, allowing resources or computing power to be used in other areas.\n\n### Improved fault isolation\n\nUnder a monolithic architecture structure, when developers experience a failure in one element of the architecture, it will collapse all architecture components. With a microservices architecture, if one service fails, it’s much less likely that other parts of the application will fail because each microservice runs independently. However, businesses need to be careful, because large volumes of traffic can still be overwhelming in some cases.\n\nThe benefit of a microservice architecture is that developers can deploy features that prevent cascading failures. A variety of tools are also available, from GitLab and others, to build fault-tolerant microservices that help improve the resilience of the infrastructure.\n\n### Program language and technology agnostic\n\nA microservice application can be programmed in _any_ language, so dev teams can choose the best language for the job. The fact that microservices architectures are language agnostic also allows the developers to use their existing skill sets to maximum advantage – no need to learn a new programming language just get the work done.\nUsing cloud-based microservices gives developers another advantage, as they can access an application from any internet-connected device, regardless of its platform.\n\n### Simpler to deploy\n\nA microservices architecture lets teams deploy independent applications without affecting other services in the architecture. 
This feature, one of the pros of microservices, will enable developers to add new modules without redesigning the system's complete structure. Businesses can efficiently add new features as needed under a microservices architecture.\n\n### Reusability across different areas of business\n\nSome microservice applications may be shareable across a business. If a site has several different areas, each with a login or payment option, the same microservice application can be used in each instance.\n\n### Faster time-to-market\n\nDevelopers can plug this new “microsurgery” into the architecture without fear of conflicts with other code or of creating service outages that ripple across the website. Development teams working on different microservices don't have to wait for each other to finish. Companies can develop and deploy new features quickly and upgrade older components as new technologies allow them to evolve.\n\n### Ability to experiment\n\nDeciding to go forward with experimentation is much easier with microservices architecture.\n\nIt’s simple to roll out new features because each service is independent of the others. If customers don't like it, or the business benefits aren’t clear, it's much easier to roll it back without affecting the rest of the operation.\n\nIf a  new feature is a customer request, a microservices architecture means they’ll get to experience it in weeks, rather than months or years.\n\n### Improved data security\n\nIf the components of the computer systems architecture break down into smaller pieces, sensitive data is protected from intrusions from another area. While there are connections between all microservices, developers can use secure APIs to connect the services. Secure APIs safeguard data by ensuring it is only available to specifically authorized users, applications and servers. 
If a business requires handling sensitive data such as health or financial information, it's easier to achieve compliance under data security standards such as healthcare's [HIPAA](https://www.hhs.gov/hipaa/index.html) or the European [GDPR](https://gdpr-info.eu).\n\n### Outsourcing flexibility\n\nIt may be necessary for a business to outsource certain functions to third-party partners. Many companies are concerned about protecting intellectual property with a monolithic architecture format. However, a microservices architecture allows businesses to segment areas just for  partners that won’t otherwise disclose core services.\n\n### Team optimization\n\nWhen considering the size of teams you assign to each microservice, consider the two-pizza rule. First articulated by Amazon, which pioneered microservices, the idea is to keep development teams small enough to feed them with two pizzas. Experts explain that this guideline improves work efficiency, allows businesses to achieve goals faster, makes teams easier to manage, creates greater focus among the group and results in higher quality products.\n\n### Attractive for engineers\n\nEngineers find microservices architecture enticing, and companies have a [better chance of finding top-flight talent](/blog/have-devops-jobs-to-fill-try-these-3-strategies-to-hire-and-retain/) to work on microservices application development. Microservices rely on the latest engineering practices and developer tools. This provides an important advantage for businesses hoping to attract specialists.\n\n## Disadvantages of microservices\n\nWhile there are a solid number of advantages for any business, there are also a few disadvantages of microservices to consider before adoption.\n\n### Upfront costs are higher with microservices\n\nWhile cloud microservices are a pro, such as saving money over the long run, there are cons, such as the costs associated with their initial deployment. 
A business needs to have sufficient hosting infrastructure with security and maintenance support. Even more important, it will need skilled teams to manage all services.\n\n### Interface control is crucial\n\nSince each microservice has its own API, any application using that service will be affected if you change the API, and that change is not backward compatible. Any large operation using a microservices architecture will have hundreds, even thousands, of APIs so controlling those interfaces becomes critical to the business's operation, which can be a disadvantage to microservices architecture.\n\n### A different kind of complexity\n\nDebugging can be more challenging with a microservices architecture. Each microservice will have its own set of logs. This provides a minor headache when tracing the source of a problem in the code.\n\n### Integration testing\n\nUnit testing is more manageable with microservices architecture. Integration testing is not. Since the architecture distributes each microservice, developers cannot test the entire system from their machines.\n\n### Service-oriented architecture vs. microservices\n\nIf you work in cloud computing, you're probably aware of the [service-oriented architecture (SOA)](https://www.techtarget.com/searchapparchitecture/definition/service-oriented-architecture-SOA) versus microservices debate. In many ways, the two architectures are similar as they both involve cloud computing for agile development. Both break large monolithic components into smaller units that are easier to work with.\n\nThe biggest difference is that SOA is an enterprise-wide approach to developing software components. Microservices, meanwhile, build standalone applications that perform a specific function and this cloud-native approach to development and deployment makes them more scalable, agile and resistant. \n\nSo, in essence, the difference between the two comes down to scope. 
SOA is an enterprise-wide approach, while a microservices architecture has an application scope.\n\nRead on to learn [how to get started with a microservices architecture](/blog/get-started-with-microservices-architecture/).\n",[1128,1128,836],{"slug":2040,"featured":93,"template":683},"what-are-the-benefits-of-a-microservices-architecture","content:en-us:blog:what-are-the-benefits-of-a-microservices-architecture.yml","What Are The Benefits Of A Microservices Architecture","en-us/blog/what-are-the-benefits-of-a-microservices-architecture.yml","en-us/blog/what-are-the-benefits-of-a-microservices-architecture",{"_path":2046,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":2047,"content":2053,"config":2058,"_id":2060,"_type":16,"title":2061,"_source":18,"_file":2062,"_stem":2063,"_extension":21},"/en-us/blog/mobile-devops-with-gitlab-part-2",{"title":2048,"description":2049,"ogTitle":2048,"ogDescription":2049,"noIndex":6,"ogImage":2050,"ogUrl":2051,"ogSiteName":697,"ogType":698,"canonicalUrls":2051,"schema":2052},"Mobile DevOps with GitLab, Part 2 - Code signing for Android with GitLab","This second part of our tutorial series shows how to use Project-level Secure Files to sign an Android application.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749668592/Blog/Hero%20Images/teddy-gr--adWwTRAm1g-unsplash.jpg","https://about.gitlab.com/blog/mobile-devops-with-gitlab-part-2","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Mobile DevOps with GitLab, Part 2 - Code signing for Android with GitLab\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Darby Frey\"}],\n        \"datePublished\": \"2022-09-28\",\n      }",{"title":2048,"description":2049,"authors":2054,"heroImage":2050,"date":2055,"body":2056,"category":14,"tags":2057},[2016],"2022-09-28","\n\nIn Part 1 of this tutorial series, we talked about a new feature in GitLab called [Project-level Secure 
Files](/blog/mobile-devops-with-gitlab-part-1/). With Project-level Secure Files, you can securely store your build keys as part of your project in GitLab, and avoid [some](https://www.reddit.com/r/androiddev/comments/a4ydhj/how_to_update_app_when_lost_keystore_file/) [painful](https://www.reddit.com/r/gamemaker/comments/v98den/lost_keystore_for_publishing_to_google_play_store/) [problems](https://www.reddit.com/r/androiddev/comments/95oa55/is_there_anyway_to_update_my_app_after_having/) caused by lost keystore files.\n\nIn this blog post, I'll show you how to create a Keystore file and use it to sign an Android application. Then I'll show you how to quickly create a CI pipeline in GitLab using Project-level Secure Files.\n\n## Generate a private signing key\n\nThe first thing you'll need is a Keystore file. This file is used to securely sign the application. You can generate a Keystore file from your machine by running the following command:\n\n```\nkeytool -genkey -v -keystore release-keystore.jks -alias release -keyalg RSA -keysize 2048 -validity 10000\n```\n\nDuring this process, you'll be asked to create a new password for the Keystore file and provide some information about you and your organization. See the example below:\n\n![Generate Android Keystore](https://about.gitlab.com/images/blogimages/2022-09-19-mobile-devops-with-gitlab-part-2-code-signing-for-android-with-gitlab/generate-keystore.png)\n\n\n## Configure your application\n\nThe next step is to set some environment variables and update build.gradle to add the new signing configuration. First, set the following environment variables in either a .env file or in the shell via export.\n\n* `ANDROID_KEY_ALIAS` is the alias you gave for the key in the keytool command above. In this example the value is release.\n* `ANDROID_KEYSTORE_PASSWORD` is the new password you supplied to the keytool command above.\n* `ANDROID_KEY_STOREFILE` is the path to the new keystore file you just created. 
In this example we're using `../release-keystore.jks`.\n\nWith the environment variables set, the next step is to update the build configuration to use the new Keystore in the build process. In the `app/build.gradle` file add the following configuration inside the Android block for the release signing config.\n\n```\nandroid {\n    ...\n    defaultConfig { ... }\n    signingConfigs {\n        release {\n           storeFile file(System.getenv('ANDROID_KEY_STOREFILE'))\n           storePassword System.getenv('ANDROID_KEYSTORE_PASSWORD')\n           keyAlias System.getenv('ANDROID_KEY_ALIAS')\n           keyPassword System.getenv('ANDROID_KEYSTORE_PASSWORD')\n        }\n    }\n    buildTypes {\n        release {\n            ...\n            signingConfig signingConfigs.release\n        }\n    }\n}\n```\n\nSave these changes to the `app/build.gradle file`, and run the build locally to ensure everything works. Use the following command to run the build:\n\n```\n./gradlew assembleRelease\n```\n\nIf everything worked you'll see a message saying **BUILD SUCCESSFUL**.\n\n## Configure project\n\nWith the build running locally, it takes just a couple of steps to get it running in GitLab [CI](/topics/ci-cd/). The first step is to upload your Keystore file in GitLab. \n\n1. On the top bar, select **Menu > Projects** and find your project.\n2. On the left sidebar, select **Settings > CI/CD**.\n3. In the **Secure Files** section, select **Expand**.\n4. Select **Upload File**.\n5. Find the file to upload, select **Open**, and the file upload begins immediately. 
The file shows up in the list when the upload is complete.\n\n![Upload Secure File](https://about.gitlab.com/images/blogimages/2022-09-19-mobile-devops-with-gitlab-part-2-code-signing-for-android-with-gitlab/upload-secure-file.png)\n\n![List Secure Files](https://about.gitlab.com/images/blogimages/2022-09-19-mobile-devops-with-gitlab-part-2-code-signing-for-android-with-gitlab/list-secure-files.png)\n\nThe next step is to set the CI variables in your project. \n\n1. On the top bar, select **Menu > Projects** and find your project.\n2. On the left sidebar, select **Settings > CI/CD**.\n3. In the **Variables** section, select **Expand**.\n4. Create entries for the three environment variables set earlier: `ANDROID_KEY_ALIAS`, `ANDROID_KEY_STOREFILE`, `ANDROID_KEYSTORE_PASSWORD`.\n\n![List Secure Files](https://about.gitlab.com/images/blogimages/2022-09-19-mobile-devops-with-gitlab-part-2-code-signing-for-android-with-gitlab/list-ci-variables.png)\n\n## CI/CD pipelines\n\nOnce the project is configured, the final step is to create the build configuration in the `.gitlab-ci.yml` file. Below is a sample file.\n\n```\nstages:\n  - build\n\nbuild_android:\n  image: fabernovel/android:api-31-v1.6.1\n  stage: build\n  variables:\n    SECURE_FILES_DOWNLOAD_PATH: './'\n  script:\n    - apt update && apt install -y curl\n    - curl --silent \"https://gitlab.com/gitlab-org/incubation-engineering/mobile-devops/download-secure-files/-/raw/main/installer\" | bash\n    - ./gradlew assembleRelease\n  artifacts:\n    paths:\n      - app/build/outputs/apk/release\n```\n\nA few interesting bits from this configuration:\n\n1. Image: [https://github.com/faberNovel/docker-android](https://github.com/faberNovel/docker-android) provides a collection of prebuilt Docker images that work great for CI systems. Find the right version for your project in Docker Hub [https://hub.docker.com/r/fabernovel/android/tags](https://hub.docker.com/r/fabernovel/android/tags). \n2. 
Script: Depending on the image, you may need to install curl; the first line of the example script installs curl to be used in the second line to download and execute the [download-secure-files](https://gitlab.com/gitlab-org/incubation-engineering/mobile-devops/download-secure-files) tool.\n3. Variables: `SECURE_FILES_DOWNLOAD_PATH` tells [download-secure-files](https://gitlab.com/gitlab-org/incubation-engineering/mobile-devops/download-secure-files) where to download the Keystore file.\n4. Artifacts: Make the build output available to be downloaded from the CI job, or used in subsequent jobs in the pipeline.\n\nCommit the changes to your `.gitlab-ci.yml` file and after you push the changes to GitLab the build will start.\n\nTake a look at [this branch in the sample project](https://gitlab.com/gitlab-org/incubation-engineering/mobile-devops/android_demo/-/tree/basic_build) for reference.\n\nGive it a try, and let us know what you think in the [feedback issue](https://gitlab.com/gitlab-org/gitlab/-/issues/362407). Then, check out Part 3, which deals with [code signing for iOS](/blog/mobile-devops-with-gitlab-part-3-code-signing-for-ios-with-gitlab-and-fastlane/). 
\n\n\n\n_Cover image by  \u003Ca href=\"https://unsplash.com/@teddygr?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText\">Teddy GR\u003C/a> on \u003Ca href=\"https://unsplash.com/s/photos/google-phone?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText\">Unsplash\u003C/a>_\n",[1128,1466,836,111],{"slug":2059,"featured":6,"template":683},"mobile-devops-with-gitlab-part-2","content:en-us:blog:mobile-devops-with-gitlab-part-2.yml","Mobile Devops With Gitlab Part 2","en-us/blog/mobile-devops-with-gitlab-part-2.yml","en-us/blog/mobile-devops-with-gitlab-part-2",{"_path":2065,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":2066,"content":2072,"config":2077,"_id":2079,"_type":16,"title":2080,"_source":18,"_file":2081,"_stem":2082,"_extension":21},"/en-us/blog/less-headaches",{"title":2067,"description":2068,"ogTitle":2067,"ogDescription":2068,"noIndex":6,"ogImage":2069,"ogUrl":2070,"ogSiteName":697,"ogType":698,"canonicalUrls":2070,"schema":2071},"Two DevOps platform superpowers: Visibility and actionability","Migrating to a DevOps platform helps organizations better understand and improve their development lifecycle.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749668622/Blog/Hero%20Images/group-rowing-collaboration.jpg","https://about.gitlab.com/blog/less-headaches","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Two DevOps platform superpowers: Visibility and actionability\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Sharon Gaudin\"}],\n        \"datePublished\": \"2022-09-26\",\n      }",{"title":2067,"description":2068,"authors":2073,"heroImage":2069,"date":2074,"body":2075,"category":14,"tags":2076},[1364],"2022-09-26","\nA [DevOps platform](/blog/the-journey-to-a-devops-platform/) deployed as a single application takes DevOps gains to the next level, enabling teams to deliver more value to their organization with 
fewer headaches. A platform, which includes the ability to plan, develop, test, secure, and operate software, empowers teams to deliver software faster, more efficiently, and more securely. And that [makes the business more competitive and more agile](/blog/the-devops-platform-series-building-a-business-case/).\n\n## DevOps visability and actionability\n\nA complete DevOps platform gives organizations everything they need to turn ideas into valuable and secure software without the time-consuming and costly headaches that multiple tools and multiple UXes bring. A single, end-to-end platform also gives teams one data store sitting underneath everything they do, and, regardless of the interface they are using, allows them to easily surface insights about developer productivity, workflow efficiency, and DevOps practice adoption.\n\nThere are many benefits to a DevOps platform, including visibility and actionability.\n\n### Gain visibility and context\n\nA DevOps platform enables DevOps teams to see and understand what’s happening in their organization, and provide context for those events. With insights that go much deeper than what a simple report or dashboard can offer, DevOps teams can better understand the status of projects, as well as their impact.\n\n### Take action more easily\n\nActionability means users can take that contextual information and efficiently and quickly do something with it at the point of understanding. Users can move a project ahead more quickly because they don’t have to wait to have a synchronous conversation or meeting to review the new information.\n\nHere are a few ways that an end-to-end platform provides visibility and actionability.\n\n### Track projects with epics and issues\n\nIn a DevOps platform, users are better able to communicate, plan work, and collaborate by using epics and issues. [Epics](https://docs.gitlab.com/ee/user/group/epics/) are an overview of a project, idea, or workflow. 
Issues are used to organize and list out what needs to be done to complete the larger goal, to track tasks and work status, or work on code implementations.\n\nFor instance, if managers want an overview of how multiple projects, programs, or products are progressing, they can get that kind of visibility by checking an epic, which will give them a high-level rollup view of what is being worked on, what has been completed, and what is on schedule or delayed. Users can call up an epic to quickly see what’s been accomplished and what is still under way, and then they can dig deeper into sub-epics and related issues for more information.\n\n[Issues](https://docs.gitlab.com/ee/user/project/issues/) offer details about implementation of specific goals, trace collaboration on that topic, and show which parts of the initiative team members are taking on. Users also can see whether due dates have been met or not. Issues can be used to reassign pieces of work, give updates, make comments or suggestions, and see how the nuts and bolts are being created and moved around.\n\n### Labels help track and search projects\n\nLabels are classification tags, which are often assigned colors and descriptive titles like \"bug\", \"feature request\", or \"docs\" to make them easy to understand. They are used in epics, issues, and merge requests to help users organize their work and ideas. They give users at-a-glance insight about what teams are working on a project, the focus of the work, and where it stands in the development lifecycle. Labels can be added and removed as work progresses to enable better tracking and searching.\n\n### Dashboards help track metrics\n\nDashboards are reporting tools that pull together metrics from multiple tools to create an at-a-glance view of projects, [security issues](/blog/secure-stage-for-appsec/), the health of different environments, or requests coming in for specific departments or teams, for instance. 
DevOps platform users can set up live dashboards to see trends in real time, map processes, and track response times, [errors](/blog/iteration-on-error-tracking/), and deployment speed. Dashboards also can be used to see alert statuses and the effect on specific applications and the business overall.\n\n### Value stream analytics\n\nFor visibility without any customization required, there are [value stream analytics](/blog/gitlab-value-stream-analytics/). This interface automatically pulls in data to show users how long it takes the team to complete each stage in their workflow – across planning, development, deployment, and monitoring. This gives developers or product owners – or anyone who wants information on workflow efficiency –  [a look at high-level metrics](/solutions/value-stream-management/), like deployment frequency. This is actionable information so it also shows what part of the project is taking the most time or what is holding up progress. Based on this information, the user can suggest changes, like moving milestones or assigning the work to someone new, and enact those changes with just one click.\n\nWith a DevOps platform, teams have end-to-end visibility that also is actionable. By enabling users to find the information they need with the context they need and giving them the ability to make immediate changes, data becomes actionable. Using a single platform, teams can move projects along more quickly, iterate faster, and create more value and company agility.\n\nCheck out our [Migrating to a DevOps platform eBook](https://page.gitlab.com/migrate-to-devops-guide.html?_gl=1*6p1rz*_ga*MTI3MzMwNjYwMi4xNjYyOTg0OTAw*_ga_ENFH3X7M5Y*MTY2Mzk0NDY1Mi4zOS4xLjE2NjM5NDQ2NjEuMC4wLjA.) 
for even more useful information about how to complete a successful DevOps platform migration\n\n",[1128,1786,728],{"slug":2078,"featured":6,"template":683},"less-headaches","content:en-us:blog:less-headaches.yml","Less Headaches","en-us/blog/less-headaches.yml","en-us/blog/less-headaches",{"_path":2084,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":2085,"content":2091,"config":2097,"_id":2099,"_type":16,"title":2100,"_source":18,"_file":2101,"_stem":2102,"_extension":21},"/en-us/blog/get-started-with-microservices-architecture",{"title":2086,"description":2087,"ogTitle":2086,"ogDescription":2087,"noIndex":6,"ogImage":2088,"ogUrl":2089,"ogSiteName":697,"ogType":698,"canonicalUrls":2089,"schema":2090},"Get started with microservices architecture","For DevOps teams ready to take the next step, adopting a microservices architecture is a smart choice. Here's what you need to know.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749667875/Blog/Hero%20Images/trends-in-version-control-land-microservices-cover.jpg","https://about.gitlab.com/blog/get-started-with-microservices-architecture","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Get started with microservices architecture\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"GitLab\"}],\n        \"datePublished\": \"2022-09-20\",\n      }",{"title":2086,"description":2087,"authors":2092,"heroImage":2088,"date":2093,"body":2094,"category":14,"tags":2095},[1524],"2022-09-20","A great way to jumpstart a DevOps practice is by adopting a microservices architecture. 
The [benefits of a microservices architecture](/blog/what-are-the-benefits-of-a-microservices-architecture/) are numerous and include improved scalability, enhanced fault isolation, and the ability to bring new features to market faster.\n\n## How to start building with microservices architecture\n\n### Identify decomposable aspects of the application\n\nOne of the main properties of a microservice is its independence, so identifying the decomposable parts of the application — those parts that can work autonomously — is essential. Getting the service boundaries wrong could result in unwanted changes to other services, so you need to understand the system’s domain.\n\nIn many cases, such breakdown aligns with the business domains and is reflected in development teams.\n\n### Determine the metrics to monitor\n\nWith a microservices application, it’s crucial to monitor the status of each service so it’s possible to react to changing demands in the production environment. Some common metrics to monitor include the CPU and memory usage of each host, the API response time, and the error rate.\n\nWithout monitoring, teams may not catch problems when they arise. For example, if a server is overwhelmed by traffic, other services may not respond because they’re trying to communicate with an over-burdened service. \n\nBeing able to visualize these potential issues helps prevent downtime. Therefore, establish metrics early so necessary adjustments can be made as soon as possible.\n\n## Best practices for deploying and managing microservices\n\n### Infrastructure automation\n\nWhen the number of microservices grows, an application can become difficult to manage. Each microservice has its own deployment schedule. \n\nSome features are hidden behind feature flags, some are collecting usage data through A/B testing, and some services might be using Canary deployments as part of a progressive deployment. 
\n\nAutomated testing is key so teams will have the ability to stop or roll back deployment when necessary.\n\n### Consumer-driven contract tests\n\nWhen other consumers depend on API endpoints in one microservice, it’s good practice to implement consumer-driven contract testing to ensure version compatibility. \n\nTraditionally, developers first create the APIs on the server side and have clients determine which endpoints to call. That means when the signature of an API changes, it can bring down the consumer.\n\nThis can’t happen with consumer-driven contract testing because, before deploying a microservice to production, consumers determine the required contract (API signature) and test to be sure they are still valid.\n\n### Monitor key metrics\n\nOnce key metrics have been determined, they must be constantly monitored and able to respond to any events detected. This can be difficult, but fortunately, there are tools that simplify monitoring and provide comprehensive visualization.\n\n## Microservices architecture and DevOps\n\nBy decomposing a software system into autonomous parts, [microservices architecture](/topics/microservices/) allows companies to apply the single responsibility principle to individual teams. It allows them to manage all aspects of a service independently: the team’s technical stack, team composition, deployment strategies, and even release schedules.\n\nMicroservices architecture, alongside continuous delivery, allows businesses to make decisions based on live production data, thereby expediting feedback loops and reducing the time to market.\n\nTo get started with microservices architecture, it’s a good idea to first develop strong intuitions in decomposing a large system and get a good knowledge base of CI/CD practices. 
Regardless of the architectural style you choose, these skills will be useful.",[1128,792,2096],"google",{"slug":2098,"featured":6,"template":683},"get-started-with-microservices-architecture","content:en-us:blog:get-started-with-microservices-architecture.yml","Get Started With Microservices Architecture","en-us/blog/get-started-with-microservices-architecture.yml","en-us/blog/get-started-with-microservices-architecture",{"_path":2104,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":2105,"content":2111,"config":2116,"_id":2118,"_type":16,"title":2119,"_source":18,"_file":2120,"_stem":2121,"_extension":21},"/en-us/blog/mobile-devops-with-gitlab-part-1",{"title":2106,"description":2107,"ogTitle":2106,"ogDescription":2107,"noIndex":6,"ogImage":2108,"ogUrl":2109,"ogSiteName":697,"ogType":698,"canonicalUrls":2109,"schema":2110},"Mobile DevOps: Code signing with project-level secure files","An introduction to mobile code signing with the new Project-level Secure Files feature.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749668629/Blog/Hero%20Images/refargotohp-mzZp_9QpYLc-unsplash.jpg","https://about.gitlab.com/blog/mobile-devops-with-gitlab-part-1","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Mobile DevOps with GitLab, Part 1 - Code signing with Project-level Secure Files\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Darby Frey\"}],\n        \"datePublished\": \"2022-09-20\",\n      }",{"title":2112,"description":2107,"authors":2113,"heroImage":2108,"date":2093,"body":2114,"category":14,"tags":2115},"Mobile DevOps with GitLab, Part 1 - Code signing with Project-level Secure Files",[2016],"\n\nMobile teams face some unique challenges when it comes to establishing DevOps practices. Build tools are different, release and approval cycles with app stores can be slower and introduce more risk, and some applications require specialized runners. 
At GitLab, we are focused on finding solutions to these challenges to make it easier for [everyone to contribute](/company/mission/#everyone-can-contribute)! Starting with mobile code signing.\n\nThis post is the first in a series on mobile DevOps and it shows how GitLab makes code signing easier using a new feature called Project-level Secure Files.\n\n## A brief introduction to mobile code signing\n\nAndroid and iOS projects require special configuration files for secure application code signing to ensure an application on a user's device hasn't been tampered with. These configuration files can be challenging to manage in a [CI environment](/topics/ci-cd/benefits-continuous-integration/). Keystores, signing certificates, and provisioning profiles shouldn't be stored in version control because they contain sensitive information. These files are also binary (not text), so they can't easily be stored as CI variables.\n\nTo make this process easier, [we've introduced a feature in GitLab 15.0 called Project-level Secure Files](/releases/2022/05/22/gitlab-15-0-released/#project-level-secure-files-in-open-beta). This feature allows these files to be stored securely as part of a GitLab project but outside version control. Secure Files can then easily be loaded into a CI job when it's time to execute the code signing process.\n\nGet started by adding a secure file to a project:\n\n1. On the top bar, select **Menu > Projects** and find your project.\n2. On the left sidebar, select **Settings > CI/CD**.\n3. In the **Secure Files** section, select **Expand**.\n4. Select **Upload File**.\n5. Find the file to upload, select **Open**, and the file upload begins immediately. 
The file shows up in the list when the upload is complete.\n\n![Upload Secure File](https://about.gitlab.com/images/blogimages/2022-09-19-mobile-devops-with-gitlab-part-1-introducing-project-level-secure-files/upload-secure-file.png)\n\n![List Secure Files](https://about.gitlab.com/images/blogimages/2022-09-19-mobile-devops-with-gitlab-part-1-introducing-project-level-secure-files/list-secure-files.png)\n\nWith the files securely stored with the project, the next step is to load them into a [CI/CD](/topics/ci-cd/) job. To use your secure files in a CI/CD job, you must use the [download-secure-files](https://gitlab.com/gitlab-org/incubation-engineering/mobile-devops/download-secure-files) tool to download the files in the job. After downloading them, these files can be used in any CI job.\n\nAdd a command in the script section of your job to download the download-secure-files tool and execute it. It's also important to specify the download location for the secure files by setting the desired path in the `SECURE_FILES_DOWNLOAD_PATH` [CI/CD variable](https://docs.gitlab.com/ee/ci/variables/index.html).\n\nFor example:\n\n```\ntest:\n  variables:\n    SECURE_FILES_DOWNLOAD_PATH: './where/files/should/go/'\n  script:\n    - curl --silent \"https://gitlab.com/gitlab-org/incubation-engineering/mobile-devops/download-secure-files/-/raw/main/installer\" | bash\n```\n\nNow, when the CI job runs, all of the secure files will be available in the location specified. They can then be passed into a build script or loaded into the Apple keychain. \n\nThat's it! 
Give it a try, and let us know what you think in the [feedback issue](https://gitlab.com/gitlab-org/gitlab/-/issues/362407).\n\nNext time we will walk through [how to set up code signing for an Android app](/blog/mobile-devops-with-gitlab-part-2/).\n\nCover image by \u003Ca href=\"https://unsplash.com/@refargotohp?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText\">refargotohp\u003C/a> on \u003Ca href=\"https://unsplash.com/s/photos/mobile-app-building?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText\">Unsplash\u003C/a>.\n",[1466,1128,111],{"slug":2117,"featured":6,"template":683},"mobile-devops-with-gitlab-part-1","content:en-us:blog:mobile-devops-with-gitlab-part-1.yml","Mobile Devops With Gitlab Part 1","en-us/blog/mobile-devops-with-gitlab-part-1.yml","en-us/blog/mobile-devops-with-gitlab-part-1",{"_path":2123,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":2124,"content":2130,"config":2135,"_id":2137,"_type":16,"title":2138,"_source":18,"_file":2139,"_stem":2140,"_extension":21},"/en-us/blog/why-ai-in-devops-is-here-to-stay",{"title":2125,"description":2126,"ogTitle":2125,"ogDescription":2126,"noIndex":6,"ogImage":2127,"ogUrl":2128,"ogSiteName":697,"ogType":698,"canonicalUrls":2128,"schema":2129},"Why AI in DevOps is here to stay","Two years ago artificial intelligence wasn't part of mainstream software development. Now AI in DevOps is seemingly everywhere. 
Here's why.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749664015/Blog/Hero%20Images/laptop.jpg","https://about.gitlab.com/blog/why-ai-in-devops-is-here-to-stay","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Why AI in DevOps is here to stay\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Valerie Silverthorne\"}],\n        \"datePublished\": \"2022-09-15\",\n      }",{"title":2125,"description":2126,"authors":2131,"heroImage":2127,"date":2132,"body":2133,"category":14,"tags":2134},[1859],"2022-09-15","\nIn 2020, respondents to our annual Global DevSecOps Survey started mentioning artificial intelligence and machine learning for the first time. In that survey, roughly 16% of respondents were using “bots” to test code, or were planning to, while 12% of devs said knowledge of AI/ML would be critical to their future.\n\nFast forward just two years and [AI in DevOps](/topics/devops/the-role-of-ai-in-devops/) is a reality in teams around the world, according to our [2022 Global DevSecOps Survey](https://about.gitlab.com/developer-survey/previous/2022/). \n\n- 24% of respondents said their DevOps practices include AI/ML, more than double the 2021 percentage.\n\n- 31% of teams are using AI/ML for code review, 16 points higher than last year. \n\n- Today 37% of teams use AI/ML in software testing (up from 25% in 2021), and 20% plan to introduce it this year. Another 19% plan to roll out AI/ML-powered testing in the next two to three years.\n\n- Fully 62% of survey takers are practicing ModelOps.\n\n- 51% use AI/ML to check (not test) code. 
\n\nAll told, only 5% of teams said they had _no plans_ to incorporate AI in DevOps.\n\nHere's a snapshot of where AI in DevOps is today and why, despite some challenges, AI will likely play an increasingly important role.\n\n## Why AI in DevOps\n\nIn many ways, [DevOps and AI/ML](/blog/ai-in-software-development/) are the perfect marriage: DevOps requires automation to reach maximum efficiency and AI/ML are obvious choices to tackle repetitive tasks. Imagine adding team members entirely focused on a single job, with incredible attention to detail and no need for vacations or even a coffee break – that’s an ML “bot” in a nutshell. \n\nWhen we asked DevOps teams what the most common reasons were for [software release delays](/blog/top-reasons-for-software-release-delays/), the answers called out steps that are critical but manual, tedious, time-consuming, and potentially rife with errors: [software testing](/blog/the-gitlab-guide-to-modern-software-testing/), code review, security testing and code development. For many teams, AI/ML could be key in streamlining these processes.\n\n## Smarter software testing\n\nNo DevOps process is perhaps in more need of streamlining than software testing, which is no doubt why teams have been adding AI/ML into the mix for several years now. Testing is that process [everyone loves to hate](/blog/the-software-testing-life-cycle-in-2021-a-more-upbeat-outlook/), but it is also the step that needs to happen more often in all the ways, or at least that’s what developers tell us year after year. But there are so many different kinds of tests, limited development time, and even more constrained QA teams. Machine learning bots can help bridge the manpower gap, freeing up resources to focus on tests best done by humans. 
\n\nAnd increased testing creates another issue – test data management – that could ideally be triaged and dealt with using AI.\n\n## The benefits of ModelOps\n\nAI/ML solutions have also made their way into other DevOps steps, specifically [ModelOps](/direction/modelops/). Not only is this an area GitLab is focusing on ([beginning with smarter code reviews](/blog/the-road-to-smarter-code-reviewer-recommendations/)), but more than half of DevOps teams report they’re exploring what’s involved in bringing data science and operations together. \n\n## Beware the learning curve\n\nArtificial intelligence and machine learning are not without their challenges, however. In our 2022 survey, developers expressed very real concerns about the steep learning curves involved in the technology adoption. “Technology is rapidly changing,” was a thought shared by many developers, alongside “implementing AI is an enormous challenge.” \n\nOne developer summed it up: “4G, 5G, AI, Metaverse, virtual space - developers have to support all of this.”\n\nBrendan O'Leary, [staff developer evangelist at GitLab](/company/team/#brendan), says AI naturally has a big learning curve because it requires experimentation. \"This is not just a programming language,\" he explains. \"We've got some data and a hypothesis around it and AI is what's going to help us prove it. This is a different kind of experiment than other kinds of coding... we've got to learn how to measure the impact, understand it, and iterate on it. 
It's a different kind of paradigm.\"\n",[1128,1589,233],{"slug":2136,"featured":6,"template":683},"why-ai-in-devops-is-here-to-stay","content:en-us:blog:why-ai-in-devops-is-here-to-stay.yml","Why Ai In Devops Is Here To Stay","en-us/blog/why-ai-in-devops-is-here-to-stay.yml","en-us/blog/why-ai-in-devops-is-here-to-stay",{"_path":2142,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":2143,"content":2149,"config":2154,"_id":2156,"_type":16,"title":2157,"_source":18,"_file":2158,"_stem":2159,"_extension":21},"/en-us/blog/new-to-devops-take-our-devops-for-beginners-quiz",{"title":2144,"description":2145,"ogTitle":2144,"ogDescription":2145,"noIndex":6,"ogImage":2146,"ogUrl":2147,"ogSiteName":697,"ogType":698,"canonicalUrls":2147,"schema":2148},"New to DevOps? Take our DevOps for beginners quiz","We asked nearly 1400 DevOps beginners about their priorities and challenges for 2022. See how you compare, and take our short DevOps for beginners quiz.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749663743/Blog/Hero%20Images/three-things-i-learned-in-my-first-month-at-gitlab.jpg","https://about.gitlab.com/blog/new-to-devops-take-our-devops-for-beginners-quiz","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"New to DevOps? 
Take our DevOps for beginners quiz\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"GitLab\"}],\n        \"datePublished\": \"2022-09-13\",\n      }",{"title":2144,"description":2145,"authors":2150,"heroImage":2146,"date":2151,"body":2152,"category":14,"tags":2153},[1524],"2022-09-13","__Update: The quiz mentioned here has been closed.__\n\nOver the last 12 months, we’ve asked three [\"DevOps for beginners\"](https://about.gitlab.com/topics/devops/beginner-devops-platform/) questions of nearly 1400 people:\n\n- What’s the most important skill you hope to learn this year?\n- What continues to be your team’s biggest DevOps challenge?\n- What is your DevOps team’s top priority for 2022?\n\nA resounding majority (nearly 83%) told us they want to learn a new programming language and about 15% hope to get better at automation.\n\n(Learn the basics of Python with our [5-part series](/blog/learn-python-with-pj-part-1/), understand [Rust](/blog/rust-programming-language/), or [get started with CI/CD](/blog/beginner-guide-ci-cd/).)\n\nWhat are they struggling with?\n\nJust over 70% said security was the biggest challenge for their DevOps team this year (a result that tracks with our just released [Global DevSecOps Survey](/developer-survey/)), while just shy of 24% said it was testing (again, that’s [a very common complaint](/blog/want-faster-releases-your-answer-lies-in-automated-software-testing/)). 
\n\nThe top priorities for 2022 were split between increasing automation (47%) and moving to a [DevOps platform](/topics/devops-platform/) (23%).\n\nAnd we have more DevOps for beginner resources here:\n\n[Beginner’s Guide to DevOps eBook](https://page.gitlab.com/resources-ebook-beginners-guide-devops.html)\n\nA [step-by-step](/blog/if-its-time-to-learn-devops-heres-where-to-begin/) look at how to get started with DevOps\n\nA [guide to Git for beginners](/blog/beginner-git-guide/)\n\n[Continuous integration](/blog/a-beginners-guide-to-continuous-integration/)for beginners\n",[1128,1589,1069],{"slug":2155,"featured":6,"template":683},"new-to-devops-take-our-devops-for-beginners-quiz","content:en-us:blog:new-to-devops-take-our-devops-for-beginners-quiz.yml","New To Devops Take Our Devops For Beginners Quiz","en-us/blog/new-to-devops-take-our-devops-for-beginners-quiz.yml","en-us/blog/new-to-devops-take-our-devops-for-beginners-quiz",{"_path":2161,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":2162,"content":2168,"config":2174,"_id":2176,"_type":16,"title":2177,"_source":18,"_file":2178,"_stem":2179,"_extension":21},"/en-us/blog/how-smbs-can-save-with-gitlabs-devops-platform",{"title":2163,"description":2164,"ogTitle":2163,"ogDescription":2164,"noIndex":6,"ogImage":2165,"ogUrl":2166,"ogSiteName":697,"ogType":698,"canonicalUrls":2166,"schema":2167},"How SMBs can save with the GitLab DevOps Platform","Use our ROI Calculator to understand how a DevOps platform saves money.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749667966/Blog/Hero%20Images/global-compensation-calculator-iteration.jpg","https://about.gitlab.com/blog/how-smbs-can-save-with-gitlabs-devops-platform","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How SMBs can save with the GitLab DevOps Platform\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Sharon Gaudin\"}],\n        
\"datePublished\": \"2022-09-08\",\n      }",{"title":2163,"description":2164,"authors":2169,"heroImage":2165,"date":2170,"body":2171,"category":14,"tags":2172},[1364],"2022-09-08","\n\nMigrating from a complex and costly DevOps toolchain to The One DevOps Platform from GitLab can not only save a small and medium-sized business (SMB) from an inefficient workload, it can result in a big financial savings, too.\n\nAnd that savings could mean the difference between an [SMB failing and thriving](/blog/6-ways-smbs-can-leverage-the-power-of-a-devops-platform/) in a cutthroat and unpredictable market. That’s right… GitLab’s end-to-end platform can turn IT into a business driver that speeds software creation, boosting competitiveness and pulling in more revenue. \n\nThat’s key for SMBs, which have small IT teams or maybe even a team of one. That means there are fewer hands to do the work, and likely less financial resources. SMBs might also have a harder time hiring general IT people who also can develop and deploy code. The benefits of a single DevOps platform help solve several SMB issues.\n\n“Where [migration is an investment](https://learn.gitlab.com/smbmigrationguide/migratedevopssmb) in time and change, it’s an investment that will pay a lot of dividends in time and money savings,” says [Brendan O’Leary](https://gitlab.com/brendan), staff developer evangelist at GitLab. “If it’s done right, the ROI will be very quick. You can get rid of all those other tools, while speeding up your ability to iterate and serve your customers.”\n\n## Use an ROI calculator\n\nWith so many factors to consider, how can IT managers measure potential savings? \n\nGitLab can help with that. 
Check out our [ROI Calculator](https://about.gitlab.com/calculator/roi/), which can help estimate the financial benefits an SMB could realize by moving to GitLab from their DIY DevOps toolchain.\n\nGetting rid of a tangle of disparate tools means freeing up money spent on licensing fees, as well as on updating and maintenance. It also means freeing up all the time IT people spend context switching between these tools. And don’t forget The One DevOps Platform is going to help teams develop and deploy faster and more efficiently, [making SMBs more nimble and competitive](/blog/can-an-smb-or-start-up-be-too-small-for-a-devops-platform/). \n\n## How to save with GitLab\n\nLet’s look at [how an SMB can save money](https://cdn.pathfactory.com/assets/10519/contents/427544/b901b768-7b0e-4590-b00e-047a80536cdb.pdf) by shifting from a complex toolchain to GitLab’s DevOps Platform:\n\n- License fees are obvious costs that need to be considered when trying to calculate ROI. \n\n- The cost of maintaining the software over time also needs to be factored in. \n\n- Consider how much time and energy is spent on tool upgrades, security patching, and monitoring the performance and overall availability of a multitude of tools.\n\n- Because GitLab’s platform speeds development and deployment, productivity increases and that propels revenue and opportunities to grow the company.\n\n- A complicated toolchain that has users continually jumping between tools and switching interfaces creates a chaotic environment that requires constant management, tweaking, updating, and stitching. That means IT is managing the toolchain instead of actually developing and delivering the code that drives the bottom line. Software isn’t efficiently created in a chaotic environment.\n\n- IT is wasting time, energy, and money getting up to speed on each tool. 
This goes for every new person who has to learn each tool, instead of a single application.\n\n- SMBs find new customers – and the revenue they bring – by creating software that satisfies customers’ needs. You can do that more quickly with a DevOps platform.\n\n- Because GitLab’s DevOps Platform enables companies to develop and deploy more securely, more quickly, and with less hands-on work, SMBs are more able to change on a dime to meet or get ahead of new demands and even new competitors.\n\n- If an IT team is spending time on the care and feeding of a toolchain instead of doing interesting software development, it can cause stress and job dissatisfaction, which could lead to problematic turnover.\n\n- The One DevOps Platform naturally pushes security left so it’s automatically integrated into every step of the development lifecycle. Detecting errors early in the process is much cheaper and less time consuming than detecting them in production. \n\nThat’s a lot of ways to save money and earn extra revenue. And all of that can be done with one single license, one permission model, and one interface, giving teams the time and resources to focus on creating business value instead of managing a toolchain.\n\n## Drilling down on ways to save\n\nGitLab’s DevOps Platform allows teams to move from, or avoid, that often complex and confusing multitude of tools by using a single, complete software development ecosystem. An SMB may be small enough that it hasn’t amassed a complex toolchain – yet. But it will only grow more unwieldy as the company grows. Now is the time to adopt a single platform and avoid that problem all together. \n\n\"It enables us to write better software more efficiently,” said Dorian de Koning, DevOps lead at [Weave](https://weave.nl), a software technology developer based in The Netherlands. 
“We went from single manual deployment approximately every two weeks to tens of deployments a day.\"\n",[1128,1528,2173],"startups",{"slug":2175,"featured":6,"template":683},"how-smbs-can-save-with-gitlabs-devops-platform","content:en-us:blog:how-smbs-can-save-with-gitlabs-devops-platform.yml","How Smbs Can Save With Gitlabs Devops Platform","en-us/blog/how-smbs-can-save-with-gitlabs-devops-platform.yml","en-us/blog/how-smbs-can-save-with-gitlabs-devops-platform",{"_path":2181,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":2182,"content":2188,"config":2193,"_id":2195,"_type":16,"title":2196,"_source":18,"_file":2197,"_stem":2198,"_extension":21},"/en-us/blog/ease-pressure-on-smb-developers-with-a-devops-platform",{"title":2183,"description":2184,"ogTitle":2183,"ogDescription":2184,"noIndex":6,"ogImage":2185,"ogUrl":2186,"ogSiteName":697,"ogType":698,"canonicalUrls":2186,"schema":2187},"Ease pressure on SMB developers with a DevOps platform","Small and medium-sized businesses have to be master multitaskers, but that's not always efficient. Here's how a DevOps platform can help.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749668354/Blog/Hero%20Images/handshake.png","https://about.gitlab.com/blog/ease-pressure-on-smb-developers-with-a-devops-platform","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Ease pressure on SMB developers with a DevOps platform\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Sharon Gaudin\"}],\n        \"datePublished\": \"2022-09-06\",\n      }",{"title":2183,"description":2184,"authors":2189,"heroImage":2185,"date":2190,"body":2191,"category":14,"tags":2192},[1364],"2022-09-06","\nAdopting a full, end-to-end DevOps platform eases strain on IT, and that is particularly important in small and medium-sized businesses (SMBs). 
\n\nSince there’s generally only a handful of IT professionals – at most – working in an SMB, they’re often trying to keep their heads above water. They’re constantly in motion, moving between keeping often less than top-of-the-line systems running, acting as the user help desk, and ensuring company data is safe. They’re not only wearing multiple hats, they’re putting out one fire after another.\n\nWorking under that kind of constant pressure leaves little time and focus for developing and deploying new software, which most [every SMB needs](/blog/can-an-smb-or-start-up-be-too-small-for-a-devops-platform/) to entice new customers, build the brand, and bring in revenue. Relieving that stress and enabling these tiny IT teams to succeed in creating great software products is about survival. And IT survival is about [adopting a DevOps platform](https://learn.gitlab.com/smbmigrationguide/migratedevopssmb).\n\nRelying on a full DevOps platform empowers IT professionals and enables them to eliminate wasted time and energy so they can focus on being a business driver. 
There are many parts of an end-to-end DevOps platform that lead to increased efficiency and decreased pressure on the IT team:\n\n- Automate processes – from testing to performance management and monitoring – to enable IT to be hands off with repetitive and often time-consuming tasks and eliminate the potential for human error that can use up a lot of time and money.\n\n- More quickly and efficiently turn a vision into software.\n\n- [Foster collaboration](/blog/5-ways-collaboration-boosts-productivity-and-your-career/) with people across departments to brainstorm design ideas and more efficiently make iterative deployments.\n\n- [Produce more stable and secure software](/blog/toolchain-security-with-gitlab/) that won’t need last-minute fixes or code re-writes.\n\n- Focus on delivering software instead of [managing toolchains](/blog/battling-toolchain-technical-debt/).\n\n- Stop switching back and forth between multiple tool interfaces, passwords, and ways of working.\n\n- Gain an overarching view of the entire development and deployment lifecycle.\n\n- Keep track of and easily access best practices to use in new projects by taking advantage of [continuous documentation](/blog/16-ways-to-get-the-most-out-of-software-documentation/) in the platform. \n\nLike anyone, IT professionals don’t perform best when they’re in a constant reactive state. Sure, many SMBs have started to use various DevOps tools to relieve the stress on IT, but if they haven’t adopted a single platform, then they’re simply creating more expense and more work for their already overburdened IT staff. That’s because by cobbling together a mishmash of disparate tools, they’re inadvertently creating an unwieldy toolchain that slows down deployment and the business it fuels. \n\nMoving to a full DevOps platform means shedding that costly and complex toolchain, speeding the transition of business vision into working software, and cutting the workload weighing down IT. 
\n\nAnd relieving that workload also is about keeping employees happy and less stressed. The [greatest resource a company has is its people](/blog/hiring-in-the-deep-end-of-the-talent-pool/). This is even more true for small companies where the pain of employee dissatisfaction and departure is felt even more acutely. Managers also don’t want projects waylaid because the people driving them are leaving. To stop that from happening, it’s critical to help people get their work done efficiently and more easily, which also reduces their stress and makes them happier.\n\nAn end-to-end platform isn’t just another tool. It’s a whole new way of working that can diminish the often chaotic environment that can surround IT. An SMB’s IT people will still wear many different hats but developing and deploying new software and iterations will be easier, more efficient, and less taxing.\n",[1128,1428,1528],{"slug":2194,"featured":6,"template":683},"ease-pressure-on-smb-developers-with-a-devops-platform","content:en-us:blog:ease-pressure-on-smb-developers-with-a-devops-platform.yml","Ease Pressure On Smb Developers With A Devops Platform","en-us/blog/ease-pressure-on-smb-developers-with-a-devops-platform.yml","en-us/blog/ease-pressure-on-smb-developers-with-a-devops-platform",{"_path":2200,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":2201,"content":2207,"config":2212,"_id":2214,"_type":16,"title":2215,"_source":18,"_file":2216,"_stem":2217,"_extension":21},"/en-us/blog/what-you-need-to-know-about-devops-audits",{"title":2202,"description":2203,"ogTitle":2202,"ogDescription":2203,"noIndex":6,"ogImage":2204,"ogUrl":2205,"ogSiteName":697,"ogType":698,"canonicalUrls":2205,"schema":2206},"What you need to know about DevOps audits","DevOps’s many steps can streamline the audit process. 
Here’s how.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749668339/Blog/Hero%20Images/a-tale-of-two-editors.jpg","https://about.gitlab.com/blog/what-you-need-to-know-about-devops-audits","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"What you need to know about DevOps audits\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Valerie Silverthorne\"}],\n        \"datePublished\": \"2022-08-31\",\n      }",{"title":2202,"description":2203,"authors":2208,"heroImage":2204,"date":2209,"body":2210,"category":14,"tags":2211},[1859],"2022-08-31","\nWhile presumably no one likes an audit, DevOps teams do have some built-in advantages when it comes to intense levels of internal and external scrutiny. Here’s a quick look at DevOps audits, why they matter, and how teams can set themselves up for audit success.\n\n## Looking under the hood\n\nIn most organizations, there are two types of audits: internal and external. At their most simplistic, internal audits are conducted by people within the existing organization, while external audits are conducted by third parties. Either way, audits look to ensure an organization is compliant, and that’s where things can get a bit complicated.\n\nBeing “compliant” can mean an organization is meeting standards set by the government (like [NIST frameworks](/blog/comply-with-nist-secure-supply-chain-framework-with-gitlab/) or HIPAA regulations), living up to its own governance rules regarding data, security policies and processes, and more, or it can mean some combination of the two. 
Also, depending on the type of organization and its vertical industry, compliance can have wildly different requirements.\n\nIn the end, it comes down to [being compliant](/blog/the-importance-of-compliance-in-devops/) means keeping track of any data and processes that can prove compliance is happening, and that’s what auditors need to be able to easily access.\n\nObviously, it’s a big job. Way back when, external auditors would literally set up shop in an empty office and spend weeks (or months) sifting through written records, interviewing employees, and even walking the factory floor if necessary. Today, technology, especially automation, have made audits easier to prepare for and carry out, but the plethora of standards bodies and [a growing focus on security risks](/blog/the-ultimate-guide-to-software-supply-chain-security/) mean more time spent auditing than ever before.\n\n## Enter DevOps\n\nThe largely seamless nature of DevOps not only makes it easier to get software out the door more quickly but it also streamlines the audit process. Why? Because automation tracks every step that happens, creating an auditable record, and the “continuous” nature of DevOps also naturally supports the idea of “continuous” or more frequent (and thus easier) audits.\n\n“DevOps is all about building: writing code, building code, testing code, and compiling it,” says [Sam White](/company/team/#sam.white), GitLab’s principal product manager, Protect. “And it's about getting that code built into a deliverable that's actually shipped out to the end user and runs in production. Compliance [in this sense] is all about what regulatory controls and processes have to be followed within the context of writing, building, and shipping software.”\n\n## Audits and DevOps\n\nDevOps processes naturally lend themselves to audits, White explains, because each of the steps can be traced and many, like merge requests, require signoffs. 
“Compliance regulations can vary across industries and geography. But, generally, what I hear from compliance teams is they need to make sure all of their commits are signed. You want to make sure you don't have a malicious actor putting in bad code. So finding the commits helps you verify who the person was who wrote the code,” he says.\n\nCode review is another obviously “auditable” step in the process, he says, because “it’s very common for organizations to require at least two people to review any code before it gets merged in.” Auditors want to follow the path and DevOps makes it simpler to look at the flow of commits/MRs and code reviews to make sure nothing untoward has happened.\n\n## Track everything\n\nWhile DevOps audit checklists [do exist](https://itrevolution.com/devops-audit-defense-toolkit/), industry compliance requirements vary so widely that a generic list is really only a starting point. But there are basic steps DevOps teams should follow:\n\n- Ensure all code commits have signoffs.\n- Review code on a regular cadence and require at least two signatures.\n- Logging tools are critical – are they widely used and is the data easy to access?\n- Make sure everyone on the team understands the concept of compliance as it relates to a particular industry.\n- Acknowledge that developers aren’t auditors 😀.\n- Check in on operations pros, who are increasingly being tasked with compliance but also report [suffering from information overload](/developer-survey/).\n\nLearn about GitLab’s vision for [compliance management](/direction/govern/compliance/compliance-management/).\n\n_Lauren Minning contributed to this blog post._\n",[1128,1589,750],{"slug":2213,"featured":6,"template":683},"what-you-need-to-know-about-devops-audits","content:en-us:blog:what-you-need-to-know-about-devops-audits.yml","What You Need To Know About Devops 
Audits","en-us/blog/what-you-need-to-know-about-devops-audits.yml","en-us/blog/what-you-need-to-know-about-devops-audits",{"_path":2219,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":2220,"content":2226,"config":2231,"_id":2233,"_type":16,"title":2234,"_source":18,"_file":2235,"_stem":2236,"_extension":21},"/en-us/blog/five-essential-business-benefits-a-devops-platform-gives-smbs",{"title":2221,"description":2222,"ogTitle":2221,"ogDescription":2222,"noIndex":6,"ogImage":2223,"ogUrl":2224,"ogSiteName":697,"ogType":698,"canonicalUrls":2224,"schema":2225},"Five essential business benefits a DevOps platform gives SMBs","Multiply your SMB’s tech muscle, reduce expenses, and cut wasted time.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749668332/Blog/Hero%20Images/architecture-building-business-258163.jpg","https://about.gitlab.com/blog/five-essential-business-benefits-a-devops-platform-gives-smbs","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Five essential business benefits a DevOps platform gives SMBs\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Sharon Gaudin\"}],\n        \"datePublished\": \"2022-08-30\",\n      }",{"title":2221,"description":2222,"authors":2227,"heroImage":2223,"date":2228,"body":2229,"category":14,"tags":2230},[1364],"2022-08-30","\nSmall and medium-sized businesses (SMBs) face a litany of potentially crippling obstacles, but there’s a single step executives can take that will create multiple business benefits. \n\n[Migrating to an end-to-end DevOps platform for SMBs](https://page.gitlab.com/migrate-to-devops-guide.html) will not only greatly improve an SMB’s odds of survival, but it will increase their chance of actually thriving in an environment that sees half of all small businesses failing within their first five years. That’s right. 
All businesses face competition and obstacles, but SMBs and small and medium-sized enterprises (SMEs), in particular, are looking at an uphill battle so steep that 20% of U.S. small businesses fail within just the first year, [according to the U.S. Bureau of Labor Statistics](https://www.bls.gov/bdm/entrepreneurship/entrepreneurship.htm). So why not grab onto any advantage available, especially one this beneficial?\n\nHere’s how [a full DevOps platform can help any SMB](/blog/gitlab-provides-small-business-with-a-professional-mature-devops-platform/):\n\n## Multiply tech muscle\n\nLarge enterprises might have an IT department, or even a separate DevOps group, made up of dozens or hundreds of people. That’s not the case with SMBs and SMEs. A small business might just have one IT person. That leaves one – or two or five – people shouldering a whole lot of work. They’re left not only to handle issues with cybersecurity, email, and buggy laptops, but they also have to design, develop, and deploy new software and iterations. With a DevOps platform, a lot of repetitive tasks are automated, and security testing is built in from the get-go, freeing up a lot of time. With a DevOps platform, it’s possible to do more with fewer hands. \n\n## Engage the entire team\n\n[Fostering collaboration](/blog/5-ways-collaboration-boosts-productivity-and-your-career/) is a big part of a DevOps platform and it’s of particular benefit to SMBs. Yes, there are fewer employees in a smaller organization. That, though, doesn’t have to be a disadvantage. A DevOps platform fosters a collaborative environment, [breaking down departmental silos](/blog/developing-a-successful-devops-strategy/) and enabling everyone – from the head of the business to people in sales, marketing, and customer service – to work together on software planning and design. That means a wider swath of employees can pitch in on projects, naturally bringing more input and help to the table. 
And that makes software more inclusive and well-rounded. It also makes employees more engaged. \n\n## Stop wasting time and effort on a toolchain\n\nSMBs, like their larger enterprise brethren, turn to DevOps to more efficiently and quickly develop and deploy software. But when they don’t go with a single, end-to-end DevOps platform, they end up creating a complicated tangle of tools, or a toolchain. And these toolchains force them to not only learn, but continually switch back and forth between multiple interfaces, passwords, and ways of working. Even worse, those [taxing toolchains](/topics/devops/use-devops-platform-to-avoid-devops-tax/) only grow in size and unwieldiness as the business grows. With fewer IT people onboard, the full burden of these toolchains falls solely on a limited number of people – or even worse, it might fall on just one person. Get rid of that chaotic environment, and the waste of time and effort it brings, by migrating to a single application. \n\n\n## Eliminate the expense of a toolchain\n\nSince most SMBs have limited budgets, many often turn to DevOps tools that have what initially appear to be smaller price tags. However, by casting around for what might seem like a bargain, it creates an even greater mishmash of tools, which the company continually has to pay for. A [2020 Forrester Consulting Total Economic Impact Study](https://learn.gitlab.com/c/forrester-tei?x=X4W83-) noted that moving to a single DevOps application improves development and delivery efficiency by more than 87%, cuts down on licensing costs, and increases savings. The [expenses that come along](/webcast/simplify-to-accelerate/) with multiple licenses and continual maintenance are diminished with a single, end-to-end DevOps platform, driving the bottom line and delivering business value. \n\n## Improved security benefits for your business\n\nSMBs have the perfect chance to build security into their code and processes from the very beginning. 
That’s a much better process than [making security an afterthought](/blog/toolchain-security-with-gitlab/), or completely pushing security aside when projects are bumping up against tight deadlines. That won’t happen with a single DevOps platform, which integrates security into the entire software delivery lifecycle – from planning through design, build, and monitoring. Every single step of the development process. A DevOps platform even automates security testing, ensuring it’s not forgotten and relieving IT professionals from some repetitive, hands-on tasks. When [security is shifted left](/blog/efficient-devsecops-nine-tips-shift-left/) this way, if a vulnerability or compliance issue is introduced into the code, it’s identified almost immediately. And improved security doesn’t just benefit your software. It also benefits your customers, your brand reputation, and your overall business.\n\nMost every business, [regardless of size](/blog/can-an-smb-or-start-up-be-too-small-for-a-devops-platform/), is creating software to serve customers, connect with partners and suppliers, and find new revenue streams. But muddling together a string of tools that end up costing time, effort, and money just to maintain and use them isn’t the answer. 
If SMBs toss that complicated toolchain aside and replace it with one platform, they’ll expand their IT capabilities, reduce costs, and be better able to take on competitors with more experience and deeper pockets.\n",[1128,233,1528],{"slug":2232,"featured":6,"template":683},"five-essential-business-benefits-a-devops-platform-gives-smbs","content:en-us:blog:five-essential-business-benefits-a-devops-platform-gives-smbs.yml","Five Essential Business Benefits A Devops Platform Gives Smbs","en-us/blog/five-essential-business-benefits-a-devops-platform-gives-smbs.yml","en-us/blog/five-essential-business-benefits-a-devops-platform-gives-smbs",{"_path":2238,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":2239,"content":2244,"config":2248,"_id":2250,"_type":16,"title":2251,"_source":18,"_file":2252,"_stem":2253,"_extension":21},"/en-us/blog/the-ultimate-guide-to-software-supply-chain-security",{"title":2240,"description":2241,"ogTitle":2240,"ogDescription":2241,"noIndex":6,"ogImage":1640,"ogUrl":2242,"ogSiteName":697,"ogType":698,"canonicalUrls":2242,"schema":2243},"The ultimate guide to software supply chain security","Coupling DevSecOps with software supply chain security results in the advanced protection organizations need.","https://about.gitlab.com/blog/the-ultimate-guide-to-software-supply-chain-security","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"The ultimate guide to software supply chain security\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Sandra Gittlen\"}],\n        \"datePublished\": \"2022-08-30\",\n      }",{"title":2240,"description":2241,"authors":2245,"heroImage":1640,"date":2228,"body":2246,"category":14,"tags":2247},[746],"\n\nThreats to the software supply chain are forcing a sea change in DevOps. 
Organizations are feeling internal pressure to embed security deep into their software development life cycles and external pressure to comply with numerous federal and industry mandates. What is emerging is a DevSecOps strategy that helps govern how code, applications, and infrastructure are protected across the software supply chain.\n\nThe pairing of DevSecOps with software supply chain security also ensures that, where possible, automation will be used to make processes repeatable, increasing security and reducing the opportunity for human error or malicious activity.   \n\nThis comprehensive guide provides deeper dives into all the aspects of software supply chain security so make sure to follow the embedded links.\n\n## The need for software supply chain security\n\nSecuring code is not a new concept. However, promoting security early on in the development life cycle is. The movement to shift security left has taken off, and “sec” is becoming part of the DevOps culture, morphing the concept wholly into DevSecOps. 
\n\nAlong with this evolution has been an increase in outside pressure – as formidable as [the federal government](/blog/biden-administration-celebrates-1-year-anniversary-of-eo-by-accelerating-software-supply-chain-security/) – to batten down software supply chains so that large attacks such as the [SolarWinds hack of 2020](/blog/what-the-solarwinds-attack-can-teach-us-about-devsecops/#a-brief-summary-of-the-solarwinds-incident) won’t threaten the nation’s critical infrastructure and cause unmitigated damage.\n\nEssentially, businesses must figure out how to meld their development, security, and operations teams internally while complying with numerous mandates from external organizations.\n\nLearn more about the key trends driving software supply chain security:\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/Tbiscg09-Ac\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\n## Integrating sec into DevSecOps\n\nThe first step in securing the software supply chain is to create a cohesive DevSecOps approach to software development. 
In doing so, organizations can expand security in DevOps beyond basic tasks and better [understand myriad threat vectors](/blog/top-challenges-to-securing-the-software-supply-chain/).\n\n_[Security in the modern DevOps solution](/blog/are-you-ready-for-the-newest-era-of-devsecops/) goes beyond just shifting security features left to empower the developers to find and fix security flaws, but also provides end-to-end visibility and control over the entire SDLC to create, deliver, and run the applications._\n\nTeams that integrate security practices throughout their development process are 1.6 times more likely to meet or exceed their organizational goals, according to the Google Cloud DevOps Research and Assessment (DORA) “Accelerate State of DevOps 2021 Report”.\n\nSome [best practices elite DevSecOps teams use](/blog/elite-team-strategies-to-secure-software-supply-chains/) are:\n\n- Apply common controls for security and compliance\n- Automate common controls and CI/CD\n- Apply [zero-trust principles](/blog/why-devops-and-zero-trust-go-together/)\n- Inventory all tools and access, including infrastructure as code\n- Consider unconventional scale to find unconventional vulnerabilities\n- Secure containers and orchestrators\n\n## Understanding federal and industry mandates\n\nThe Biden administration has been singular in its demand that federal agencies and their vendors [make significant improvements in software supply chain security](https://www.whitehouse.gov/briefing-room/presidential-actions/2021/05/12/executive-order-on-improving-the-nations-cybersecurity/).\n\nThat sense of urgency has trickled down to the standards bodies, including the National Institute of Standards and Technology (NIST) and its [Secure Software Development Framework](https://csrc.nist.gov/Projects/ssdf), the Cybersecurity and Infrastructure Agency’s work on [Software Bill of Materials standards](https://www.cisa.gov/sbom), and [SLSA](https://slsa.dev/), a cross-industry collaboration on a 
security framework to secure the supply chain.\n\nCompliance officers within organizations are looking to DevSecOps teams to make it easy for them to audit the development life cycle and attest to requirements in these mandates.\n\n## How a DevOps platform helps \n\nIn our [2022 Global DevSecOps survey](/developer-survey/), respondents overwhelmingly told us that secure software development is now an imperative for their organization and that they believe security is the top reason to deploy a DevOps platform. \n\nA DevOps platform can certainly help [protect against software supply chain attacks](/blog/devops-platform-supply-chain-attacks/). Here are some examples how:\n\n- End-to-end visibility and auditability: Who changed what, where, and when.\n\n- Consistent application and administration of policies: Both what policies are used where, and the actions taken for exceptions\n\n- More intelligent response through greater end-to-end context\n\n- Reduced attack surface of a simplified toolchain\n\nDevOps platforms can even support more sophisticated software supply chain security techniques such as [securing pipeline builds with code signing](/blog/secure-pipeline-with-single-sign-in/). Code signing is an area of interest to standards bodies setting requirements for protecting software supply chains.\n \n## GitLab’s strengths in software supply chain security\n\nGitLab has been at the leading edge of DevSecOps, helping organizations to evolve their security practices from traditional application testing.\n\nFor instance, rather than being performed by security pros, using their own tools, at the end of the development cycle, security testing is automated within the CI pipeline with findings delivered to developers while they are still iterating on their code. 
Read how GitLab is also [revolutionizing CI and security, and remediation practices](/blog/gitlab-is-setting-standard-for-devsecops/).\n\nGitLab is laser-focused on enabling organizations to establish and manage security and compliance guardrails that allow developers to run fast while also managing risk, including the introduction of [continuous compliance and policy engines](/blog/gitlabs-newest-continuous-compliance-features-bolster-software/), as well as [automated attestation](/blog/securing-the-software-supply-chain-through-automated-attestation/) and [SBOMs](/blog/the-ultimate-guide-to-sboms/).\n\nThe GitLab partner ecosystem helps the platform to meet even more security needs, including [generating SBOMs\nautomatically](/blog/gitlab-and-testify-sec-witness-alliance/) and [protecting software from malicious modules](/blog/terraform-as-part-of-software-supply-chain-part1-modules-and-providers/).\n\nMore on GitLab’s software supply chain security vision can be found [here](/blog/gitlab-supply-chain-security/). 
And learn even more about securing the software supply chain as GitLab Field CTO [Lee Faus](https://gitlab.com/lfaus) answers some burning questions:\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/HubJIQ-x2EA\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n",[750,1128,1428],{"slug":2249,"featured":6,"template":683},"the-ultimate-guide-to-software-supply-chain-security","content:en-us:blog:the-ultimate-guide-to-software-supply-chain-security.yml","The Ultimate Guide To Software Supply Chain Security","en-us/blog/the-ultimate-guide-to-software-supply-chain-security.yml","en-us/blog/the-ultimate-guide-to-software-supply-chain-security",{"_path":2255,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":2256,"content":2262,"config":2267,"_id":2269,"_type":16,"title":2270,"_source":18,"_file":2271,"_stem":2272,"_extension":21},"/en-us/blog/top-reasons-for-software-release-delays",{"title":2257,"description":2258,"ogTitle":2257,"ogDescription":2258,"noIndex":6,"ogImage":2259,"ogUrl":2260,"ogSiteName":697,"ogType":698,"canonicalUrls":2260,"schema":2261},"Top reasons for software release delays","In our 2022 Global DevSecOps survey, DevOps pros shared their frustrations with software releases, including security's shift left and complicated code reviews.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749664070/Blog/Hero%20Images/cloudwatch-gitlab-incident-management-bg.jpg","https://about.gitlab.com/blog/top-reasons-for-software-release-delays","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Top reasons for software release delays\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Valerie Silverthorne\"}],\n        \"datePublished\": \"2022-08-30\",\n      
}",{"title":2257,"description":2258,"authors":2263,"heroImage":2259,"date":2228,"body":2264,"category":14,"tags":2265},[1859],"\n_What’s the most likely reason for a software release delay?_\n\nFrom 2019 through 2021, respondents to our Global DevSecOps Surveys _always_ blamed software testing. This year, however, was dramatically different.\n\nMore than 5,000 DevOps practitioners took our [2022 Global DevSecOps Survey](https://about.gitlab.com/developer-survey/previous/2022/), and, for the first time, they offered five equally valid reasons why releases might be tardy: code development, code review, security analysis, test data management, and, of course, testing. \n\nProcesses and priorities are clearly changing in DevOps teams today, and they’re affecting release delays. Here’s how to understand the forces at work.\n\n> Join us at [GitLab Commit 2022](/events/commit/) and connect with the ideas, technologies, and people that are driving DevOps and digital transformation.\n\n## Code development and code review\n\nOver the past three years, code development and code review were the second- and third-ranked culprits for release delays. That’s to be expected: No one ever said code development was easy and code reviews have always been problematic.\n\nDevelopers report [a myriad of challenges with code review](/blog/the-code-review-struggle-is-real-heres-what-you-need-to-know/): It’s too labor intensive, no one is available to do it, and the culture often doesn’t support the process. But in this year’s survey, 76% of developers said they find code reviews “very” or “somewhat” valuable, and a majority said code review was one of the key steps in DevOps they wish they could do more of. All told, 27% of developers review code weekly while another 21% review it daily or with every commit.\n\nClearly, code review is important but [it takes work](/blog/tips-for-better-code-review/) to make them happen more efficiently. 
One up-and-coming solution that could help make code reviews easier is artificial intelligence. Our survey found 31% of DevOps teams use AI for code review today, more than double the percentage in 2021. GitLab is also excited about the possibilities found in AI’s close cousin machine learning – we’re using it to [improve the code review process](/blog/the-road-to-smarter-code-reviewer-recommendations/). \n\n## Keeping software secure\n\nCreating safe code requires security testing and the frustration around this step is both real and longstanding. Security has nearly always [been seen as a “blocker”](/blog/developer-security-divide/) when it comes to software development in general and software releases in particular. In our 2022 survey, though, priorities have changed. Security is now the top area DevOps teams plan to invest in this year, and a majority of developers report that the most difficult part of their job is keeping software secure. Here’s just a sample of what developers had to say about the challenges of their roles today: \n\n_We are trying to keep up with the latest tools and security for optimal performance and privacy._\n\n_We are trying to build applications that are secure and stable._\n\n_It is challenging to keep it secure and keep it updated._\n\n_Cyber security attacks are the biggest challenge facing us today._\n\n_Data security, data security, I repeat, data security._\n\nThe focus on security isn’t just talk, either. More than 50% of DevOps teams are running SAST, DAST, and container scans, all dramatic increases from 2021. But at the same time, this is the fourth year security pros have continued to blame developers for finding too few bugs too late in the process. 
Security is a developer performance metric for many teams, but sec team members say it is still very hard to get devs to actually fix bugs, a trend we’ve seen reflected over and over.\n\nIn other words, it’s complicated enough to make the potential of delays unsurprising.\n\n## Managing the test data\n\nToo much test data is one of those good and bad problems to have: 47% of DevOps teams we surveyed report full test automation, nearly double the percentage from last year, and more security scans are being run too. More than half of survey takers (53%) are testing their code as it’s being written, up 21% from last year.\n\nAll those tests result in a data management problem most teams aren’t actually set up to handle. Here’s one example: Less than one-third of teams are able to put DAST and SAST results into a developer’s workflow/IDE and those percentages remain stubbornly low year after year. \n\nTesting momentum and automation are growing by leaps and bounds, but teams now need better ways to evaluate, communicate, and act on the data.\n\n## The tricky nature of software testing\n\nSoftware testing has often worn the “DevOps scapegoat” mantle, and perhaps for good reason. Getting testing just right is critical, but it’s also elusive. There are so many kinds of tests teams can run, test automation requires a big process and culture investment, and test results are often seen as “flaky,” “noisy,” and “late” by busy developers not enthused about context switching or inaccurate results. \n\nBut there are a couple of promising signs: As we saw in 2021, developer respondents told us again this year that testing is high on their list of tasks they would like to do more of. And artificial intelligence is also making inroads: About 37% of teams are using AI/ML to test their code (a 23-point jump from 2021) and 20% more are planning to add it to their DevOps practice this year.\n\nWant to understand more about software release delays and DevOps best practices? 
Read our [2022 Global DevSecOps Survey](/developer-survey/).\n",[1589,2266,815,1128],"code review",{"slug":2268,"featured":6,"template":683},"top-reasons-for-software-release-delays","content:en-us:blog:top-reasons-for-software-release-delays.yml","Top Reasons For Software Release Delays","en-us/blog/top-reasons-for-software-release-delays.yml","en-us/blog/top-reasons-for-software-release-delays",{"_path":2274,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":2275,"content":2281,"config":2287,"_id":2289,"_type":16,"title":2290,"_source":18,"_file":2291,"_stem":2292,"_extension":21},"/en-us/blog/a-3-step-plan-for-devops-platform-migration",{"title":2276,"description":2277,"ogTitle":2276,"ogDescription":2277,"noIndex":6,"ogImage":2278,"ogUrl":2279,"ogSiteName":697,"ogType":698,"canonicalUrls":2279,"schema":2280},"A 3-step plan for DevOps platform migration","Too many tools = too much time wasted. Use our 3-step plan and detailed checklist to jumpstart a DevOps platform migration.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749668319/Blog/Hero%20Images/more-robust-task-lists.jpg","https://about.gitlab.com/blog/a-3-step-plan-for-devops-platform-migration","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"A 3-step plan for DevOps platform migration\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Lauren Minning\"}],\n        \"datePublished\": \"2022-08-25\",\n      }",{"title":2276,"description":2277,"authors":2282,"heroImage":2278,"date":2284,"body":2285,"category":14,"tags":2286},[2283],"Lauren Minning","2022-08-25","\n\nWhen making your DevOps platform migration plan, less really is more, at least when it comes to tools.\n\nOur [2022 Global DevSecOps Survey](/developer-survey/) found that not only do teams have _lots_ of tools, they spend a significant amount of time managing them. 
All told 40% of developers spend between one quarter and one half of their time on toolchain maintenance and integration, and another 33% spend between 50% and **all** of their time on this task. So it’s hardly a surprise that 69% of survey takers said they want to consolidate their toolchains.\n\nOne obvious way to consolidate is migrating to a DevOps platform. DevOps platform migration does take some planning and teamwork, but it can be done. Here’s a 3-step plan (and a self-evaluation checklist) to get teams started.\n\n## Choose the right path\n\nThe most important thing to know about migrating to an end-to-end DevOps platform is that everyone's needs are different so there isn’t one “right way” to carry out your migration.\n\nA company that has 1,000 users will have completely different DevOps needs than a company that has 5,000 users. What your specific DevOps platform migration plan requires will depend on the types of projects you migrate, the file types within those projects, and a whole host of other parameters. Because of this, there is not a “one size fits all” migration process for everyone to follow. \n\nHere’s a basic 3-step guide for migrating to a DevOps platform:\n\n**Begin by identifying** the strategic goals and be clear about why they are a priority for future business plans.\n\n**Evaluate tools** currently in use that no longer serve future goals. Ultimately the goal should be to operate entirely out of a single application for maximum efficiency. But it may make sense to migrate some things now and others down the line. \n\nThis is the time to become a historian and discern which tools have been problematic in the past. Consider what to migrate right away or later on and why (i.e., instability or costly maintenance and licensing) and really use that to inform the migration process. \n\n_An important note: Take into consideration the business disruption that migration has on a company. 
Replacing existing tools with a new DevOps platform in one step could mean sweeping changes across the organization, and the fallout might not be worth it. Instead, start with the things taking time, effort and money to maintain. And continue to keep it as simple and streamlined as possible._\n\n**Have everyone** on the team complete a self-evaluation so there are no surprises.\n\n## Do a self-evaluation \n\nHere are key questions to ask:\n\n- What’s the timeline? Discuss with all involved parties – existing team members and a representative of the new DevOps platform – how much time to allot for a completed migration. Migrations can take anywhere from 2 weeks for the initial migration to 3-6 months for monitoring. \n\n- What are the costs? This kind of platform adoption can ultimately save a LOT of money. However, the adoption of a new DevOps platform and the associated migration will no doubt have costs. Consider all costs and make sure they align with budgetary goals and requirements.\n\n- What about assistance? Are other parts of the company prepared to support a migration? How much of this will require work from the existing team and how much support will the DevOps platform provider offer? \n\n- Who are the primary and other platform users? What teams of people will migrate to this new platform? Will everyone have the same level or different levels of permissions? What needs to be done so that these teams are prepared to learn and teach the ins and outs of the new platform to other team members? \n\n- What data is migrating? Make sure to have a 360 view of the data involved in a migration including, projects, issues, and file types. What changes can happen with data when moving to a brand new DevOps platform? When evaluating the projects planned for migration, explore which applications teams spend the most time and energy working with, and what will set them up for success in the new platform.\n\n- How will automation fit in? 
Ensure teams understand the technology underpinnings of automation, like Kubernetes, CI/CD and more.\nHow should it be customized? Not every tool on a DevOps platform will be right for every team, and some tools might be a better fit at a later date. It makes sense to address any technology “outliers” right from the start. \n\n- Should the process be documented? Every step of the migration process should be documented and shared across teams. This level of transparency and an iterative, easy-to-search knowledge base can help problem-solve and refer back to stages already completed. Much like a single source for DevOps, a single source of truth for DevOps migration info helps everyone involved. \n\n- What about security? Security is never a “one and done,” but this is a good time to consider processes and levels of protection.\nWhat are good results?: What will a successful migration look like – when data is moved, or when teams are comfortable in their knowledge and use of the new system? 
Map out what the goals that will be critical to a successful migration.\n\nCheck out our _[Migrating to a DevOps platform](https://page.gitlab.com/migrate-to-devops-guide.html)_ eBook  for even more useful information about how to complete a successful DevOps platform migration.\n",[1128,1589,750],{"slug":2288,"featured":6,"template":683},"a-3-step-plan-for-devops-platform-migration","content:en-us:blog:a-3-step-plan-for-devops-platform-migration.yml","A 3 Step Plan For Devops Platform Migration","en-us/blog/a-3-step-plan-for-devops-platform-migration.yml","en-us/blog/a-3-step-plan-for-devops-platform-migration",{"_path":2294,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":2295,"content":2301,"config":2306,"_id":2308,"_type":16,"title":2309,"_source":18,"_file":2310,"_stem":2311,"_extension":21},"/en-us/blog/too-many-toolchains-a-devops-platform-migration-is-the-answer",{"title":2296,"description":2297,"ogTitle":2296,"ogDescription":2297,"noIndex":6,"ogImage":2298,"ogUrl":2299,"ogSiteName":697,"ogType":698,"canonicalUrls":2299,"schema":2300},"Ditch toolchain problems with a DevOps platform","Migrating to a platform is the next step in the DevOps evolution.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749667978/Blog/Hero%20Images/go-tools-and-gitlab.jpg","https://about.gitlab.com/blog/too-many-toolchains-a-devops-platform-migration-is-the-answer","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Ditch toolchain problems with a DevOps platform\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Sharon Gaudin\"}],\n        \"datePublished\": \"2022-08-24\",\n      }",{"title":2296,"description":2297,"authors":2302,"heroImage":2298,"date":2303,"body":2304,"category":14,"tags":2305},[1364],"2022-08-24","\n\nBy adopting DevOps tools without an end-to-end platform, teams have been adding complexity, mounting costs, and headaches to their job. 
[Migrating to a true Devops platform](https://page.gitlab.com/migrate-to-devops-guide.html) is the way to get out from under all of that and gain control of projects, break down silos, and cultivate collaboration.\n\nCompanies are increasingly turning to DevOps to create software more efficiently and securely. However, not all of them have adopted a [single DevOps platform](/blog/welcome-to-the-devops-platform-era/), instead opting to cobble together a myriad of tools to handle everything in the software development lifecycle – from planning to delivery. Of course, DevOps tools are helpful, but there can be too much of a good thing.\n\nThis do-it-yourself, or DIY, effort creates a mish-mash of tools that force team members to continuously jump back and forth between multiple interfaces, passwords, and ways of working. It also creates a chaotic environment that needs to be endlessly updated and held together with digital duct tape. And by using a plethora of disparate tools, no one gets an overall view of the projects they’re working on.\n\nGoing DIY isn’t just affecting software development and deployment. It’s also weighing down the business that relies on those products.\n\nThe [problem solver here is the end-to-end platform](/blog/the-devops-platform-for-agile-business/). It’s the next step in DevOps, changing the way people work in a fundamental way.\n\nMigrating from a seat-of-your-pants, DIY system to a simpler, more powerful, single application brings a lot of benefits. Using an end-to-end platform eliminates the time-consuming and costly tangle of tools, breaks down silos, [builds security into every step](/blog/one-devops-platform-can-help-you-achieve-devsecops/) of the development process, and speeds strategic visions into actual working software. The platform enables tech teams to increase efficiency by focusing on delivering software, instead of updating, patching, and stitching together toolchains. 
\n\n## Eliminating the DevOps tax\n\nMigrating from a complex toolchain to a platform also will eliminate the DevOps tax. \n\nThat refers to the cost that organizations incur when they employ multiple tools and/or multiple toolchains instead of a single, continuous platform. Think about how much time workers spend stitching together and maintaining a toolchain rather than focusing on planning, developing, and deploying software.\n\nHow much are organizations wasting on the dreaded DevOps tax? Too much: our [2022 Global DevSecOps Survey](/developer-survey/) found nearly 40% of devs are spending between one-quarter and one-half of their time integrating and maintaining toolchains, while another 33% spend half to **all** of their time dealing with this issue. Thus it's no surprise that 69% of respondents want to consolidate their toolchains.\n\nA return on investment, or ROI, should come quickly for companies migrating to a DevOps platform, since they will be saving the money that would have been spent watering and feeding a large, complicated tangle of tools. \n\n##  Fostering collaboration\n\nAnother value add to using a DevOps platform is that it will [foster collaboration](/blog/5-ways-collaboration-boosts-productivity-and-your-career/) and shared responsibility. Team members will no longer be working in isolated silos, focused only on their own project – or even just a piece of a project. A DevOps platform enables communication and information sharing. It also adds transparency by giving everyone with a stake in the project a clear view of the progress being made and any challenges being encountered. It also allows for people to make suggestions to share ideas or help clear away obstacles. \n\nA [DevOps platform](/solutions/devops-platform/) will streamline every aspect of the software development lifecycle — from planning to development, testing, deployment, and monitoring. 
Check out the [Migrating to a DevOps platform playbook](https://page.gitlab.com/migrate-to-devops-guide.html) for more information on replacing your DIY DevOps toolchain with an end-to-end platform.\n",[1128,1589,750],{"slug":2307,"featured":6,"template":683},"too-many-toolchains-a-devops-platform-migration-is-the-answer","content:en-us:blog:too-many-toolchains-a-devops-platform-migration-is-the-answer.yml","Too Many Toolchains A Devops Platform Migration Is The Answer","en-us/blog/too-many-toolchains-a-devops-platform-migration-is-the-answer.yml","en-us/blog/too-many-toolchains-a-devops-platform-migration-is-the-answer",{"_path":2313,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":2314,"content":2319,"config":2325,"_id":2327,"_type":16,"title":2328,"_source":18,"_file":2329,"_stem":2330,"_extension":21},"/en-us/blog/gitlabs-2022-global-devsecops-survey-security-is-the-top-concern-investment",{"title":2315,"description":2316,"ogTitle":2315,"ogDescription":2316,"noIndex":6,"ogImage":1935,"ogUrl":2317,"ogSiteName":697,"ogType":698,"canonicalUrls":2317,"schema":2318},"DevSecOps Survey 2022: Security leads concern and investment","Find out if your successes and concerns about security and more match those of your peers.","https://about.gitlab.com/blog/gitlabs-2022-global-devsecops-survey-security-is-the-top-concern-investment","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"GitLab's 2022 Global DevSecOps Survey: Security is the top concern, investment\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Valerie Silverthorne\"}],\n        \"datePublished\": \"2022-08-23\",\n      }",{"title":2320,"description":2316,"authors":2321,"heroImage":1935,"date":2322,"body":2323,"category":14,"tags":2324},"GitLab's 2022 Global DevSecOps Survey: Security is the top concern, investment",[1859],"2022-08-23","\nThe days of security as a “nice to have” are officially over as we enter the 
era of [DevSecOps](/topics/devsecops/). In our [2022 Global DevSecOps Survey](https://about.gitlab.com/developer-survey/previous/2022/) of more than 5,000 practitioners, security was the driving force behind technology choices, team structure, DevOps platform use, and more. \n\nThe findings from our [sixth annual survey](/developer-survey/) represent a dramatic shift from past years, when security teams – and security concerns – were often siloed and silenced in the push to get software out the door faster.\n\nNothing could be further from the truth today:\n\n- The number one reason to implement a DevOps platform? Security. (And 75% of DevOps teams use a [DevOps platform](/topics/devops-platform/) currently or plan to this year.)\n\n- The number one benefit of a DevOps platform? Security.\n\n- The number one investment priority for 2022? Security.\n\nThe attention to security in DevOps teams doesn’t stop there. As our surveys have shown since 2020, [DevOps roles continue to shift](/blog/software-developer-changing-role/), and this year, many of those shifts were laser-focused on security.\n\n- 53% of developers told us they’re “fully responsible” for security in their organizations, a 14 point increase from 2021.\n\n- Over one-third of security pros report being “hands on” and involved on a daily basis with dev and ops, an 11% increase from last year (and a massive cultural shift from groups not always known to get along).\n\n- Almost 50% of ops pros say they’re fully responsible for security in their organizations, up 20% from last year. \n\nAnd when we asked developers about the most difficult parts of their jobs, thousands pointed to security and security-related concerns. 
Three developers summed it up:\n\n_“Cyber security attacks are the biggest concerns facing us today.”_\n\n_“Data security, data security, I repeat, data security.”_\n\n_“Trying to build applications that are secure and stable.”_\n\n## More work to do\n\nSecurity clearly has a seat at the DevOps table today, but areas of friction remain. \n\nFor starters, security testing requires a balance that’s difficult to achieve. Static application security testing [(SAST)](/direction/secure/static-analysis/sast/), dynamic application security testing [(DAST)](/direction/secure/dynamic-analysis/dast/), and container and dependency scans are increasing, which is good news, but the percentage of devs able to easily access those results in their workflows remains stubbornly low (30% or less). \n\nAnd sec and dev [may never see eye to eye](/blog/developer-security-divide/) on finding and fixing bugs. For the third year in a row, sec pros said devs don’t find enough bugs early enough in the process, meaning they are stuck finding and fixing them much later (when it’s more difficult). And, as we’ve heard repeatedly over the last years, security’s focus and development’s focus aren’t usually the same: \n\n**57% of sec pros said finding bugs was a developer performance metric in their organizations, but 56% said it was difficult to get developers to actually prioritize bug remediation.**\n\n## Facing the future\n\nWhile security pros feel good about their organizations’ security postures (71% rated them as “good” or “very good”), they’re not feeling particularly optimistic about the future. A full 43% said they feel “somewhat” or “very” unprepared for the future; to look at it from another way, the percentage of sec pros who are confident, 56%, is 20 points *lower* than either their ops or dev colleagues.\n\nWhat can help power security professionals into the future? Surprisingly, the top answer (54%) is AI, which was a 33% increase from last year. 
Since 2020, sec respondents have said soft skills like communication and collaboration were most important but this year soft skills came in second place.\n\nSecurity is just one of many themes – automation, AI, information overload, real world challenges, compliance, and faster releases, to name just a few – our survey uncovered. So download and share the entire report, [“The 2022 DevSecOps Survey: Thriving in an Insecure World”](/developer-survey/), to dig deeper into them.\n\n## Read the previous surveys!\n\n[GitLab 2021 DevSecOps Survey](/developer-survey/previous/2021)\n\n[GitLab 2020 Global Developer Report: DevSecOps](/developer-survey/previous/2020/)\n\n[GitLab 2019 Global Developer Report: DevSecOps](/developer-survey/previous/2019/)\n",[1589,1128,750],{"slug":2326,"featured":6,"template":683},"gitlabs-2022-global-devsecops-survey-security-is-the-top-concern-investment","content:en-us:blog:gitlabs-2022-global-devsecops-survey-security-is-the-top-concern-investment.yml","Gitlabs 2022 Global Devsecops Survey Security Is The Top Concern Investment","en-us/blog/gitlabs-2022-global-devsecops-survey-security-is-the-top-concern-investment.yml","en-us/blog/gitlabs-2022-global-devsecops-survey-security-is-the-top-concern-investment",{"_path":2332,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":2333,"content":2339,"config":2344,"_id":2346,"_type":16,"title":2347,"_source":18,"_file":2348,"_stem":2349,"_extension":21},"/en-us/blog/the-gitlab-guide-to-modern-software-testing",{"title":2334,"description":2335,"ogTitle":2334,"ogDescription":2335,"noIndex":6,"ogImage":2336,"ogUrl":2337,"ogSiteName":697,"ogType":698,"canonicalUrls":2337,"schema":2338},"The GitLab guide to modern software testing","If test is your DevOps team's Public Enemy No. 1, it's time to rethink your strategy. 
Here's what you need to know about modern software testing.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749668307/Blog/Hero%20Images/test-automation-devops.jpg","https://about.gitlab.com/blog/the-gitlab-guide-to-modern-software-testing","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"The GitLab guide to modern software testing\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Valerie Silverthorne\"}],\n        \"datePublished\": \"2022-08-18\",\n      }",{"title":2334,"description":2335,"authors":2340,"heroImage":2336,"date":2341,"body":2342,"category":14,"tags":2343},[1859],"2022-08-18","\nWhat's the trickiest part of DevOps? It's software testing, hands down. Year after year, respondents to our [annual DevSecOps surveys](/developer-survey/) have called out testing as the most likely reason for release delays. And that's not all they said: \"Testing takes too long,\" \"There are too many tests,\" \"We need to do more testing,\" “We need more automated testing but don't have time,\" \"Testing happens too late,\" etc.\n\nClearly something this fraught needs all the help, so here is our best advice to get testing \"just right\" in any modern DevOps practice. \n\n## Use the right metrics\n\nAll of the testing in the world doesn't matter if a DevOps team is measuring the wrong things. At GitLab, we use industry-standard metrics, but we look at them a bit differently. When it comes to S1 and S2 bugs we don’t count the time to close but rather the age of the bugs that remain open. Our reasoning? We want to look forward, but we also don't want to [incentivize closing only newer bugs](/blog/gitlab-top-devops-tooling-metrics-and-targets/). 
So it's important to make sure DevOps teams are looking at the right metrics and with shared goals in mind.\n\n## Forget flaky\n\nTests are noisy, and they can be flaky, setting off alarms and disrupting developer flow, often for no reason. That's at the heart of developer frustration with testing, and one of the biggest problems DevOps teams need to solve. GitLab's Vice President of Quality [Mek Stittri](/company/team/#meks) suggests re-thinking how automated tests are created. Tests need to be validating the right things, but that must include looking at how all of the code components work together and not just at pieces of code. Finally, it doesn't hurt to [develop a manual testing mindset](/blog/software-test-at-gitlab/).\n\n## Make it modern\n\nIn fact, a manual testing mindset, where test designers create tests that actually mimic what real users do, is a key underpinning of modern software testing in DevOps. Testers need to consider getting certified, embracing new technologies like AI, and, perhaps most importantly, be [evangelists for quality](/blog/how-to-leverage-modern-software-testing-skills-in-devops/) on a DevOps team.\n\n## Make automation work harder\n\nSoftware testing may be the most annoying DevOps step, but there's no doubt that automating the process makes everything work more smoothly. Teams with test automation [have fewer complaints about release delays](/blog/want-faster-releases-your-answer-lies-in-automated-software-testing/). And teams that have taken it up a notch and added AI/ML into their test automation process are even more upbeat about testing. After all, bots [don't need to take a lunch break or a vacation](/blog/the-software-testing-life-cycle-in-2021-a-more-upbeat-outlook/). 
Finally, if automation is well thought out, QA and developers can [actually work together to get code out the door](/blog/what-blocks-faster-code-release/).\n\n## Test for everything\n\nFor all the developer finger-pointing around software testing, it's also clear from our surveys that _more_ testing – of everything – has to happen. When considering how to modernize a software testing strategy, don't forget that \"nice to haves\" like [accessibility testing](/blog/introducing-accessibility-testing-in-gitlab/) aren't actually optional but critical for success.\n\nAnd also don't overlook the potential of newer test techniques like [fuzzing](/blog/why-continuous-fuzzing/), which can work with [Go](/blog/how-to-fuzz-go/), [Rust](/blog/how-to-fuzz-rust-code/), and other languages, and take testing into places other methodologies cannot.\n\n## The bottom line\n\nTesting doesn't have to be the enemy of speedy releases or the object of so much frustration. Start fresh with a modern software testing approach and and make it easy for teams to get the most out of QA.\n",[792,1128,728],{"slug":2345,"featured":6,"template":683},"the-gitlab-guide-to-modern-software-testing","content:en-us:blog:the-gitlab-guide-to-modern-software-testing.yml","The Gitlab Guide To Modern Software Testing","en-us/blog/the-gitlab-guide-to-modern-software-testing.yml","en-us/blog/the-gitlab-guide-to-modern-software-testing",{"_path":2351,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":2352,"content":2358,"config":2363,"_id":2365,"_type":16,"title":2366,"_source":18,"_file":2367,"_stem":2368,"_extension":21},"/en-us/blog/eight-steps-to-prepare-your-team-for-a-devops-platform-migration",{"title":2353,"description":2354,"ogTitle":2353,"ogDescription":2354,"noIndex":6,"ogImage":2355,"ogUrl":2356,"ogSiteName":697,"ogType":698,"canonicalUrls":2356,"schema":2357},"8 Steps to prepare your team for a DevOps platform migration","Getting teams ready enables them to migrate with more confidence and ease. 
Here's how to get started.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749663786/Blog/Hero%20Images/craftsman-looks-at-continuous-integration.jpg","https://about.gitlab.com/blog/eight-steps-to-prepare-your-team-for-a-devops-platform-migration","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"8 Steps to prepare your team for a DevOps platform migration\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Sharon Gaudin\"}],\n        \"datePublished\": \"2022-08-16\",\n      }",{"title":2353,"description":2354,"authors":2359,"heroImage":2355,"date":2360,"body":2361,"category":14,"tags":2362},[1364],"2022-08-16","\nWhen organizations are getting ready to [move to a DevOps platform](https://page.gitlab.com/migrate-to-devops-guide.html), taking the time to get IT teams prepped for the migration will mean people can make the transition with more confidence and efficiency.\n\nBy [replacing a complicated mix of DevOps tools](/topics/devops/use-devops-platform-to-avoid-devops-tax/) with a single, end-to-end DevOps platform, you are about to change the way people work in a fundamental way. That will bring many benefits, like cutting tool-management costs, [increasing security](/blog/one-devops-platform-can-help-you-achieve-devsecops/), speeding software creation and deployment, and [replacing silos with a collaborative environment](/blog/5-ways-collaboration-boosts-productivity-and-your-career/). But any kind of change can create anxiety. By reaching out to people as part of your migration prep, managers can calm those stresses, create champions for the adoption, and ease the work that’s to come. \n\nLet’s look at what IT leaders can do to ease this transition for everyone.\n\n## Build buy-in\n\nStarting at the VP and CIO level, create organization-wide buy-in for this migration. 
This will be a wide-reaching project so everyone from the C-suite on down needs to be on board. Help them understand the importance of making this move. It’s not about adding a new tool – it’s about improving the way software development works overall, so make sure everyone is invested _from the beginning_. “Management and DevOps teams both need to understand that not migrating will ultimately take up more time and energy because they’d be forced to continue time-consuming glue work and duct taping to keep the toolchain stitched together,\" says [Brendan O’Leary](/company/team/#brendan), staff developer evangelist at GitLab. “People will be doing a lot less of that after a migration.”\n\n> Join us at [GitLab Commit 2022](/events/commit/) and connect with the ideas, technologies, and people that are driving DevOps and digital transformation.\n\n## Find champions\n\nEarly in the process, find your innovators and migration champions. Talk with people on every team to figure out who is excited about adopting a DevOps platform. These people will be critical. Empower them to lead the charge by allowing them to be the first to migrate with your full, visible support. Then their migration successes will serve as inspiration for those less excited to make the move.\n\n## Ease tension\n\nRemember that change makes people nervous and be sensitive to that. Get ahead of any anxieties by laying out how continuing on with their existing (and ever-expanding) [toolchains will only suck up more of their time and efforts](/blog/the-journey-to-a-devops-platform/) because they’ll have to remain focused on juggling a tangle of tools, instead of actually turning plans into software. Toolchains are not the fun part of their jobs, and they’ll be letting go of that.\n\n## Set expectations\n\nTalk with workers about what this will mean for them individually. Reassure them that this does not mean their jobs will be eliminated. 
However, it will change their day-to-day responsibilities since they’ll be doing less feeding and watering of disparate tools. That will give them more time to take on bigger, more valuable and more interesting projects. Developers, in particular, want to [work on projects that matter](/blog/why-software-developer-job-satisfaction-matters-and-how-to-make-it-happen/). Decreasing the toolchain red tape will be a huge step towards increased job satisfaction. \n\n## Define roles\n\nNot everyone on every team will work on the migration. Some will need to keep software development and deployment moving along, while others work on the adoption. Make it clear to individual team members what their roles will be. They’ll automatically be more at ease if it’s clear what their migration responsibilities will be.\n\n## Plan for training\n\nAssure everyone there will be training. They won’t just be thrown into the deep end of the pool. Make sure they know you will be setting them up for success.\n\n## Create sample projects\n\n[Fatima Sarah Khalid](/company/team/#sugaroverflow), a developer evangelist at GitLab, says that even before a migration even begins, managers should ensure their team members are ready to use a DevOps platform to do everything from planning to testing, and pushing software iterations through to production. “Managers should think about having a sample project set up with issues and epics. Set up workflows and merge requests. Run it all through,” says Khalid. 
“Getting hands-on experience before the migration will get rid of anyone’s fear that they’ll break something.”\n\n## Lay out the benefits\n\nMake sure everyone understands the benefits of using a DevOps platform:\n\n- Your business will be able to quickly, securely, and efficiently turn a vision into software.\n\n- Working in isolated silos will be replaced with working in tandem with teammates, [collaborating, and sharing information and responsibilities](/blog/if-its-time-to-learn-devops-heres-where-to-begin/).\n\n- A single application will give an overarching view of projects, enabling teams to check in on, comment on and offer suggestions on projects as they move through the development lifecycle.\n\n- Security and compliance will increase as it will be built into every step of the development and deployment lifecycle.\n\n- [Built-in automation](/blog/want-faster-releases-your-answer-lies-in-automated-software-testing/) will reduce repetitive hands-on work with everything from testing to documentation.\n\nBy preparing teams to make the move to a DevOps platform, the entire migration process will be easier and more efficient. 
For more information on transitioning to an end-to-end platform, [check out this ebook](https://page.gitlab.com/migrate-to-devops-guide.html).\n",[1128,111,1428],{"slug":2364,"featured":6,"template":683},"eight-steps-to-prepare-your-team-for-a-devops-platform-migration","content:en-us:blog:eight-steps-to-prepare-your-team-for-a-devops-platform-migration.yml","Eight Steps To Prepare Your Team For A Devops Platform Migration","en-us/blog/eight-steps-to-prepare-your-team-for-a-devops-platform-migration.yml","en-us/blog/eight-steps-to-prepare-your-team-for-a-devops-platform-migration",{"_path":2370,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":2371,"content":2377,"config":2383,"_id":2385,"_type":16,"title":2386,"_source":18,"_file":2387,"_stem":2388,"_extension":21},"/en-us/blog/why-were-sticking-with-ruby-on-rails",{"title":2372,"description":2373,"ogTitle":2372,"ogDescription":2373,"noIndex":6,"ogImage":2374,"ogUrl":2375,"ogSiteName":697,"ogType":698,"canonicalUrls":2375,"schema":2376},"Why we're sticking with Ruby on Rails","GitLab CEO and co-founder Sid Sijbrandij makes the case for Ruby on Rails.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749668296/Blog/Hero%20Images/gitlab-ruby.jpg","https://about.gitlab.com/blog/why-were-sticking-with-ruby-on-rails","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Why we're sticking with Ruby on Rails\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Sid Sijbrandij\"}],\n        \"datePublished\": \"2022-07-06\",\n      }",{"title":2372,"description":2373,"authors":2378,"heroImage":2374,"date":2380,"body":2381,"category":14,"tags":2382},[2379],"Sid Sijbrandij","2022-07-06","\nWhen David Heinemeier Hansson created Ruby on Rails ([interview](https://corecursive.com/045-david-heinemeier-hansson-software-contrarian/)), he was guided by his experience with both PHP and Java. 
On the one hand, he didn’t like the way the verbosity and rigidness of Java made Java web frameworks complex and difficult to use, but appreciated their structural integrity. On the other hand, he loved the initial approachability of PHP, but was less fond of the quagmires that such projects tended to turn into.\n\n![Ruby vs. Java](https://about.gitlab.com/images/blogimages/ruby1.png)\n\nIt seems like these are exclusive choices: You either get approachable and messy or well-structured and hard to use, pick your poison. We used to make a very similar, and similarly hard, distinction between server-class operating systems such as Unix, which were stable but hard to use, and client operating systems such as Windows and MacOS that were approachable but crashed a lot.\n\nEveryone accepted this dichotomy as God-given until NeXT put a beautiful, approachable and buttery-smooth GUI on top of a solid Unix base. Nowadays, “server-class” Unix runs not just beautiful GUI desktops, but also most phones and smart watches.\n\nSo it turned out that approachability and crashiness were not actually linked except by historical accident, and the same turns out to be true for approachability and messiness in web frameworks: They are independent axes.\n\n![approachability and messiness](https://about.gitlab.com/images/blogimages/ruby2.png)\n\nAnd these independent axes opened up a very desirable open spot in the lower right hand corner: an approachable, well-structured web framework.\nWith its solid, metaprogrammable Smalltalk heritage and good Unix integration, Ruby proved to be the perfect vehicle for DHH to fill that desirable bottom right corner of the table with Rails: an extremely approachable, productive and well-structured web framework. 
\n\n![a well-structured framework](https://about.gitlab.com/images/blogimages/ruby3.png)\n \nWhen GitLab co-founder Dmitriy Zaporozhets decided he wanted to work on software for running his (and your) version control server, he also came from a PHP background. But instead of sticking with the familiar, he chose Rails. Dmitriy's choice may have been prescient or fortuitous, but it has served GitLab extremely well, in part because David succeeded in achieving his goals for Rails: approachability with good architecture.\n\n## Why modular?\n\nIn the preceding section, it was assumed as a given that modularity is a desirable property, but as we also saw it is dangerous to just assume things.  So why, and in what contexts, is modularity actually desirable?\n\nIn his 1971 paper [\"On the Criteria to be Used in Decomposing Systems into Modules\"](https://prl.ccs.neu.edu/img/p-tr-1971.pdf), David L. Parnas gave the following (desired) benefits of a modular system:\n\n- Development time should “be shortened because separate groups would work on each module with little need for communication.”\n- It should be possible to make “drastic changes or improvements in one module without changing others.”\n- It should be possible to study the system one module at a time.\n\nThe importance of reducing the need for communication was later highlighted by Fred Brooks in _[The Mythical Man Month](https://en.wikipedia.org/wiki/The_Mythical_Man-Month)_, with the additional communication overhead one of the primary reasons for the old saying that \"adding people to a late software project makes it later.\" \n\n## We don’t need microservices\n\nModularity has generally been as elusive as it is highly sought after, with the default architecture of most systems being the [Big Ball of Mud](http://laputan.org/mud/). 
It is therefore understandable that designers took inspiration from arguably the largest software system in existence: the World Wide Web, which is modular by necessity, it cannot function any other way.\n\nOrganizing your local software systems using separate processes, microservices that are combined using [REST](https://www.ics.uci.edu/~fielding/pubs/dissertation/fielding_dissertation.pdf) architectural style does help enforce module boundaries, via the operating system, but comes at significant costs. It is a very heavy-handed approach for achieving modularity.\n\nThe difficulties and costs of running what is now a gratuitously distributed system are significant, with some of the performance and reliability issues documented in the well-known [fallacies of distributed computing](https://en.wikipedia.org/wiki/Fallacies_of_distributed_computing). In short, the performance and reliability costs are significant, as function calls that take nanoseconds and never fail are replaced with network ops that are three to six orders of magnitude slower and do fail. Failures become much harder to diagnose if they must be traced across multiple services with very little tooling support.\nYou need a fairly sophisticated DevOps organization to successfully run microservices. This doesn't really make a difference if you run at a scale that requires that sophistication anyhow, but it is very likely that [you are not Google](https://blog.bradfieldcs.com/you-are-not-google-84912cf44afb?gi=1b82f8ef279a).\n\nBut even if you think you can manage all that, it is important to note that all this accidental complexity is on top of the original essential complexity of your problem, microservices do nothing to reduce complexity. 
And even the hoped-for modularity improvements are not in the least guaranteed, typically what happens instead is that you get a [distributed ball of mud](http://www.codingthearchitecture.com/2014/07/06/distributed_big_balls_of_mud.html).\n\n## Monorails\n\nBy making good architecture approachable and productive, Rails has allowed GitLab to develop a [modular monolith](https://medium.com/@dan_manges/the-modular-monolith-rails-architecture-fb1023826fc4). A modular monolith is the exact opposite of a distributed ball of mud: a well-structured, well-architected, highly modular program that runs as a single process and is as [boring](https://handbook.gitlab.com/handbook/values/#boring-solutions) as possible.\n\nAlthough structuring GitLab as a monolith has been extremely beneficial for us, we are not dogmatic about that structure. Architecture follows needs, not the other way around. And while Rails is excellent technology for our purposes, it does have a few drawbacks, one of them being performance. Luckily, only a tiny part of most codebases is actually performance critical. We use our own [gitaly](https://www.google.com/url?q=https://docs.gitlab.com/ee/administration/gitaly/&sa=D&source=docs&ust=1656441057979077&usg=AOvVaw11r4iMGjvs6PrtTJEkeTbO) daemon written in Go to handle actual git operations, and [PostgreSQL](https://thenewstack.io/two-sizes-fit-most-postgresql-and-clickhouse/) for non-repository persistence.\n\n## Open Core\n\nLast but not least, our modular monolith turns [our](/blog/gitlab-is-open-core-github-is-closed-source/) [Open Core](https://en.wikipedia.org/wiki/Open-core_model) business model from being just a nice theory into a practical [reality](https://www.cnbc.com/2021/10/14/gitlab-jumps-in-nasdaq-debut-after-pricing-ipo-above-expected-range.html). 
Although Rails does not accomplish this by itself, that would be our wonderful contributors and engineers, it does lay the proper foundations.\n\nIn order to reap the true [benefits](https://en.wikipedia.org/wiki/The_Cathedral_and_the_Bazaar) of open source, the source code that is made available must be approachable for contributors. In order to maintain architectural integrity in the face of contributions from a wide variety of sources, and to keep a clear demarcation line between the open and closed components, the code must be very well structured. Sound familiar?\n\nWouldn’t it be better to have a proper plugin interface? Or better yet, a services interface modeled on microservices? In a word: no. Not only do these approaches impose deployment and integration hurdles that go far beyond “I made a small change to the source code,\" they often enforce architectural constraints too rigidly. Anticipating all the future extension points is a fool's errand, one that we luckily did not embark on, and do not have to.\n\nWith our boring modular monolith, users and other third-party developers can and do contribute enhancements to the core product, giving us tremendous leverage, coupled with an unbeatable pace and scalability of innovation.\n",[1128,1488,1943],{"slug":2384,"featured":6,"template":683},"why-were-sticking-with-ruby-on-rails","content:en-us:blog:why-were-sticking-with-ruby-on-rails.yml","Why Were Sticking With Ruby On 
Rails","en-us/blog/why-were-sticking-with-ruby-on-rails.yml","en-us/blog/why-were-sticking-with-ruby-on-rails",{"_path":2390,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":2391,"content":2397,"config":2402,"_id":2404,"_type":16,"title":2405,"_source":18,"_file":2406,"_stem":2407,"_extension":21},"/en-us/blog/gitlab-is-the-single-source-of-truth-for-ecommerce-provider",{"title":2392,"description":2393,"ogTitle":2392,"ogDescription":2393,"noIndex":6,"ogImage":2394,"ogUrl":2395,"ogSiteName":697,"ogType":698,"canonicalUrls":2395,"schema":2396},"GitLab is the single source of truth for eCommerce provider","Swell uses GitLab company-wide and says the biggest advantage so far is the review operations capability.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749668755/Blog/Hero%20Images/swelllogo3.png","https://about.gitlab.com/blog/gitlab-is-the-single-source-of-truth-for-ecommerce-provider","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"GitLab is the single source of truth for eCommerce provider\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"GitLab\"}],\n        \"datePublished\": \"2022-06-23\",\n      }",{"title":2392,"description":2393,"authors":2398,"heroImage":2394,"date":2399,"body":2400,"category":14,"tags":2401},[1524],"2022-06-23","eCommerce platform provider [Swell](https://www.swell.is) was built to give entrepreneurs the opportunity to build the online business that they envision. A GitLab customer since 2021, GitLab has been adopted as Swell's one DevOps, project management, and support ticketing tool for the whole organization. It's the foundational platform that the business works on.\n\nSwell is using GitLab Premium in many different areas, including for product development and to build the platform infrastructure, says Nico Bistolfi, vice president of technology.\n\n\"GitLab is our source of truth for everything,\" Bistolfi says. 
Now, Swell is looking into expanding its usage of the platform to leverage features such as code quality, automation, and other types of dynamic application security and static application security.\n\n## GitLab for CI/CD\nSwell upgraded to the Premium version and the biggest advantage so far has been the review operations capability, Bistolfi says. The company has created environments for every merge request users make, and that replicates in production for testers to see what was changed, whether a fix was made, or how the new feature is working.\n\n\"We could not go to our software development lifecycle today without the review ops. That's something that is critical for us,\" Bistolfi says.\n\nGitLab is used for both continuous integration (CI) and continuous deployment (CD). While building the [CI/CD](/topics/ci-cd/) pipeline process is ongoing, Bistolfi says, “We are slowly changing it and relying more and more on GitLab” in areas, including application security.\n\nBefore moving to GitLab, Swell was using bare-metal servers. The company now uses GitLab’s container management solutions and all API updates are happening through the platform.\n\n## From inputting issues to resolution\nEveryone at Swell is using GitLab — not just developers — and for a variety of tasks. The company has created a way to process support tickets through the platform. Another use case is knowledge management.\n\n\"We find ourselves making some decisions from comments in GitLab,\" he says. The whole process from the time a ticket is created to being resolved is done within the platform.\n\nThe company culture is about full information transparency, Bistolfi says, particularly since Swell is fully remote and employees work from 11 different countries. So one goal is to maintain asynchronous communication.\n\nWhen an issue is created in the platform, a little bit of coding is required, but he said non-developer users have adapted well. 
The feedback so far has been that using GitLab has been frictionless.\n\n## Speed to delivery\nInitially, for some services, it took about 30 minutes to build and deploy an image. Now, the process has been decreased to between one and five minutes in most cases.\n\nSwell manually sets release dates for system improvements and, right now, there are about two a week. The company is working on automating the process for continuous delivery with the goal of soon having releases every couple of hours.\n\n## Team play\nSwell manages team backlogs, sprints, milestones, and future work using its own flavor of Kanban with what Bistolfi calls \"quick labels.\"\n\nEngineering teams are being scaled and, in addition to Kanban, some projects are done using Scrum. Changing their GitLab configuration has let teams measure velocity better.  \n\nA future goal is to gain visibility into team results, as well as use GitLab for project planning and management, he says.\n\n## GitLab as a product and company\nBistolfi is unequivocal in his enthusiasm for GitLab. \"We know that GitLab is there for us to continue growing,\" he says. \"We know we can rely on that. And something that I always tell a team when we are evaluating what we're going to do or how we're going to solve certain problems is that there are areas GitLab is just starting to innovate on or is just starting to launch new features.\"\n\nIf those areas are at 80% of what Swell needs, the company will continue to use GitLab. \"We need to have very, very strong reasons to look for another tool to integrate with GitLab.\" He added that \"we trust that GitLab is going in the right direction for us. 
In addition, we've gained efficiency in our ability to provide consistent test environments using GitLab Review Apps to reduce regressions and improve new feature development.\"\n\nThe Swell team also likes that GitLab provides thorough and complete information in its handbook, which has been very beneficial in helping the company manage things internally. \"That has been inspiring for many of us on the executive team,\" he notes.\n\nFor example, during the pandemic, Bistolfi put together a document called \"The Ultimate Guide for Swell Engineers,\" which contains three pages of information about culture, what to expect from teammates, and how to communicate and prioritize tasks.\n\nA lot of guidance came from the GitLab handbook, he adds.\n\nMoving forward with GitLab, Bistolfi says: \"We are incorporating most of the Security and Compliance tools in order to keep track and audit for our compliance. We plan to expand the usage to other projects, but we are already using container and dependency scanning, SAST, secrets detection, and license scanning for some of our core and more sensitive services.\"\n\nWhat Swell likes most about GitLab is the thoroughness of the tool. 
\"From an engineering perspective, 10 years ago, you would never have imagined all the features and capabilities that GitLab offers being incorporated into one platform,\" Bistolfi says.",[1128,793,1369,750,1528],{"slug":2403,"featured":6,"template":683},"gitlab-is-the-single-source-of-truth-for-ecommerce-provider","content:en-us:blog:gitlab-is-the-single-source-of-truth-for-ecommerce-provider.yml","Gitlab Is The Single Source Of Truth For Ecommerce Provider","en-us/blog/gitlab-is-the-single-source-of-truth-for-ecommerce-provider.yml","en-us/blog/gitlab-is-the-single-source-of-truth-for-ecommerce-provider",{"_path":2409,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":2410,"content":2416,"config":2421,"_id":2423,"_type":16,"title":2424,"_source":18,"_file":2425,"_stem":2426,"_extension":21},"/en-us/blog/how-to-ask-smarter-devops-questions",{"title":2411,"description":2412,"ogTitle":2411,"ogDescription":2412,"noIndex":6,"ogImage":2413,"ogUrl":2414,"ogSiteName":697,"ogType":698,"canonicalUrls":2414,"schema":2415},"How to ask smarter DevOps questions","Take your DevOps practice to the next level by asking 10 critical questions.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749667845/Blog/Hero%20Images/gl15.jpg","https://about.gitlab.com/blog/how-to-ask-smarter-devops-questions","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How to ask smarter DevOps questions\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Valerie Silverthorne\"}],\n        \"datePublished\": \"2022-06-22\",\n      }",{"title":2411,"description":2412,"authors":2417,"heroImage":2413,"date":2418,"body":2419,"category":14,"tags":2420},[1859],"2022-06-22","\n\nGitLab has [surveyed DevOps practitioners](/developer-survey/) for more than five years now. In that time, we have come to know what questions to ask to understand how well teams are doing with DevOps. 
In sharing these 10 questions, we aim to help you assess your own team’s capabilities and achieve smarter, faster DevOps.\n\n### How fast is your team releasing code today vs. one year ago?\n\nTracking release speed is like taking the temperature of your DevOps team. You’d like to think everything is going well, but you might be surprised. Occasionally DevOps teams report to us they are actually releasing code more slowly than in the past. \n\n### What stage(s) in the process are causing the most release delays?\n\nThis question will shine a spotlight on the areas in your DevOps practice that simply don’t work. Spoiler alert: The answer [will certainly be testing](/blog/the-software-testing-life-cycle-in-2021-a-more-upbeat-outlook/), though other things, from planning to code development and code review, might pop up, too.\n\n### How automated is your DevOps process?\n\nAsk this, but don’t just focus on testing, tempting as that might be. Also think about what else in the software development lifecycle would [benefit from automation](/blog/cd-automated-integrated/). Consider what getting that time back would afford you. Could you assign your developers and ops pros to other business-critical projects?\n\n### What’s been added to your DevOps tech stack over the last year?\n\nIt’s good to look back and take inventory of the technology you have in play. This is also data that can help inform what your next steps might be, such as adopting [GitOps](/topics/gitops/), [observability](/blog/observability-vs-monitoring-in-devops/), or [AI](https://www.youtube.com/watch?v=C08QVI99JLo).\n\n### How are your DevOps roles changing?\n\nIf your team is like others we’ve heard from, (big) changes are happening. 
Devs are picking up tasks that have traditionally been owned by ops, ops is becoming anything from a DevOps coach to a [platform engineer](/topics/devops/what-is-a-devops-platform-engineer/) or a cloud expert, and security is likely now embedded in development teams.\n\n### How does security integrate with DevOps in your organization?\n\nThe most successful DevOps teams have figured out how to [bridge the dev and sec divide](/blog/developer-security-divide/). Whether your team has a [security champion](/blog/why-security-champions/) or actually embeds sec pros on the dev team, this is a critical piece in the process to release safer software faster.\n\n### What advanced technologies are you using (or considering) in your DevOps practice?\n\n“Bots” can test code, [AI can review code](/blog/ai-in-software-development/), and a [low code/no code tool](/blog/low-code-no-code/) will make [citizen developers](https://www.gartner.com/en/information-technology/glossary/citizen-developer) out of anyone in the organization. Now is definitely the time to make sure your DevOps team is future-proofing the tech stack.\n\n### Do you have a plan for governance and compliance of your software supply chain?\n\nTo keep the [software supply chain secure](/blog/elite-team-strategies-to-secure-software-supply-chains/), DevOps teams need visibility into and control over the entire development lifecycle. Can you easily deal with audits or attestations of compliance? 
Mature governance and compliance processes are essential in all industries today, not just those that are highly regulated.\n\n### What advanced practices are you using (or considering) in your DevOps environment?\n\nWhether it’s [Infrastructure as Code (IaC)](/topics/gitops/infrastructure-as-code/), GitOps, or [MLOps](/blog/introducing-modelops-to-solve-data-science-challenges/), cutting-edge practices can jumpstart your releases and bring new and interesting opportunities to DevOps teams.\n\n### Do you regularly assess DevOps careers and roles on your team?\n\nHappy team members [really are more productive](/blog/why-software-developer-job-satisfaction-matters-and-how-to-make-it-happen/), so consider this a PSA to keep career growth conversations a priority. \n\nIn considering these 10 questions, your team will gain a fuller picture of your DevOps capabilities and how to address the technology and talent gaps you have identified.\n\n",[1128,836,1428],{"slug":2422,"featured":6,"template":683},"how-to-ask-smarter-devops-questions","content:en-us:blog:how-to-ask-smarter-devops-questions.yml","How To Ask Smarter Devops Questions","en-us/blog/how-to-ask-smarter-devops-questions.yml","en-us/blog/how-to-ask-smarter-devops-questions",{"_path":2428,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":2429,"content":2434,"config":2439,"_id":2441,"_type":16,"title":2442,"_source":18,"_file":2443,"_stem":2444,"_extension":21},"/en-us/blog/battling-toolchain-technical-debt",{"title":2430,"description":2431,"ogTitle":2430,"ogDescription":2431,"noIndex":6,"ogImage":2413,"ogUrl":2432,"ogSiteName":697,"ogType":698,"canonicalUrls":2432,"schema":2433},"Battling toolchain technical debt","DevOps teams can hinder the software development lifecycles and application performance if they let their toolchains become unruly. 
Read how GitLab can help reduce that technical debt.","https://about.gitlab.com/blog/battling-toolchain-technical-debt","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Battling toolchain technical debt\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Sandra Gittlen\"}],\n        \"datePublished\": \"2022-06-21\"\n      }",{"title":2430,"description":2431,"authors":2435,"heroImage":2413,"date":2436,"body":2437,"category":14,"tags":2438},[746],"2022-06-21","\nDevelopers love their tools. Operations teams love their tools. And security teams love their tools. As Dev, Sec, and Ops consolidate onto a single DevOps platform, toolchain technical debt becomes exponentially more costly and complex.\n\n“Tools should be in the background enabling excellent development, operations, and security practices. However, DevOps teams are often led by their tools rather than the other way around and that can hinder all aspects of the software development lifecycle (SDLC),” says [Cindy Blake](https://gitlab.com/cblake), CISSP, director of product and solutions marketing at GitLab.\n\nAn April 2022 Gartner® report titled “Beware the DevOps Toolchain Debt Collector” notes that “many organizations find themselves with outdated, poorly governed, and unmanageable toolchains as they scale DevOps initiatives.”\n\nOne of the key findings, according to Gartner, is that “most organizations create homegrown toolchains, often leveraging the tools beyond their functional design. 
This not only leads to a fragmented toolchain, but also creates complications when tooling needs to be scaled, replaced, or updated.”\n\nToolchain technical debt introduces complexity as companies shift critical tasks such as reliability, governance, and compliance left in the SDLC.\n\n> Discover how GitLab 15 can help your team deliver secure software, while maintaining compliance and automating manual processes.\nSave the date for our GitLab 15 [launch event](https://page.gitlab.com/fifteen) on June 23rd!\n\n## No time for technical debt\n\nFew DevOps teams give toolchain upkeep the time and attention it requires. According to [GitLab’s 2021 DevSecOps\nsurvey](/images/developer-survey/gitlab-devsecops-2021-survey-results.pdf), nearly two-thirds of survey respondents, 61%, said they spend 20% or less of their time on toolchain integration and maintenance each month.\n\n“Developers face challenges and time constraints while maintaining these complex, stand-alone tool siloes, building fragility and technical debt that the [infrastructure and operations] leader has to deal with,” Gartner states. The research firm adds, “These outdated toolchains further increase overhead costs, magnify technical risks, add operational toil, and limit business agility.”\n\nBlake agrees: “Complex toolchains inhibit the ability to govern the software development and deployment process. Policies must be managed across tools and visibility into code changes and changes to its surrounding infrastructure become difficult to see and track. Time is wasted on managing the toolchain instead of value-added work.”\n\n## Getting purpose-driven\nThe remedy to toolchain sprawl and subsequent debt is to change strategy. 
Instead of putting energy into figuring out how to maintain one-off tools, DevOps teams should focus on how to enable processes and policies that support simplicity, control, and visibility across the SDLC.\n\n“These are the characteristics needed to meet reliability, governance, and compliance demands. A united platform like GitLab helps you do that,” Blake says.\n\nGartner states: “Successful infrastructure and operations leaders reduce technical debt and sustainably scale DevOps toolchain initiatives across the organization by using a prioritized, iterative strategy that minimizes friction in making changes to toolchains and more quickly delivers customer value.”\n\nAdopting a purpose-built platform instead of a complex and ad-hoc toolchain also eases an organization’s ability to automate the SDLC. “Automation abstracts complexity away from the developer and provides guard rails so DevOps teams gain greater efficiency, accuracy, and consistency,” Blake says. In addition, automation reduces the audit footprint in terms of what needs oversight and inspection.\n\nPlatforms also support automation throughout operations, including building and\ntesting infrastructure as code, so that “you can eliminate the variables when you’re trying to debug an application,” she says. This speeds troubleshooting response times and reduces application downtime.\n\nFor instance, GitLab, the One DevOps Platform, features [dependency\nlists](https://docs.gitlab.com/ee/user/application_security/dependency_list/), also known as software bill of materials (SBOM), that show which dependencies were used and help to identify where problems exist. “GitLab also helps you avoid problems altogether by consistently scanning dependencies according to policies and compliance standards that the platform provides,” Blake says. DevOps teams can easily see what changes were made when and by whom. 
“That visibility is critical when trying to resolve issues and prevent them from happening again,” she says.\n\n## Reclaim your DevOps team’s time\nBy adopting a single DevOps platform, organizations can reclaim developer, security, and operations time that has been spent stitching tools together or optimizing for one developer’s tool, and then backtracking through toolchains when an application breaks because those tools can’t co-exist.\n\n“DevOps teams have a lot on their plates and trying to manage unruly toolchains is simply a waste of time. You should be creating state-of-the-art software, not manually integrating and maintaining legacy tools,” Blake says.\n\nShe emphasizes that GitLab is not “rip and replace”; it’s a platform where everything needed for DevOps comes together in one place. IT leadership benefits from this united approach as well. [Value stream\nanalytics](/solutions/value-stream-management/) provide insight into your end-to-end software throughput, helping optimize IT resources most efficiently and enabling a flexible, responsive business outcome. “We meet DevOps teams where they are and put the user – whether they be a developer, operations, or security professional – in the center of the platform,” she says.\n\n[Try GitLab Ultimate for free](/free-trial/\n) for 30 days.\n\n_GARTNER is a registered trademark and service mark of Gartner, Inc. and/or its affiliates in the U.S. and internationally and is used herein with permission. 
All rights reserved._\n",[1128,728,727],{"slug":2440,"featured":6,"template":683},"battling-toolchain-technical-debt","content:en-us:blog:battling-toolchain-technical-debt.yml","Battling Toolchain Technical Debt","en-us/blog/battling-toolchain-technical-debt.yml","en-us/blog/battling-toolchain-technical-debt",{"_path":2446,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":2447,"content":2452,"config":2458,"_id":2460,"_type":16,"title":2461,"_source":18,"_file":2462,"_stem":2463,"_extension":21},"/en-us/blog/gitlab-value-stream-management-and-dora",{"title":2448,"description":2449,"ogTitle":2448,"ogDescription":2449,"noIndex":6,"ogImage":2413,"ogUrl":2450,"ogSiteName":697,"ogType":698,"canonicalUrls":2450,"schema":2451},"Improving visibility: GitLab's value stream and DORA metrics","Optimize DevOps with the new DORA metrics in GitLab Value Stream Management.","https://about.gitlab.com/blog/gitlab-value-stream-management-and-dora","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Break the black box of software delivery with GitLab Value Stream Management and DORA Metrics\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Haim Snir\"}],\n        \"datePublished\": \"2022-06-20\",\n      }",{"title":2453,"description":2449,"authors":2454,"heroImage":2413,"date":2455,"body":2456,"category":14,"tags":2457},"Break the black box of software delivery with GitLab Value Stream Management and DORA Metrics",[833],"2022-06-20","\n\nOur customers frequently tell us that despite being very effective DevOps practitioners, they still struggle to build a data-driven DevOps culture. They find it especially hard to answer the fundamental question:\n\n_What are the right things to measure?_\n\nThis becomes more challenging in enterprise organizations when there are hundreds of different development groups, and there's no normalization between how things are done or measured. 
Because of this, we see a strong interest from customers for metrics that would allow them to standardize between teams and benchmark themselves against the industry.\n\n![Value Streams Analytics helps you visualize and manage the DevOps flow from ideation to customer delivery.](https://about.gitlab.com/images/blogimages/2022-06-dora-vsa-overview.png){: .shadow}\nValue Streams Analytics helps you visualize and manage the DevOps flow from ideation to customer delivery.\n{: .note.text-center}\n\n## What Are DORA Metrics? \n\nWith the continued acceleration of digital transformation, most organizations realize that technology delivery excellence is a must for long-term success and competitive advantage. After seven years of data collection and research, the [DORA's State of DevOps research program](https://www.devops-research.com/research.html) has developed and validated four metrics that measure software delivery performance: [(1) deployment frequency, (2) lead time for changes, (3) time to restore service and (4) change failure rate.](https://docs.gitlab.com/ee/user/analytics/#devops-research-and-assessment-dora-key-metrics) \n\nIn GitLab, The One DevOps Platform, [Value Stream Analytics (VSA)](/solutions/value-stream-management/) surfaces a single source of insight for each stage of the software development process. The analytics are available out of the box for teams to drive performance improvements.\n\n## What does DORA bring to Value Stream Analytics?\n\nValue Stream Analytics (VSA) measures [the entire journey from customer request to release](https://docs.gitlab.com/ee/user/group/value_stream_analytics/) and automatically displays the overall performance of the stream. Each stage in the value stream is transparent and compliant in a shared experience for everyone in the company. 
\n\nThis makes the VSA the single source of truth (SSoT) about what's happening within the entire software supply chain, with DORA’s metrics as the key measure of the value stream outputs. \n\n## How do Value Stream Analytics work?\n\nValue stream analytics measures the median time spent by issues or merge requests in each development stage.\n\nAs an example, a stage might begin with the addition of a label to an issue and end with the addition of another label:\n\n![Value stream analytics measures each stage from its start event to its end event.](https://about.gitlab.com/images/blogimages/2022-06-dora-vsa-stage.png){: .shadow}\nValue stream analytics measures each stage from its start event to its end event.\n{: .note.text-center}\n\nFor each stage, a table list displays the workflow items filtered in the context of that stage. [In stages based on labels](https://docs.gitlab.com/ee/user/group/value_stream_analytics/#label-based-stages-for-custom-value-streams), the table will list Issues, and in stages based on Commits, it will list MRs:\n\n![The VSA MR table provides a deeper insight into stage time breakdown .](https://about.gitlab.com/images/blogimages/2022-06-dora-vsa-mr.png){: .shadow}\nThe VSA MR table provides a deeper insight into stage time breakdown.\n{: .note.text-center}\n\nThe tables provide a deep dive into the stage performance and allow users to answer questions such as:\n\n- How to easily see bottlenecks that are slowing down the delivery of value to customers?\n- How to reduce the time spent in each stage so I can deliver features faster and stay competitive? \n- How can we develop code faster?\n- How can we hand off to QA faster?  How can we push changes to Production more quickly?\n\nUsing the Filter results text box, you can filter by a project (example below) or parameter (e.g., Milestone, Label). 
\n\n![Value stream analytics filtering.](https://about.gitlab.com/images/blogimages/2022-06-dora-vsa-filter.png){: .shadow}\nValue stream analytics filtering.\n{: .note.text-center}\n\nNo login is required to view [Value stream analytics for projects](https://gitlab.com/gitlab-org/gitlab/-/value_stream_analytics) where you can become familiar with stream filtering, default stages and deep-dive tables. For a full view of the DORA metrics, you have to log in with your GitLab [Ultimate-tier](https://about.gitlab.com/pricing/) account or sign up for a [free trial](https://about.gitlab.com/free-trial/).\n\n## How to understand DevOps maturity and benchmark progress with the DORA metrics?\n\nDORA metrics can also provide answers to questions not related to VSA, such as:\n\n- How to become an elite team of DevOps professionals?\n- How do I perform vs. industry standards? \n- Is the organization better at DevOps this year than last?\n\n## Learn more about VSA and DORA:\n\n- Check out the GitLab Speed Run about DORA metrics in VSA:\n\u003Ciframe width=\"560\" height=\"315\" src=\"https://www.youtube.com/embed/wQU-mWvNSiI\" frameborder=\"0\" allow=\"accelerometer; autoplay; encrypted-media; gyroscope; picture-in-picture\" allowfullscreen>\u003C/iframe>\n\n- [GitLab DORA metrics API documentation](https://docs.gitlab.com/ee/api/dora/metrics.html)\n\n- [Step-by-step instructions for creating a custom value stream](https://docs.gitlab.com/ee/user/group/value_stream_analytics/#create-a-value-stream-with-gitlab-default-stages)\n",[1466,1128,836,728,727],{"slug":2459,"featured":6,"template":683},"gitlab-value-stream-management-and-dora","content:en-us:blog:gitlab-value-stream-management-and-dora.yml","Gitlab Value Stream Management And 
Dora","en-us/blog/gitlab-value-stream-management-and-dora.yml","en-us/blog/gitlab-value-stream-management-and-dora",{"_path":2465,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":2466,"content":2471,"config":2475,"_id":2477,"_type":16,"title":2478,"_source":18,"_file":2479,"_stem":2480,"_extension":21},"/en-us/blog/take-our-devops-quiz",{"title":2467,"description":2468,"ogTitle":2467,"ogDescription":2468,"noIndex":6,"ogImage":2413,"ogUrl":2469,"ogSiteName":697,"ogType":698,"canonicalUrls":2469,"schema":2470},"Take our DevOps quiz!","From random terms to fun facts, will your DevOps knowledge be up to the task? Take our quiz and find out.","https://about.gitlab.com/blog/take-our-devops-quiz","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Take our DevOps quiz!\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"GitLab\"}],\n        \"datePublished\": \"2022-06-16\",\n      }",{"title":2467,"description":2468,"authors":2472,"heroImage":2413,"date":2473,"body":2474,"category":14},[1524],"2022-06-16","We're hoping to stump you...and we stumped ourselves on some of these questions for sure. 
There are just 10 questions, so dive in, and you'll see your score at the end.\n\n\u003Cdiv data-tf-widget=\"IcsqQQ0H\" data-tf-iframe-props=\"title=Test your DevOps knowledge!\" data-tf-medium=\"snippet\" style=\"width:100%;height:400px;\">\u003C/div>\u003Cscript src=\"//embed.typeform.com/next/embed.js\">\u003C/script>",{"slug":2476,"featured":6,"template":683},"take-our-devops-quiz","content:en-us:blog:take-our-devops-quiz.yml","Take Our Devops Quiz","en-us/blog/take-our-devops-quiz.yml","en-us/blog/take-our-devops-quiz",{"_path":2482,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":2483,"content":2488,"config":2494,"_id":2496,"_type":16,"title":2497,"_source":18,"_file":2498,"_stem":2499,"_extension":21},"/en-us/blog/gitlab-and-the-three-ways-of-devops",{"title":2484,"description":2485,"ogTitle":2484,"ogDescription":2485,"noIndex":6,"ogImage":2413,"ogUrl":2486,"ogSiteName":697,"ogType":698,"canonicalUrls":2486,"schema":2487},"GitLab and the three ways of DevOps","DevOps isn't just an esoteric philosophy - it actually is a roadmap for faster and safer software releases, if you choose the right tool. Here's how to take the principles of DevOps and get the most out of the One DevOps Platform.","https://about.gitlab.com/blog/gitlab-and-the-three-ways-of-devops","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"GitLab and the three ways of DevOps\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Vlad Budica\"}],\n        \"datePublished\": \"2022-06-15\",\n      }",{"title":2484,"description":2485,"authors":2489,"heroImage":2413,"date":2491,"body":2492,"category":14,"tags":2493},[2490],"Vlad Budica","2022-06-15","\n\nMost of my daily conversations are focused on features and very deep technical concepts, which provide valuable and actionable insight. However, we miss the fact that tools and technology are leveraged to solve business challenges. 
When talking about features and technology, it's very easy to see the possible financial gain when replacing different tools with a unified platform. But it's missing all the improvement opportunities that will provide value at all the levels of a company from developers to executives.\n\nThe reality is that we're working in very complex systems, making it hard to see the forest from the trees. As an engineer, you're focused on solving the next immediate problem that arises without taking a step back to reevaluate the system itself. In some cases, the problem itself is created by the design of our software development lifecycle (SDLC). As an executive, it's difficult to balance the effort required to address the technical challenges with the pressure that comes from the business in this ever-increasing rhythm of change.\n\nMy goal with this article is to provide a high-level map that contains the most important DevOps principles and a shortcut. I know this is a bold statement as there is a lot of literature on this topic but my approach will be different.\n \nFirst, I'm going to use the [Three Ways](https://itrevolution.com/the-three-ways-principles-underpinning-devops/) as coined in [The DevOps Handbook](https://www.amazon.com/DevOps-Handbook-World-Class-Reliability-Organizations/dp/1942788002) because those are the three foundational principles of DevOps as they were refined from Lean, the Toyota Production System, Theory of Constraints, Six Sigma, and System Thinking principles. Second, I'll reference GitLab as the tool of choice because I think a good tool lets you focus on the work at hand, and GitLab does just that.  \n\n> You’re invited! Join us on June 23rd for the [GitLab 15 launch event](https://page.gitlab.com/fifteen) with DevOps guru Gene Kim and several GitLab leaders. 
They’ll show you what they see for the future of DevOps and The One DevOps Platform.\n\nHere is a short description of what the Three Ways are, what they're about, and why you should care.\n\n## First Way: Maximize flow\n\nThe First Way is all about making work/value flow better through the whole value stream (left to right), and to do that, we need to have a systems thinking approach and always look at the end-to-end result. In the case of IT, this means we optimize for speed from the moment we had the idea, to generating value with software running in production.\n\nWe need to have a good understanding of the system to find potential bottlenecks and areas of improvement. Our improvements should always lead to better overall performance, be aware of the cases in which local enhancements lead to global degradation, and avoid that.\n\nIn this process, it is crucial to stop defects from passing downstream from one workflow stage to another. Why? Because defects generate waste (of time and resources).\n\n## Second Way: Feedback loops\n\nThe Second Way deals with feedback loops, amplifying and shortening feedback loops so that we get valuable insight into the work we're doing. The feedback can be related to the code that's written or the improvement initiatives. Feedback loops maximize flow from right to left of the value stream.\n\nQuick, strong feedback loops help build quality into the product and ensure that we're not passing defects downstream. The quicker we do this the quicker and cheaper we can solve them, continuously keeping our software in a deployable state. It's easier for a developer to fix a bug when they are working on that change, and the code and the thought process are fresh in their mind. Suppose days or even weeks pass between the moment of the commit and the moment we realize there is a problem with the change. 
It will be significantly harder to address the problem, not to mention that we probably realized the problem only when trying to deploy the software and we have a service that's not working on our hands. On the flip side, feedback loops enable learning and experimentation, a point on which I’ll return a bit later.\n\nUsually, more developers lead to more productivity but, as presented in [The State of DevOps Report](https://cloud.google.com/blog/products/devops-sre/announcing-dora-2021-accelerate-state-of-devops-report), this is true only for high performers. Why? If we have a team of 50 developers and problems aren't immediately detected, technical debt builds up. Things will only get worse when we have 100 developers because they will generate even more technical debt with every development cycle. A natural tendency would be to add more developers in the hope velocity will get better, but it will degrade, so we add even more developers, and things degrade even more, and deployment frequency starts to suffer as it takes a lot of time to fix all the problems that came from upstream in order to get to a deployable state.\n\n## Third Way: Continuous experimentation and learning\n\nThe Third Way is about creating a culture of trust where continuous experimentation and learning can thrive. This leverages the first two ways in order to be successful.\n\nMaking work flow easily through the value stream enables us to experiment and even take some risks, while failing fast and inexpensively. Feedback loops act as the guardrails that help us keep the risk in check but also facilitate learning because learning happens only when strong fast feedback is available. We can have a scientific approach, experiment with things, and extract the learning and improvement that results from these experiments and their feedback.\n\nThis is an iterative process that will lead to mastery (through increased repetition). 
This should be coupled with an environment where this local learning becomes global and is integrated into the daily work of all the teams. For this approach to work and start getting some results, 20% of our time should be reserved for these improvement activities. I'm aware how difficult it can be to carve 20% of your time for improvement initiatives when dealing with urgent problems is your full-time job. Protecting this improvement time helps us pay our technical debt and make sure things are not spiraling out of control.\n\n## GitLab and the Three Ways\n\t\nNow that we presented the Three Ways of DevOps, maximizing flow (left to right), feedback loops (maximizing flow right to left) and having a continuous learning process, implementing them requires some effort from a tooling and process perspective.\n\nIt’s time to introduce GitLab into the picture, the only DevOps platform that covers the whole SDLC. Why is this useful for you? Because there is a synergy that happens when all the capabilities you need are provided in the same platform, the result is more than the sum of the components. Additionally, a good tool lets you focus on your work, not on the tool itself, so you can spend more time and effort driving your DevOps transformation. The fact that you’ll spend less money and time integrating different tools is the first immediate return of your investment.\n\nWhen the goal is to maximize flow from left to right, GitLab can facilitate that, starting from idea to production. Having the benefit of being a platform built from the ground up, work can flow from Planning to the commit and source code management stage and forward to CI/CD seamlessly. Any person involved in the SDLC can perform their work from the same UI. All the information they need is available without a need to switch through different UIs while paying the mental context-switching cost associated when using disparate tooling. 
\n\nGitLab provides different control mechanisms to make sure that if defects are introduced they are isolated and they don’t move downstream. Working in short-lived feature branches, different controls around merging and MR Request Approval rules act as gates. \n\nBy having everything on the same platform it’s easier to understand the whole flow of work, coupling this with our Value Stream Metrics enables everyone involved to get a better understanding of the overall system and find potential bottlenecks and improvement opportunities.\n\n### Improved flow\n\nAs mentioned, flow in one direction - left to right - is not enough to deliver better software products faster. Feedback loops that are quick and provide strong feedback are crucial for great business outcomes. From a developer perspective, the results of the CI pipeline provides immediate feedback about your change. If this pipeline contains security scans it’s even better. Providing feedback from a security standpoint ensures that we’re not deploying vulnerable code and it gives the developer the opportunity to go back and fix it immediately. This is very actionable feedback that also provides a learning opportunity because the security reports come with information about the vulnerabilities, and also where possible, a potential solution to the vulnerability. All this is available for you without any additional work to integrate different tools.\n\nSwitching perspectives, someone that needs to review or approve a code change has everything they need at their fingertips in one place. It’s straightforward to pull in or “@mention” other necessary parties and they’ll get access to all necessary context. A decision can be made immediately and it’s based on accurate and clear feedback that you can trace back to the initial idea. \n\n### Metrics matter\n\nTaking another step back, we get different metrics (Value Stream, Contribution) at the project level. 
This is one of the advantages that comes with a platform approach, and these insights are very easy to obtain and feed back into the process. When doing software development at scale, more senior managers need this feedback at an even higher level, and, therefore, these are available across multiple teams, projects, or departments. All this information is very valuable from a current perspective, but also it helps guide and shape business decisions. If the velocity isn’t what is needed by the business we can look to remove bottlenecks, improve things or invest in some key areas.\n\nWith these two capabilities in place, we have a framework in which we can iterate quickly and safely. Experimentation becomes easy and very safe, we can test different business hypotheses, and see which ones work best with our customers. This should happen on an ongoing basis because this is the cornerstone of innovation.\n\n### Context is critical \n\nEvery experiment that we perform, every problem that we solve becomes valuable learning that should be accessible to everyone in the organization. Having everything (context, actions, results, learning) in one place enables us to open things up so that everyone can contribute. This requires an environment of trust where everyone feels comfortable to run small experiments that lead to improvements, and where these improvements can diffuse in your entire organization. By having a tool that just works and provides everything you need without any additional work, you gain back capacity that you can use to improve your product, overall system, or organization.\n\nIt’s been a long journey up to this point, with the purpose of taking a look beyond immediate feature comparisons and the immediate financial gain that is realized when replacing multiple tools with one. We looked at the core principles of DevOps as a map in your DevOps transformation and at GitLab as a tool to facilitate that. 
Improving very complex systems is hard, driving that change through your company is a challenge, knowing that you have a tool that just delivers on your needs you can focus on developing code and on your continuous improvement efforts.\n\nI hope this is useful to everyone involved in the SDLC, from the engineers who need to work with and within the system everyday, to senior leaders who need to deliver business results.\n",[1128,269,1943],{"slug":2495,"featured":6,"template":683},"gitlab-and-the-three-ways-of-devops","content:en-us:blog:gitlab-and-the-three-ways-of-devops.yml","Gitlab And The Three Ways Of Devops","en-us/blog/gitlab-and-the-three-ways-of-devops.yml","en-us/blog/gitlab-and-the-three-ways-of-devops",{"_path":2501,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":2502,"content":2507,"config":2513,"_id":2515,"_type":16,"title":2516,"_source":18,"_file":2517,"_stem":2518,"_extension":21},"/en-us/blog/gitlab-15-the-retrospective",{"title":2503,"description":2504,"ogTitle":2503,"ogDescription":2504,"noIndex":6,"ogImage":2413,"ogUrl":2505,"ogSiteName":697,"ogType":698,"canonicalUrls":2505,"schema":2506},"GitLab 15: The retrospective","GitLab was founded in 2011 but that was a world nearly unrecognizable today. Here's a look back at what life was like then.","https://about.gitlab.com/blog/gitlab-15-the-retrospective","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"GitLab 15: The retrospective\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Brendan O'Leary\"}],\n        \"datePublished\": \"2022-06-13\",\n      }",{"title":2503,"description":2504,"authors":2508,"heroImage":2413,"date":2510,"body":2511,"category":14,"tags":2512},[2509],"Brendan O'Leary","2022-06-13","\nNo cloud native, no containers, and no remote work: Those were just a few of the things _missing_ from the technology landscape in 2011 when we launched GitLab 1.0. It’s been a journey, for sure. 
Here’s a look back at how far we’ve traveled to get to GitLab 15.\n\n## It started with source code management\n\nIn the beginning of GitLab there was source code management (SCM)... and that was it. Continuous integration (CI) became part of GitLab because our co-founder Dmitriy Zaporozhets got tired of having to keep the CI servers running separately, so we decided to bring continuous integration into the mix. Even then we knew it didn’t make sense for companies to “DIY” critical parts of their process. That being said, it did feel counterintuitive to bring SCM and CI together, but we tried it anyway. Continuous delivery (CD) eventually evolved out of the CI/SCM integration, but it is crazy to think that when we started GitLab, CI/CD was not really a consideration.\n\n## DIY DevOps really did exist\n\nWhat people were talking about, though, was DevOps, and specifically DIY DevOps because back then it was completely normal for teams to assemble a bunch of tools and call it done. When we would talk about the importance of fewer tools and more integration, people would turn up their noses. We heard a lot of “different tools for different things” and “many have sharp tools.” Today we know that a DevOps platform increases development speed and  release cadences. But back then, gluing together tools was seen as normal.\n\n## What’s old is new again\n\nBack in the day there were lots of tools and also very different programming languages than we reach for today. In the 2014 era, developers often wrote code in Ruby or JavaScript, and kept things layers away from the microprocessor. Over the years, that’s changed drastically. [Rust](/blog/rust-programming-language/) and Go – as just two examples – have brought us back to the processor and reflect today’s modern programming styles. 
It’s another sign of how drastically things have shifted over time.\n\n## It wasn’t cloud-y\n\nThe cloud was in its infancy when GitLab started and at the time we all thought it was probably a great solution for startups or small businesses, but perhaps not something that would ever be in widespread use. Fast-forward to today where most companies run their infrastructures in the cloud. Now it’s widely accepted a cloud native architecture helps teams deliver better software faster and cloud skepticism has drifted away.\n\n## Security was siloed\n\nSecurity teams, and tools, were completely separate entities when GitLab began and that, of course, made doing something inherently difficult even more so. Devs were asked to fix bugs without any context, process, or knowledge of deployment status, and naturally weren’t very excited about it all. Realizing this, we began slowly adding scans to our CI/CD steps so that security was part of the pipeline and not separate from it. The goal is to let developers and teams deal with security in an incremental way, rather than a large to-do list at the end of the process. And that [progress is ongoing](/blog/one-devops-platform-can-help-you-achieve-devsecops/). \n\n## Code review wasn’t integrated\n\nEleven years ago, code review wasn’t that different from security, i.e., it was something done in a distant time and place and without context. Today, merge requests are the hub of all the reviews, including code, security, and compliance, and the concept of “review” is firmly  embedded in the process. Code review itself is now getting a boost from machine learning (ML) with “suggested reviewer,” [a feature we’ve added in beta](/blog/unreview-a-year-later-how-gitlab-is-being-transformed-by-ml-powered-code-review/) at the time of this writing but will be coming to all of GitLab at some point during the 15.x releases. \n\n> You’re invited! 
Join us on June 23rd for the [GitLab 15 launch event](https://page.gitlab.com/fifteen) with DevOps guru Gene Kim and several GitLab leaders. They’ll show you what they see for the future of DevOps and The One DevOps Platform.\n\n## Open source\n\nIt’s fair to say the open source community is stronger and more visible today than it was 11 years ago. GitLab came from the open source community and we continue to proudly define our company and product as open source. Through the years, we’ve tried to keep the open source enthusiasm going by creating an environment where [customers can and do contribute regularly](/blog/how-you-contribute-to-gitlabs-open-devops-platform/) to our product. We want to continue to preserve GitLab as an open source project as well as our community and the company that sustains it all.\n\n## It’s remotely possible \n\nAnd we can’t have a comprehensive retrospective without looking at the concept of remote work. It was practically unheard of in 2011 and, though it’s been normalized today, we spent a long time taking this journey alone. 
So today’s reality – that [successful asynchronous work](/blog/five-ways-to-scale-remote-work/) means having a platform to enable it – is especially satisfying for us.\n",[1128,111,750],{"slug":2514,"featured":6,"template":683},"gitlab-15-the-retrospective","content:en-us:blog:gitlab-15-the-retrospective.yml","Gitlab 15 The Retrospective","en-us/blog/gitlab-15-the-retrospective.yml","en-us/blog/gitlab-15-the-retrospective",{"_path":2520,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":2521,"content":2526,"config":2531,"_id":2533,"_type":16,"title":2534,"_source":18,"_file":2535,"_stem":2536,"_extension":21},"/en-us/blog/how-to-automate-software-delivery-using-quarkus-and-gitlab",{"title":2522,"description":2523,"ogTitle":2522,"ogDescription":2523,"noIndex":6,"ogImage":1660,"ogUrl":2524,"ogSiteName":697,"ogType":698,"canonicalUrls":2524,"schema":2525},"How to automate software delivery using Quarkus and GitLab","Here's a step-by-step guide to automated software delivery using Supersonic Subatomic Java (Quarkus) and GitLab.","https://about.gitlab.com/blog/how-to-automate-software-delivery-using-quarkus-and-gitlab","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How to automate software delivery using Quarkus and GitLab\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Cesar Saavedra\"}],\n        \"datePublished\": \"2022-06-09\",\n      }",{"title":2522,"description":2523,"authors":2527,"heroImage":1660,"date":2528,"body":2529,"category":14,"tags":2530},[1344],"2022-06-09","\n\nIn this day and age, organizations need to deliver innovative solutions faster than ever to their customers to stay competitive. 
This is why solutions that speed up software development and delivery, such as Quarkus and GitLab, are being adopted by teams across the world.\n\n[Quarkus](https://quarkus.io/), also known as the Supersonic Subatomic Java, is an open source Kubernetes-native Java stack tailored for OpenJDK HotSpot and GraalVM, crafted from respected Java libraries and standards. Quarkus has been steadily growing in popularity and use because of the benefits that it delivers: cost savings, faster time to market/value, and reliability. Quarkus offers two modes: Java and native. Its Java mode builds your application using the JDK and its native mode compiles your Java code into a native executable.\n\nGitLab, the One DevOps Platform, includes capabilities for all DevOps stages, from planning to production, all with a single model and user interface to help you ship secure code faster to any cloud and drive business results. Besides DevOps support, GitLab also offers GitOps support.\n\nThe combination of Quarkus and GitLab can empower your developers and operations teams to collaborate better, spend more time innovating to deliver business value and differentiating capabilities to end users.\n\nIn this article, we show how to automate the software delivery of a generated Quarkus application in Java mode using GitLab Auto DevOps. Below we list the steps how to accomplish this.\n\n## Prerequisite\n\nThe prerequisite for the subsequent instructions is to have a K8s cluster up and running and associated to a group in your GitLab account. 
For an example on how to do this, please watch this [video](https://youtu.be/QRR3WuwnxXE).\n\n## Generate your Quarkus project using the generator and upload to GitLab\n\n- From a browser window, point to the Quarkus generator site, https://code.quarkus.io, and click on the button **Generate your application**.\n\n![Generate Quarkus app](https://about.gitlab.com/images/blogimages/quarkusone.png){:small.center.}\n\nGenerate a sample Quarkus application using the generator\n{: .note.text-center}\n\n- On the popup window, click on the button **DOWNLOAD THE ZIP**, to download a sample Quarkus application in a ZIP file to your local machine. The downloaded file is named `code-with-quarkus.zip`.\n\n- Unzip the file on your local machine in a directory of your choice. This will create a new directory called `code-with-quarkus` with all the files for the sample Quarkus application.\n\n- From a browser window, open https://gitlab.com, and log in using your GitLab credentials.\n\n- Head over to the GitLab group to which you associated your K8s cluster and create a blank project named `code-with-quarkus`.\n\n![Create project code-with-quarkus](https://about.gitlab.com/images/blogimages/quarkustwo.png){: .shadow.small.center.wrap-text}\nCreate project code-with-quarkus\n{: .note.text-center}\n\n- From a Terminal window on your local machine, change directory to the newly unzipped directory `code-with-quarkus` and execute the command `rm .dockerignore` to delete the `.dockerignore` file that came with the sample Quarkus application. After removing this file, execute the following commands to populate your newly create Git project `code-with-quarkus` with the contents of this directory:\n\n**NOTE:** Depending on your version of git installed on your local machine, the commands below may vary. 
Keep in mind that the goal of the steps below is to upload the project on your local machine to your newly created GitLab project.\n\n```\ngit init\ngit remote add origin https://gitlab.com/[REPLACE WITH PATH TO YOUR GROUP]/code-with-quarkus.git\ngit add .\ngit commit -m \"Initial commit\"\ngit push --set-upstream origin master\n```\n\nAt this point, you should have your sample Quarkus application in your GitLab project `code-with-quarkus`.\n\n## Modify the generated Dockerfile.jvm file and indicate its location\n\nSince the location of the Dockerfile is not at the root level of the project, we need to create a project variable DOCKERFILE_PATH and set it to `src/main/docker/Dockerfile.jvm` to indicate to the Auto Build job where to find the Dockerfile to build the container image.\n\n- From your `code-with-quarkus` GitLab project window, select **Settings > CI/CD** from the left vertical navigation menu.\n\n- Scroll to the **Variables** section on the screen and click on the **Expand** button on the right hand side of the section.\n\n- Click on the **Add Variable** button and enter the following values for the fields in the popup:\n\n```\nKey = DOCKERFILE_PATH\nValue = src/main/docker/Dockerfile.jvm\nType = Variable\nEnvironment scope = All (default)\nProtect variable Flag = ensure this flag is unchecked\nMask variable Flag = ensure this flag is unchecked\n```\n\nThe variable definition should look as follows:\n\n![Add var dockerfilepath](https://about.gitlab.com/images/blogimages/quarkusthree.png){: .shadow.small.center.wrap-text}\nAdd DOCKERFILE_PATH variable to the your code-with-quarkus project\n{: .note.text-center}\n\n- Click on the **Add variable** button to complete adding this variable to your project\n\nIn order for Auto Build to work, we need to make some minor modifications to the generated Dockerfile.jvm in the sample Quarkus application.\n\n- From your `code-with-quarkus` GitLab project window, navigate to the directory `src/main/docker` and click on 
the file `Dockerfile.jvm`. Click on the **Edit** button to start making changes to this file.\n\n- At the top of the file, you will see about 77 lines of comments. Replace all the lines following the comments with the following code segment:\n\n```\n####\nFROM openjdk:11 as builder\nRUN mkdir /build\nADD . /build/\n\nWORKDIR /build\nRUN ./mvnw package\n\nFROM registry.access.redhat.com/ubi8/openjdk-11:1.11\n\nENV LANG='en_US.UTF-8' LANGUAGE='en_US:en'\n\n# We make four distinct layers so if there are application changes the library layers can be re-used\nCOPY --from=builder --chown=185 /build/target/quarkus-app/lib/ /deployments/lib/\nCOPY --from=builder --chown=185 /build/target/quarkus-app/*.jar /deployments/\nCOPY --from=builder --chown=185 /build/target/quarkus-app/app/ /deployments/app/\nCOPY --from=builder --chown=185 /build/target/quarkus-app/quarkus/ /deployments/quarkus/\n\nEXPOSE 8080\nUSER 185\nENV AB_JOLOKIA_OFF=\"\"\nENV JAVA_OPTS=\"-Dquarkus.http.host=0.0.0.0 -Djava.util.logging.manager=org.jboss.logmanager.LogManager\"\nENV JAVA_APP_JAR=\"/deployments/quarkus-run.jar\"\n```\n\nThe lines above add a build stage called`builder` to do the Java build using the openjdk:11 image and adds a `build` working directory to the process. The rest of the lines are effectively the same as the original except that we have updated the paths of the `COPY` commands to find the appropriate files under the `build` working directory.\n\n- Click on the **Commit changes** button at the bottom of the **New file** window to create the new file.\n\n## Update the application port number\n\nThe Auto Deploy job of Auto DevOps defaults to port 5000 for applications but the sample Quarkus application uses port 8080. So, we need to override this value in the helm chart for the Auto Deploy job. 
This is how you do it:\n\n- From your `code-with-quarkus` GitLab project window, click on **New File** from the pop-down menu next to project root name directory as shown below:\n\n![Select new file](https://about.gitlab.com/images/blogimages/quarkusfour.png){: .shadow.small.center.wrap-text}\nSelect New file from your code-with-quarkus project top-level directory\n{: .note.text-center}\n\n- On the **New file** window, enter `.gitlab/auto-deploy-values.yaml` for the name of the new file and paste the following two lines as the content of the file:\n\n```\nservice:\n  internalPort: 8080\n```\n\nYour window should look as follows:\n\n![Update application port number for Auto Deploy](https://about.gitlab.com/images/blogimages/quarkusfive.png){: .shadow.small.center.wrap-text}\nUpdate the application port number in the helm chart for Auto Deploy\n{: .note.text-center}\n\n- Click on the **Commit changes** button at the bottom of the **New file** window to create the new file.\n\n## Update the version of the JDK\n\nThe sample Quarkus application includes a unit test that is automatically run by the Auto Test job, which uses a Java version not compatible with Quarkus resulting in “java.lang.UnsupportedClassVersionError” exceptions. To solve this, we need to adjust the Java runtime version to 11 since this is the lowest version of the JRE supported by Quarkus. Let’s do this:\n\n- From your `code-with-quarkus` GitLab project window, click on **New File** from the pop-down menu next to project root name directory and name the new file `system.properties`. 
As its contents, paste the following line into it:\n\n```\njava.runtime.version=11\n```\n\n- Click on the **Commit changes** button at the bottom of the **New file** window to create the new file.\n\n## Enable Auto DevOps\n\nLastly, we need to enable Auto DevOps for your `code-with-quarkus` GitLab project.\n\n- From your `code-with-quarkus` GitLab project window, select **Settings > CI/CD** from the left vertical navigation menu.\n\n- Scroll to the **Auto DevOps** section on the screen and click on the **Expand** button on the right hand side of the section.\n\n- In the section, check the **Default to Auto DevOps pipeline** checkbox. Then, for Deployment strategy, select on the radio button **Automatic deployment to staging, manual deployment to production**. Finally, click on the **Save changes** button. Here’s an example screenshot:\n\n![Enable Auto DevOps](https://about.gitlab.com/images/blogimages/quarkussix.png){: .shadow.small.center.wrap-text}\nEnable Auto DevOps for your sample Quarkus project\n{: .note.text-center}\n\nThis will launch an Auto DevOps pipeline that will build, test and deploy your application first to the staging environment and then give you the option to manually deploy to 100% of the production environment. The completed Auto DevOps pipeline should look like this:\n\n![Completed pipeline](https://about.gitlab.com/images/blogimages/completed-pipe.png){: .shadow}\nCompleted Auto DevOps pipeline for a sample Quarkus application in Java mode\n{: .note.text-center}\n\n## Conclusion\n\nThe combination of Quarkus and GitLab can empower your developers and operations teams to collaborate better, spend more time innovating to deliver business value and differentiating capabilities to end users.\n\nIn this article, we showed how to automate the software delivery of a generated Quarkus application in Java mode using GitLab Auto DevOps. 
Here is [a working sample project](https://gitlab.com/tech-marketing/sandbox/hn/code-with-quarkus) of this Quarkus application, whose delivery has been automated by GitLab Auto DevOps.\n\n\n\n\n\n\n\n\n\n\n",[1128,111,269],{"slug":2532,"featured":6,"template":683},"how-to-automate-software-delivery-using-quarkus-and-gitlab","content:en-us:blog:how-to-automate-software-delivery-using-quarkus-and-gitlab.yml","How To Automate Software Delivery Using Quarkus And Gitlab","en-us/blog/how-to-automate-software-delivery-using-quarkus-and-gitlab.yml","en-us/blog/how-to-automate-software-delivery-using-quarkus-and-gitlab",{"_path":2538,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":2539,"content":2545,"config":2552,"_id":2554,"_type":16,"title":2555,"_source":18,"_file":2556,"_stem":2557,"_extension":21},"/en-us/blog/unreview-a-year-later-how-gitlab-is-being-transformed-by-ml-powered-code-review",{"title":2540,"description":2541,"ogTitle":2540,"ogDescription":2541,"noIndex":6,"ogImage":2542,"ogUrl":2543,"ogSiteName":697,"ogType":698,"canonicalUrls":2543,"schema":2544},"GitLab transforms code review with machine learning tools","Learn how last year's acquisition has resulted in impactful features for the One DevOps Platform.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749668002/Blog/Hero%20Images/pg-gear.jpg","https://about.gitlab.com/blog/unreview-a-year-later-how-gitlab-is-being-transformed-by-ml-powered-code-review","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"UnReview a year later: How GitLab is transforming DevOps code review with ML-powered functionality\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Taylor McCaslin\"}],\n        \"datePublished\": \"2022-06-02\",\n      }",{"title":2546,"description":2541,"authors":2547,"heroImage":2542,"date":2549,"body":2550,"category":14,"tags":2551},"UnReview a year later: How GitLab is transforming DevOps code 
review with ML-powered functionality",[2548],"Taylor McCaslin","2022-06-02","\n\nA little over a year ago, [GitLab acquired UnReview](/press/releases/2021-06-02-gitlab-acquires-unreview-machine-learning-capabilities.html), a machine learning-based solution for automatically identifying [relevant code reviewers](/stages-devops-lifecycle/create/) and distributing review workloads and knowledge. Our goal is to integrate UnReview’s ML-powered code review features throughout GitLab, the One DevOps Platform. We checked in with Taylor McCaslin, principal product manager, ModelOps, at GitLab, to find out the impact UnReview has had so far and what comes next.\n\n**The idea of applying machine learning to code review was already underway at GitLab before the UnReview acquisition. What was it about ML/AI and automation that seemed a good fit for the code review process? How did the UnReview acquisition affect that strategy?**\n\nThe acquisition of UnReview gave GitLab a practical way to get started with a really focused value proposition that was obvious to the platform. ML/AI is a lot more than just having a useful algorithm. UnReview and its team gave GitLab talent with experience building MLOps pipelines and working with production DataOps workflows. As a source code management ([SCM](/solutions/source-code-management/)) and continuous integration ([CI](/topics/ci-cd/)) platform, MLOps and DataOps are key ambitions for our ModelOps stage. UnReview is the foundational anchor of our AI Assisted group, and we anticipate developing more ML-powered features with the base that we’ve built integrating UnReview into our One DevOps platform. If it’s something you manually set today within GitLab, we’ll consider suggestions and automations: suggested labels, assignees, issue relationships, etc. You can learn more about our plans on our [AI Assisted direction page](/direction/modelops/ai_assisted/).\n\n> You’re invited! 
Join us on June 23rd for the [GitLab 15 launch event](https://page.gitlab.com/fifteen) with DevOps guru Gene Kim and several GitLab leaders. They’ll show you what they see for the future of DevOps and The One DevOps Platform.\n\n**There were [three specific objectives with the UnReview project](/handbook/engineering/development/data-science/ai-assisted/projects/unreview/#overview) when you first started:**\n- **Eliminate the time wasted manually searching for an appropriate code reviewer to review code changes.**\n- **Make optimum recommendations that consider the reviewers’ experience and optimize the review load across the team, which additionally facilitates knowledge sharing.**\n- **Provide analytics on the state of code review in the project, explaining why a particular code reviewer is recommended.**\n\n**Have you had to change or add to these in any way?**\n\nWe now have Suggested Reviewers running for external beta customers as well as dogfooding it internally. We’ve learned a lot about what makes a good code reviewer. Some of the obvious things like context with the changed files and history of committing to that area of code are obvious. But there are less obvious things like what type of code someone has experience with (front-end or back-end).\n\nWe’re finding the concept of recency interesting: the idea that people who more recently interacted with files and functions may be better suited to review the code. Also, people leave companies, and that’s usually not something that can be inferred by the source graph, so we’re working on merging additional GitLab activity data with the recommendation engine.\n\nIn addition, we’re thinking a lot about bias in our recommendations. For example, a senior engineer likely has the most commits across a project, but we don’t always want to recommend a senior engineer. 
The more we work with the algorithm and recommendations, the more nuanced we find it.\n\nNot every organization does code review the same way, so we’re considering building different models for those that have no process versus organizations that have very rigid and hierarchical reviewer requirements. We also have to consider how recommendations interact with other features of the platform like code owners, maintainer roles, and commit access.\n\nWe’ve never been more excited about the potential of machine learning within GitLab. Some of the feedback we’ve had from beta customers are “this feels like magic” and that honestly encapsulates what we’re going for. Sometimes the right code reviewer is just a feeling that you can’t quite put your finger on. Through data and a little bit of magic, we may see Suggested Reviewers help speed up workflows, and cut down on back and forth and wasted time trying to find someone to do a great review of your code.\n\n**Introducing ML-powered features can come with challenges, especially being GitLab’s first data science feature. Can you speak to some of those challenges and how the team overcame them?**\n\nIt has been about a year since we closed the transaction. During that time period we’ve introduced a lot of new concepts to GitLab. Access to real-time data within the feature with DataOps extraction and cleaning of platform activity data. We have an end-to-end MLOps pipeline running 100% within GitLab CI that extracts, builds, trains, and deploys the UnReview model, and new observability metrics to know if the whole system is working. These are all foundational concepts that we’ve had to build from the ground up.\n\nAlso, we’ve introduced Python to the GitLab tech stack and have to develop new engineering standards and hiring interview practices to find the right talent for this team. 
We’re now turning the corner of this foundational work and I anticipate that relatively soon we’ll release Suggested Reviewers fully integrated with the platform and UI.\n\nMilestones have been part of the way we’ve sliced up the integration work. We have a variety of internal milestones we’ve been tracking against, including porting the model into GitLab SCM and CI, building the Dataops and MLOps pipelines, and internal and external customer betas. It’s helpful to have these milestones to know what’s most important at any given time and not to get overwhelmed with all the moving pieces. We’re paving a new path with ML-powered features at GitLab, and once we’re done we’ll have a repeatable process and template to replicate over and over with new data science-powered features.\n\n**What has been the most surprising thing you’ve encountered or learned since UnReview first debuted?**\n\nCode Reviewers are foundational to the software development lifecycle. We thought this would be a really straightforward feature, but it turns out people REALLY care about recommendations. People hate bad suggestions so when the recommendations are wrong, the feedback is fast and furious. But when it’s right, it feels like magic. That really surprised me how positively people respond to a great suggestion.\n\nA lot of GitLab users have asked me what our success metric is for Suggested Reviewers. It should just feel like magic. Maybe you don’t know why someone was chosen, but you just feel they were the right person to review the change. And hopefully that leads to a more thoughtful code review, reduces the back and forth of trying to find someone to review your code, and ultimately creates a better experience end-to-end. A lot of engineers dread code reviews; we want to change that. I hope Suggested Reviewers can take the pain out of the experience and make it something engineers look forward to. That’s the feeling we’re trying to create with our recommendations. 
Obvious but magic.\n\n**What’s next for UnReview specifically and DevOps code review more generally? Where do you see the next big advances happening?**\n\nWe’re just scratching the surface. There are so many opportunities for recommendations and automations across the platform. We have a lot of data at GitLab, from the source graph, contribution history, CI builds, test logs, security scans, and deployment data. We believe all of this can be integrated together. I’m particularly excited about what we’re calling [Intelligent Code Security](/direction/modelops/ai_assisted/#categories). The idea is that we will be able to look at your source code as you’re writing it, analyze it for security vulnerabilities, and not only suggest fixes to common security flaws, but also apply that change, run your CI, confirm the build succeeds, confirm the vulnerability was resolved, and possibly even deploy that change, all automatically.\n\nImagine the future where your code gets more secure automatically while you sleep. That sounds wild, but we have the data to power [a feature like this in the future](/direction/modelops/ai_assisted/#categories). Suggested Reviewers is just the beginning. We haven’t seen many DevOps platforms fully embrace the data, code, and activity data that they have in a material way. 
I think we’ll see a lot more in this space moving forward as development platforms identify the massive opportunities to drive efficiencies and remove the frustrating parts of software development from the process.\n",[1128,2266,793,233,771],{"slug":2553,"featured":6,"template":683},"unreview-a-year-later-how-gitlab-is-being-transformed-by-ml-powered-code-review","content:en-us:blog:unreview-a-year-later-how-gitlab-is-being-transformed-by-ml-powered-code-review.yml","Unreview A Year Later How Gitlab Is Being Transformed By Ml Powered Code Review","en-us/blog/unreview-a-year-later-how-gitlab-is-being-transformed-by-ml-powered-code-review.yml","en-us/blog/unreview-a-year-later-how-gitlab-is-being-transformed-by-ml-powered-code-review",{"_path":2559,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":2560,"content":2566,"config":2573,"_id":2575,"_type":16,"title":2576,"_source":18,"_file":2577,"_stem":2578,"_extension":21},"/en-us/blog/the-kubecon-summary-from-a-product-perspective",{"title":2561,"description":2562,"ogTitle":2561,"ogDescription":2562,"noIndex":6,"ogImage":2563,"ogUrl":2564,"ogSiteName":697,"ogType":698,"canonicalUrls":2564,"schema":2565},"How what we learned at KubeCon EU 2022 will impact our product roadmaps","Platform integrations and secrets management are among our product team's primary takeaways. 
Find out why.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097776/Blog/Hero%20Images/Blog/Hero%20Images/2_2.png_1750097776369.png","https://about.gitlab.com/blog/the-kubecon-summary-from-a-product-perspective","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How what we learned at KubeCon EU 2022 will impact our product roadmaps\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Viktor Nagy\"}],\n        \"datePublished\": \"2022-05-31\",\n      }",{"title":2561,"description":2562,"authors":2567,"heroImage":2563,"date":2569,"body":2570,"category":14,"tags":2571},[2568],"Viktor Nagy","2022-05-31","\nAfter two years of only virtual KubeCon events, the GitLab product team was excited to participate in and meet colleagues, partners, and more from our industry at KubeCon EU 2022, held in Valencia, Spain. We were present with four product leaders, a software developer, and a UX researcher. This post summarizes our primary takeaways from the conference, an experience that will affect our roadmaps.\n\nWe will discuss the following topics:\n\n- Internal platforms and GitOps\n- Secrets management\n- Infrastructure integrations\n- WebAssembly a.k.a. WASM\n\nThere were 32 topic types and several 0-day events at KubeCon. Many talks focused on a few tools. Many Cloud Native Computing Foundation ([CNCF](https://www.cncf.io/)) projects had their community meetings during these days. Some talks were given IRL, and others were broadcast virtually with live Q&A. There were a variety of topics and approaches. There were many talks about the various aspects of cluster management, too. However, we left this topic out on purpose because at GitLab we want to focus on the software developers and provide one DevOps platform to support their work. Cluster management is one step away from this focus. 
Still, we noticed some remarkable patterns as highlighted by the four elements of our list.\n\n> You’re invited! Join us on June 23rd for the [GitLab 15 launch event](https://page.gitlab.com/fifteen) with DevOps guru Gene Kim and several GitLab leaders. They’ll show you what they see for the future of DevOps and The One DevOps Platform.\n\n## Internal platforms and GitOps\n\nCompanies want their developers to focus on their core business. They create internal platforms to hide the complexity of Day 0-2 operations from their software engineers and still allow the \"shift left\" movement of DevOps. These platforms often involve the welding of several tools.\n\nMany talks presented how the given team or company approached their platform problem and what tools they used, and one could often feel the 18-month sweat of a whole platform team trying to come up with a solution.\n\nThese platforms use either a push- or pull-based model for deployments. No single approach is emerging due to legacy applications and different requirements. While there is a definition of GitOps provided by the [OpenGitOps](https://opengitops.dev/) initiative, several presenters offered their own definitions, including of pull-based deployments.\n\nWe fielded a large-scale survey related to secrets at KubeCon, and learned that users would like help with the [Pipeline Authoring](/direction/verify/pipeline_composition/) workflow.\n\nBesides the wiring of the tools, the industry is still looking for a unified approach to multi-tenancy (there might not be one), and sometimes integrating security processes seems overly challenging.\n\n### How does this affect our roadmap?\n\nThere is a lot of potential in building a platform used as the starting point for internal platforms. Imagine a \"tool\" that shortens the time required to create an internal platform to days or weeks instead of a whole year. 
This is the GitLab vision of The One DevOps platform.\n\nAs a result, we don't plan any changes in our direction. We will continue investing in the recently started [Deployment direction](/direction/delivery/) to provide all the building blocks for a platform in a single tool and are already actively looking for integrated experiences across our offering.\n\nWe’re working on a CI/CD Component Catalog that includes CI templates. This will [support the Pipeline Authoring workflow](https://gitlab.com/groups/gitlab-org/-/epics/7462).\n\n## Secrets management\n\nOne of the things that often came up in our discussions is secrets management. We fielded a large-scale survey related to secrets at KubeCon, and attendees were glad that we’re thinking about this topic. Security is part of the DevOps discussion, and secrets management is a serious issue, especially in a cloud-native aspect.\n\n- Jenkins, GitHub and GitLab were all mentioned during the secret management discussions.\n- Users would like to offload the secrets management responsibility to another product. In many cases, their security requirements are strict, so they don't want/can't handle secrets by themselves.\n- Hashicorp Vault is a preferred tool (primarily in large enterprise companies working in finance or government) to manage and handle secrets. 
At the same time, most companies would like to avoid operating one more application in their stack.\n- Open ID Connect [OIDC](https://docs.gitlab.com/ee/integration/openid_connect_provider.html) with the JSON web token (JWT) is an essential direction for us.\n\n### How does this affect our roadmap?\n\nWe should invest more in secrets management since this is a pain our customers would like us to solve, and it's becoming a nonstarter feature for many organizations.\n\nWe want to advance in three main vectors:\n\n- Improve our existing secrets management solution - although we don't have a clear solution, we should improve our current variables capabilities to include additional features that could help users leverage variables for secrets. So it would be a \"good enough\" feature they can use. We are actively working toward this direction by removing some of the limitations we have around [variables and masking](https://gitlab.com/groups/gitlab-org/-/epics/1994).\n- Improve our existing [Hashicorp Vault integration](https://docs.gitlab.com/ee/ci/examples/authenticating-with-hashicorp-vault/) using the JWT token, allowing us to integrate with additional vendors (AWS, AZURE, GCP). Like the previous point, we are moving toward this direction by supporting OIDC and [adding audience claims to our JWT token](https://gitlab.com/groups/gitlab-org/-/epics/7335).\n- We need to develop [a clear strategy for a built-in secrets management solution](/direction/govern/pipeline_security/secrets_management/#next-9-12-monhts). In order to provide our users/customers with choice, GitLab wants to use Hashicorp Vault for secrets management handling. We believe that our approach should be not to build the logic ourselves but to leverage an open source, [cloud native](/topics/cloud-native/) project that we could build into GitLab.\n\n## Infrastructure integrations\n\nInfrastructure integrations came in several flavors during the talks. 
Some are about cluster management, that is not our focus in this blog. Several presentations show that internal platforms need a strong infrastructure aspect, too. When a new project/microservice is started, it might require a new namespace in the cluster with associated RBAC and policies, optionally storage, a source code management repo with automation, and the appropriate permissions. Deployments might create ephemeral environments or could modify the underlying environment within predefined constraints.\n\nThe top tools mentioned in this area are:\n\n- Terraform\n- Crossplane\n- Pulumi\n\n### How does this affect our roadmap?\n\nGitLab already has [great integrations for Terraform](https://docs.gitlab.com/ee/user/infrastructure/iac/), and the other tools are on our radar, too.\n\nWe are open to integrations but cannot currently prioritize the other integrations on our own. We hope that the community will be interested in contributing to benefit everyone.\n\nBuilding Docker containers might not be necessary to get easy-to-manage container binaries. WASM runtimes become available for Kubernetes, and many programming languages can natively compile to WASM. WASM can provide a secure runtime environment without Docker and might be able to simplify the toolchain developers need to learn.\n\nWe don't plan to add direct WASM support to GitLab yet. The generic package registry can hold WASM modules while their deployment is up to the user.\n\nAt the same time, we see a lot of potential in simple runtime environments built around WASM. While GitLab is not in the business of offering runtime services, we will be actively monitoring the market. 
We might look into more WASM integrations as we see more demand and tools and services maturing in this space.\n\n## GitLab feedback\n\nIt's great to work on a product where the overall sentiment is positive, both from customers that intensely rely on it and from attendees that have to use other tools but would love to use GitLab or just started to play with it recently.\n\nWe received the following notable mentions as feedback:\n\n- Stability and reliability improved over the last several months.\n- Users love our documentation (primarily around CI) - they mentioned it's easy to use and get started with.\n- Given the size of GitLab and the number of our users, we received feedback about long-outstanding issues. We were happy to respond that we are addressing at least some of them shortly.\n- Several customers had asked if we got some resources for migrating from Jenkins to GitLab.\n- A few customers mentioned that they had to move away from GitLab mainly because of an upper-level decision despite favouring GitLab.\n\n## Conclusions\n\n![The GitLab team](https://about.gitlab.com/images/blogimages/kubecon-gitlab-team.jpg)\n\nWe enjoyed all the talks and were delighted to meet and speak with our users and customers. Thanks to all of you, we could \"feel the pulse\" on how we are doing and validate our direction.\n\nWe hope that this blog will guide those who could not [attend KubeCon](https://about.gitlab.com/events/kubecon/) and serve as a summary for those who did attend. All the recordings will likely be available on YouTube from Jun 6, 2022.\n\nLet us know in the comments if you think we missed some important direction.\n\n_This blog post and linked pages contain information related to upcoming products, features, and functionality.\nIt is important to note that the information presented is for informational purposes only. Please do not rely on this information for purchasing or planning purposes. 
As with all projects, the items mentioned in this blog and linked pages are subject to change or delay. The development, release, and timing of any products, features, or functionality remain at the sole discretion of GitLab Inc._\n",[2572,793,1369,539,1368,1128],"kubernetes",{"slug":2574,"featured":6,"template":683},"the-kubecon-summary-from-a-product-perspective","content:en-us:blog:the-kubecon-summary-from-a-product-perspective.yml","The Kubecon Summary From A Product Perspective","en-us/blog/the-kubecon-summary-from-a-product-perspective.yml","en-us/blog/the-kubecon-summary-from-a-product-perspective",{"_path":2580,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":2581,"content":2587,"config":2593,"_id":2595,"_type":16,"title":2596,"_source":18,"_file":2597,"_stem":2598,"_extension":21},"/en-us/blog/gitlab-provides-small-business-with-a-professional-mature-devops-platform",{"title":2582,"description":2583,"ogTitle":2582,"ogDescription":2583,"noIndex":6,"ogImage":2584,"ogUrl":2585,"ogSiteName":697,"ogType":698,"canonicalUrls":2585,"schema":2586},"GitLab provides small business with a professional, mature DevOps platform","Blonk had a small team but a big need for professional software development. 
Here's how GitLab helped.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749668272/Blog/Hero%20Images/blonklogo.png","https://about.gitlab.com/blog/gitlab-provides-small-business-with-a-professional-mature-devops-platform","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"GitLab provides small business with a professional, mature DevOps platform\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Esther Shein\"}],\n        \"datePublished\": \"2022-05-19\",\n      }",{"title":2582,"description":2583,"authors":2588,"heroImage":2584,"date":2590,"body":2591,"category":14,"tags":2592},[2589],"Esther Shein","2022-05-19","\nBlonk is an international leader in the field of environmental and sustainability research in the agri-food sector. But as a small business without a QA team or a security team, the challenge was figuring out how to deliver professional software with only a few developers.\n\n[Blonk](https://blonksustainability.nl) used an external company to help set up what Bart Durlinger, product development manager, and software devevloper Pieter van de Vijver envisioned as its platform at the time. “They set up an environment on Amazon, a separate built server, a separate repository, and then some scripts in between to link it all together,” Durlinger recalls. “But when we decided to take more control, that was just too complex. We had too many different parts in many different places. 
We didn't have the capacity at the time to really oversee how this should all work together.”\n\nThat's when the Blonk team started looking for platforms that offered a more integrated approach, with project management, CI/CD, repository, and version control features all in one place.\n\n## Mature, with a modern vision of software development\n\nBlonk turned to GitLab after finding that the platform “had a lot of the things you need to have a professional delivery pipeline integrated into one solution,\" says Durlinger. At the time, the consultancy was using GitHub, which was more expensive, he says.\n\nWhen Blonk started with GitLab, the platform was free, which was a big factor in its selection, van de Vijver says. “But it was also an up-and-coming startup with a vision of that CI/CD integration built into how you envisioned the whole service itself,\" he says. “GitHub was more of a repository that might provide you with those things, but it required more manual setup.”\n \nBlonk liked that GitLab was a mature and stable solution “but still new enough to have a vision of how software is approached nowadays with easy setup and an integrated pipeline by default, and useful branching strategies by which you could support a multi-level, multi-stage deployment process easily,\" Van de Vijver says.\n\nAt the time Van de Vijver was the only one at Blonk with a background as a software developer, and another bonus was his familiarity with all the tools in GitLab. “By using GitLab, we could hit the ground running, and keep the scale small. You don't have to worry about all kinds of CI/CD operations and integrations and the configuration of that but use it just out of the box,” he says.\n\n## How Blonk is utilizing GitLab today\n\nCurrently, Blonk has 38 GitLab premium licenses, about half of which are used by software developers. 
The rest are used by data scientists, consultants, project managers, and others, so there are different ways the platform is utilized within the company; that also means there are different levels of software literacy but that hasn’t been an issue. The software development team has been onboarding very junior developers over the past couple of months, and “never have I had questions of how to do stuff in GitLab, because the platform is very intuitive,” Durlinger says.\n\nThe software development team has been integrated further into the core business, which also fits nicely with GitLab’s services, including the milestones Blonk uses as well as its repositories and project management strategies. “Also data scientists and methodology developers are now using GitLab projects for the project planning sometimes,” Durlinger notes.\n\nGitLab provided Blonk with a professional software environment for their developers. GitLab also lets the team use pre-built Docker images and a private Python package repository in their CI/CD pipelines, which means faster build times and easy integration, according to Durlinger. “That's a huge change because then we can distribute the work over multiple teams that can work independently on projects,” he says.\n\nThe platform’s automation features have also improved operational efficiency. “We don't need to communicate with external parties, or do any manual steps if we make code changes. We now are in control of managing our software and infrastructure deployment via CDK and gitlab-ci scripts, which makes it fully automated,” Durlinger explains.\n\nIn the project planning stage, Blonk is using GitLab issue templates to define issues, “and that also has really improved the quality of how we define issues to start with,\" Durlinger adds. Blonk has reaped huge benefits from the Agile capabilities of GitLab to plan, manage and monitor their workflows.  
\n\nBlonk now has improved transparency and collaboration amongst their teams, and they are using the GitLab Wiki to build an internal knowledge base to optimize productivity and accelerate new developer onboarding.\n\nGitLab has supported the scaling of the developer team from 2 to now 16 developers, going from a single team to 3 software teams and a data science team, all using the One DevOps Platform bringing a much needed single DevOps workflow. Blonk is using the package registry, Docker integration with GitLab, and each team now deploys microservices on AWS. Teams are facilitated via GitLab with enhanced communication and a robust feedback loop.  \n\nProbably the biggest selling point of the platform is that it offers an integrated environment of all solutions related to code management and deployment – from container services to package registry services – everything Blonk wants to use in a pipeline and be able to manage privately, according to Durlinger. The fact that Blonk no longer has to use multiple tools in an ad hoc manner is another benefit. \n\n“What’s really nice is that our non-code artifacts live together with the code,\" Durlinger notes. “Our designs, methodology documents, and prototypes developed by data scientists can all be part of our Gitlab projects. This has improved workflow throughout the organization,” he says.\n\n## Looking ahead\n\n“As Blonk continues its business transformation, GitLab is helping the company maintain its reputation as a reliable and honest company,” Durlinger says. GitLab has added value to their employer brand and makes them more attractive to new developers to join: “It demonstrates that we have a professional environment for software engineers.”\n\nBlonk’s goal is to improve sustainability performance analysis, and ensure that the tools they are building have the same integrity and quality. 
“GitLab enables us to do this by having a professional project creation pipeline in place,\" Durlinger says.\n",[1128,1528,111],{"slug":2594,"featured":6,"template":683},"gitlab-provides-small-business-with-a-professional-mature-devops-platform","content:en-us:blog:gitlab-provides-small-business-with-a-professional-mature-devops-platform.yml","Gitlab Provides Small Business With A Professional Mature Devops Platform","en-us/blog/gitlab-provides-small-business-with-a-professional-mature-devops-platform.yml","en-us/blog/gitlab-provides-small-business-with-a-professional-mature-devops-platform",{"_path":2600,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":2601,"content":2607,"config":2612,"_id":2614,"_type":16,"title":2615,"_source":18,"_file":2616,"_stem":2617,"_extension":21},"/en-us/blog/manager-of-frances-fr-domain-selects-gitlab",{"title":2602,"description":2603,"ogTitle":2602,"ogDescription":2603,"noIndex":6,"ogImage":2604,"ogUrl":2605,"ogSiteName":697,"ogType":698,"canonicalUrls":2605,"schema":2606},"France's .fr domain manager selects GitLab for security","Afnic looks to The One DevOps Platform to modernize its software development with automation, security and compliance, and support for multi-cloud environments.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749667869/Blog/Hero%20Images/afniclogo.png","https://about.gitlab.com/blog/manager-of-frances-fr-domain-selects-gitlab","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Manager of France's .fr domain selects GitLab for its DevSecOps capabilities\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"GitLab\"}],\n        \"datePublished\": \"2022-05-19\",\n      }",{"title":2608,"description":2603,"authors":2609,"heroImage":2604,"date":2590,"body":2610,"category":14,"tags":2611},"Manager of France's .fr domain selects GitLab for its DevSecOps capabilities",[1524],"Association Française pour le 
Nommage Internet en Coopération ([Afnic](https://www.afnic.fr/en/)) is a longstanding nonprofit in France that manages .fr domain names. Chosen 20 years ago by the French State to operate the .fr country code top-level domain, Afnic’s motto is “reliability first.” Afnic uses GitLab, The One DevOps Platform, to help sustain that motto through modernization of its software development environment.\n\nAfnic’s mission as the French National Top Level Domain Registry is to bring together public authorities, Internet users, and domain name professionals to build a secure and stable Internet, open to innovation and in which the French Internet community plays a leading role. Outages of such a digital service could prevent the provisioning of other services that rely on it and could thus have an impact on key economic and societal activities.\n\nAfnic started using GitLab about four years ago to build and secure the brand-new version of its Shared Registry System (SRS). The SRS is a platform that manages the domain names from the subscription of a domain name to the publication in the DNS database and all the updates during its life, including contacts, server names, and DNSSEC keys, according to Richard Coffre, Afnic’s principal product manager.\n\nSince the project began, all the technologies have changed. Previously, Afnic’s team was mainly using Java and Perl and now they use [Kubernetes](/solutions/kubernetes/), Angular, the latest version of Java, and Docker, among others. Security is paramount, and the team is using private clouds. That means Afnic has its own data centers in France and in colocation facilities all over the world.\n\n## Modernizing software development with automation and integration\n\nAfnic selected GitLab to automate and integrate processes during the deployment process. Previously, the majority of things were done manually and now Afnic’s team wants to follow [DevSecOps philosophy and governance](/topics/devsecops/). 
They wanted one DevOps platform with state-of-the-art [CI/CD](/topics/ci-cd/) capabilities, the ability to quickly onboard new developers, and features to improve compliance and monitoring functionality.\n\nNow, GitLab is one of the core components of Afnic’s systems.\n\nThe company’s use of GitLab expanded as they deployed new versions of Java and Docker and other technologies. “We wanted to take a big step to align our technology with the state of the market,” Coffre says, and after surveying the development team, the choice was GitLab.\n\nThe team is integrating GitLab with Jira, which is providing a lot of value, he adds.\n\nNow, in addition to developers, Afnic’s database administrators and network administrators use GitLab. The team is using Docker for images and Ansible. Jira is used for ticketing issues and is linked to GitLab and Confluence as a wiki to create the documentation.\n\n## What GitLab brings to the table\n\nThe goal for Afnic is to increase automation and to have everything in the same place and for anyone to be able to get at the proper version anytime. “That's the strength of GitLab,\" Coffre says. “That's also why we chose it because it's one of the leaders. Like many modern source code management systems, GitLab allows our developers to concurrently create source code. But it does it easily, giving us the possibility to do it safely, remembering our motto.\"\n\nPreviously Afnic used only open source tools that they had to customize, which Coffre says was not efficient on a daily basis. To manage source code properly, the team syncs it to GitLab. The strong focus on community contributions “is a guarantee that its features match the developers’ needs, especially regarding CI/CD,” he adds. \n\nWhen new developers join Afnic, it is very easy to onboard them to GitLab, he says. Another benefit is the cost savings because developers don’t lose source code. 
There is a time-saving metric, too, because if there is an issue in GitLab, it just requires someone to patch it. \n\nNow developers can focus on higher-value strategic tasks like security and vulnerability compliance, and not manual tests and delays, etc. That frees up developers to focus on their job managing DNS databases because the GitLab platform manages the software development lifecycle end-to-end. Coffre says, “GitLab will provide the foundational platform for all Afnic’s software products moving forward. We have experienced great benefits so far and we are excited to expand our use of this platform into the future”.",[1128,750,793,1369],{"slug":2613,"featured":6,"template":683},"manager-of-frances-fr-domain-selects-gitlab","content:en-us:blog:manager-of-frances-fr-domain-selects-gitlab.yml","Manager Of Frances Fr Domain Selects Gitlab","en-us/blog/manager-of-frances-fr-domain-selects-gitlab.yml","en-us/blog/manager-of-frances-fr-domain-selects-gitlab",{"_path":2619,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":2620,"content":2626,"config":2632,"_id":2634,"_type":16,"title":2635,"_source":18,"_file":2636,"_stem":2637,"_extension":21},"/en-us/blog/biden-administration-celebrates-1-year-anniversary-of-eo-by-accelerating-software-supply-chain-security",{"title":2621,"description":2622,"ogTitle":2621,"ogDescription":2622,"noIndex":6,"ogImage":2623,"ogUrl":2624,"ogSiteName":697,"ogType":698,"canonicalUrls":2624,"schema":2625},"Biden administration updates software supply chain security requirements","GitLab's One DevOps Platform can help agencies comply with government requirements.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749667852/Blog/Hero%20Images/eosecurity.jpg","https://about.gitlab.com/blog/biden-administration-celebrates-1-year-anniversary-of-eo-by-accelerating-software-supply-chain-security","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": 
\"Biden administration accelerates software supply chain security expectations a year into Executive Order\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Sandra Gittlen\"}],\n        \"datePublished\": \"2022-05-12\",\n      }",{"title":2627,"description":2622,"authors":2628,"heroImage":2623,"date":2629,"body":2630,"category":14,"tags":2631},"Biden administration accelerates software supply chain security expectations a year into Executive Order",[746],"2022-05-12","\n\nPresident Joe Biden last year on May 12th signed [Executive Order 14028 \"Improving the Nation’s Cybersecurity\"](https://www.whitehouse.gov/briefing-room/presidential-actions/2021/05/12/executive-order-on-improving-the-nations-cybersecurity/), which called on public and private sector organizations to improve the nation’s cybersecurity with “bold change” and “significant investments”. “Incremental improvements will not give us the security we need,” the EO states. Since then, the administration has only increased the pressure on agencies, forcing them to take a hard look at their software supply chains and justify their application development decisions, including how they use open source code, test their code, and grant permissions.\n\n“The federal government has accelerated its expectations for software supply chain security compliance, yet some organizations are still trying to understand how to broadly and proactively protect their software development,” says [Joel Krooswyk](https://gitlab.com/jkrooswyk), Senior Manager of Solutions Architecture at GitLab. “Agencies and their vendors have been focused on policy management and role-based access, but the federal government wants to go deeper and know where code is coming from and how to better secure it. 
They are quickly moving down the supply chain.” \n\nThe interest in the origins of software code stems from the complexity of cyberattacks such as that [carried out on SolarWinds](/blog/what-the-solarwinds-attack-can-teach-us-about-devsecops/), as well as the ongoing [log4j](/blog/use-gitlab-to-detect-vulnerabilities/) and Spring4Shell vulnerabilities. “Intentionally malicious contributions can inject code that is literally opening the doors to hackers,” Krooswyk says. “However, agencies and vendors can’t just stop utilizing open source software and microservices. They need the ingenuity of the open source community.” GitLab is a proponent of open source and believes [everyone can contribute](/company/mission/).\n\nThe Biden administration, through its frameworks and mandates, is simply saying, 'we have to keep a better eye on that,' especially as more organizations assume a cloud-first posture, according to Krooswyk.\n\nFor example, earlier this year, the National Institute of Standards and Technology (NIST) published the Software Security Development Framework (SSDF) 1.1, which offers guidance on how to [create tighter controls throughout the software development lifecycle](/blog/comply-with-nist-secure-supply-chain-framework-with-gitlab/).\n\nThe SSDF 1.1 framework recommends: \n- organizations should be prepared by reviewing permissions\n- all components of software should be safe from tampering and unauthorized access\n- software should be produced with minimal security vulnerabilities in its releases\n- organizations should be able to quickly and sufficiently respond to vulnerabilities \n\n## Code sourcing\n\nThe next phase in the federal government’s move to secure the software supply chain will be to [require reporting and/or attestation](/blog/securing-the-software-supply-chain-through-automated-attestation/).\n\n“Agencies and their vendors are being asked if their software is justifiably built using properly sourced code. 
As a result, organizations may have to explain why they chose to use code from non-mainline repositories,” Krooswyk says.\n\nFor instance, if a DevOps team chooses code from a non-mainline repository originating in China, they will have to attest to why they did that over sourcing from a mainline repository. The same idea applies to pulling clean containers and not repeatedly using those plagued with existing vulnerabilities, according to Krooswyk.\n\nHe believes these questions will all be rolled up into a Cybersecurity & Infrastructure Software Agency (CISA) mandate for a [software bill of materials](https://www.cisa.gov/sbom) (SBOM), which is a list of ingredients that make up software components. “The SBOM will show the list of contributors, known vulnerabilities, results of dependency scans on open source, and more,” he says. “The Biden administration, NIST, and CISA are all in alignment on the need for more consistent software security attestation.”\n\n## How to prepare\n\nWhile some agencies, like the U.S. Department of Defense, might be on the cutting edge of these mandates, smaller agencies or those with more legacy infrastructure and practices might require more effort to be able to comply. “If your development, operations, and security processes aren’t transparent or fully documented and if your scanning is still manual, then these new requirements could be a roadblock,” Krooswyk says. 
“The administration is only going broader in terms of the scope of mandates and more specific with security requirements as time progresses to plug all the security holes, meaning more regulations and further compliance.”\n\nGitLab believes some of the long-term asks expected to come from the government may include:\n- bake security in, don’t bolt it on\n- ensure scanning is top of mind\n- maintain zero-trust permission models and source code management controls\n- any open source software used should have known origins and support SBOM generation, verifiable by dependency scanning\n- purchase secure commercial off-the-shelf software that complies with all security and labeling requirements from standards bodies\n\nGitLab’s One DevOps Platform can help organizations answer this request for software supply chain security compliance through visibility and transparency into processes, verifiable compliance, zero-trust user management, and templated security automation. “While we are helping organizations with cloud adoption and infrastructure modernization, we’re doing so in such a way as to not compromise on risk or security, providing end-to-end traceability and step-by-step auditability from issue creation through deployment,” he says.\n\nGitLab has a distinct set of features that make enabling NIST frameworks and attesting to code sourcing decisions easier:\n- [SBOM creation](https://docs.gitlab.com/ee/user/application_security/dependency_list/#dependency-list) in a standardized format \n- [Security dashboards](https://docs.gitlab.com/ee/user/application_security/security_dashboard/)\n- [Vulnerability reports and remediation](https://docs.gitlab.com/ee/user/application_security/vulnerability_report/)\n- [Pipeline frameworks and compliance](https://docs.gitlab.com/ee/user/project/settings/#compliance-frameworks)\n- [Security scanning breadth of offering](https://docs.gitlab.com/ee/user/application_security/) from SAST and DAST to fuzz testing \n\nAs the EO states, 
incremental improvements are not enough to properly secure software. To meet the totality, speed, and sophistication of the administration’s demands for cybersecurity protections, consider adopting GitLab’s One DevOps Platform.\n\n",[1128,750,1488],{"slug":2633,"featured":6,"template":683},"biden-administration-celebrates-1-year-anniversary-of-eo-by-accelerating-software-supply-chain-security","content:en-us:blog:biden-administration-celebrates-1-year-anniversary-of-eo-by-accelerating-software-supply-chain-security.yml","Biden Administration Celebrates 1 Year Anniversary Of Eo By Accelerating Software Supply Chain Security","en-us/blog/biden-administration-celebrates-1-year-anniversary-of-eo-by-accelerating-software-supply-chain-security.yml","en-us/blog/biden-administration-celebrates-1-year-anniversary-of-eo-by-accelerating-software-supply-chain-security",{"_path":2639,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":2640,"content":2646,"config":2652,"_id":2654,"_type":16,"title":2655,"_source":18,"_file":2656,"_stem":2657,"_extension":21},"/en-us/blog/gitlab-heroes-unmasked-elevating-my-company-using-gitlab",{"title":2641,"description":2642,"ogTitle":2641,"ogDescription":2642,"noIndex":6,"ogImage":2643,"ogUrl":2644,"ogSiteName":697,"ogType":698,"canonicalUrls":2644,"schema":2645},"GitLab Heroes Unmasked: How I am elevating my company using GitLab","Tickett Enterprises Limited Director Lee Tickett shares the details of his ongoing journey to use the DevOps platform to its fullest.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749667569/Blog/Hero%20Images/heroestickett.jpg","https://about.gitlab.com/blog/gitlab-heroes-unmasked-elevating-my-company-using-gitlab","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"GitLab Heroes Unmasked: How I am elevating my company using GitLab\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Lee Tickett\"}],\n        
\"datePublished\": \"2022-05-12\",\n      }",{"title":2641,"description":2642,"authors":2647,"heroImage":2643,"date":2629,"body":2649,"category":14,"tags":2650},[2648],"Lee Tickett","\n_A key to GitLab’s success is our vast community of advocates. Here at GitLab, we call these active contributors [\"GitLab Heroes\"](/community/heroes/). Each hero contributes to GitLab in numerous ways, including elevating releases, sharing best practices, speaking at events, and more. The \"GitLab Heroes Unmasked\" series is dedicated to sharing their stories._\n\nLee Tickett, director at IT development and support consultancy Tickett Enterprises Limited, is a GitLab hero and Core team member who continuously contributes to GitLab and provides exceptional feedback. In late 2020, he [wrote a blog](/blog/lee-tickett-my-gitlab-journey/) about how he came upon GitLab and began to use it as his company's platform.\n\nAt that point, his company was using GitLab in the following ways:\n\n- for version control\n- with a custom merge request approval process\n- as a custom UI for streamlined/standardized project creation\n- as an integration with our bespoke helpdesk platform\n- as a Windows runner with fairly basic CI\n\nThis blog picks up where that blog left off and gives insight into how Tickett Enterprises is making the most of GitLab's One DevOps Platform for its helpdesk, CRM integration, CI/CD, and more.\n\n## Migrating the helpdesk\n\nQuite some time ago, I decided to migrate from the bespoke helpdesk platform and use GitLab for issue tracking. Here's [an epic](https://gitlab.com/groups/gitlab-org/-/epics/5323) I created just over two years ago to start discussing my plans.\n\nI built a bespoke migration tool using C#, which connects directly to the existing\nhelpdesk database and pushes the data into GitLab using the API. 
This includes:\n\n- groups (each company in our helpdesk will become a group in GitLab with a single `Helpdesk` project)\n- issues (every ticket in our helpdesk will become an issue in GitLab, estimates will be included and quotes converted to weights)\n- notes\n- attachments\n- time logs\n- labels (type, class, department, and \"status\" will be migrated to labels)\n\n### Helpdesk workflow\n\nAfter discussing different approaches with the GitLab team and the community, we came up with the first iteration of our workflow process. The status of tickets in our helpdesk system becomes scoped labels in GitLab. It looks similar to the following:\n\n![Workflow Issue Board](https://about.gitlab.com/images/blogimages/workflow-issue-board.png)\n\nWe have two relatively small teams so we can also leverage boards to distribute and manage\nwork within the team:\n\n![Department Issue Board](https://about.gitlab.com/images/blogimages/department-issue-board.png)\n\nWe will be leveraging the [GitLab Triage](https://gitlab.com/gitlab-org/ruby/gems/gitlab-triage)\nRubyGem and [Triage Ops](https://gitlab.com/gitlab-org/quality/triage-ops) project to handle\nreactive and scheduled automation, such as: \n\n- opening pending issues once they reach their due date (this field has been slightly repurposed)\n- nudging users when issues have been pended, but no due date has been assigned\n- nudging the team when issues have not been triaged (labeled, estimates/quotes attached, etc.)\n\nGitLab triage will run as a scheduled pipeline from inside of GitLab, and Triage Ops (formerly known as Triage Serverless) will run as webhooks in AWS Lambda (triggered by webhooks). We may potentially transition some of our existing customizations from C# to GitLab Triage/Triage Ops, too. \n\n## Building out CRM\n\nOne of the biggest challenges moving our helpdesk over to GitLab was the inability to tie issues to Customers. 
So, roughly a year ago, I decided to start building out a [Customer Relations Management](https://docs.gitlab.com/ee/user/crm/) (CRM) feature. \n\nYou can see some of the work that has gone into the CRM so far: [CRM Merged MRs](https://gitlab.com/gitlab-org/gitlab/-/merge_requests?scope=all&state=merged&label_name[]=CRM).\n\nIt’s surprising how much work is needed for what seems like a mostly simple feature. Despite careful planning, there were many surprises that caused significant headaches. I was hoping to formally release this in December 2021, but it looks like June 2022 is more feasible now.\n\n### Reporting\n\nCompared to our previous bespoke SQL Server Reporting Services (SSRS) report suite pulling directly from our helpdesk, reporting is very limited. We tried using SSRS with a SQL Server linked to our GitLab Postgres server, but kept hitting walls. We are now moving forward using Google Data Studio (with a direct database connection).\n\nAlthough we still have a way to go, we've managed to achieve some really great results.\n\n![Scheduled Pipelines Report](https://about.gitlab.com/images/blogimages/scheduled-pipelines-report.png)\n\nHere's an example of a report we've started to build to increase the visibility of our scheduled interfaces now that we're leveraging CI/CD more.\n\n### Challenges\n\nOne obstacle we were faced with was the inability to achieve a lot of our goals at the instance level. Some GitLab functionality is at the project level, some at the group, and some at an instance. As a result, we had to create a temporary single root group and create all groups beneath it. 
\n\n## Moving to Linux/Docker for CI/CD pipelines\n\nWe have almost moved completely to Linux/Docker for our CI/CD pipelines, using several custom images:\n\n- our [custom .NET image](https://gitlab.com/tickett/dotnet.core.selenium) simply adds chromedriver to the default `mcr.microsoft.com/dotnet/core/sdk:latest` image to add Selenium support for UI testing\n- our [custom Android/Gradle image](https://gitlab.com/tickett/docker-android-gradle) provides a stable build environment for our Clover apps (which require v1 APK signing no longer supported in Android Studio).\n\nYou can see sample `.gitlab-ci.yml` templates in the relevant projects.\n\nWe now have our test summary and [coverage visualization](https://docs.gitlab.com/ee/ci/testing/test_coverage_visualization.html) displayed in merge requests, which is a total game changer! \n\n## GitLab for intranet\n\nWe've been using SharePoint for as long as I can remember, and I'm not a fan.\n\nAs great as a WYSIWYG interface is, I believe it brings with it:\n\n- a lack of consistency\n- a pretty awful audit trail\n- no review/approval process\n\nSo let's try and learn from the best. Can we use GitLab pages? Absolutely!\n\nWe picked Hugo purely as it seems the most popular (most forked GitLab pages project template). Similarly, the [Relearn theme](https://themes.gohugo.io/themes/hugo-theme-relearn/) seems to be the most popular for docs. 
\n\nIt's still a work in progress, but we’re exploring a structure similar to:\n\n```text\nClients\n-Client A\n--System A\n--System B\n-Client B\n--System C\n--System D\nInternal\n-Process A\n-Process B\n```\n\nNot too dissimilar to GitLab, but hugely amplified, we want to pull multiple projects, not just our Hugo repo.\n\nThe following  is our `.gitlab-ci.yml`:\n\n```yaml\nimage: registry.gitlab.com/pages/hugo:latest\nvariables:\n GIT_SUBMODULE_STRATEGY: recursive\ngrab-docs:\n tags:\n   - docker\n image:\n   name: ruby:2.7.5-slim\n script:\n   - cd ${CI_PROJECT_DIR}\n   - gem install gitlab\n   - ruby grab_docs.rb\n artifacts:\n   untracked: true\n\ntest:lint:\n tags:\n   - docker\n image:\n   entrypoint: [\"\"]\n   name: davidanson/markdownlint-cli2\n script:\n   - cp $MARKDOWN_LINT_CONFIG ./.markdownlint-cli2.jsonc\n   - markdownlint-cli2 \"content/**/*.md\"\n needs:\n   - grab-docs\n\ntest:\n tags:\n   - docker\n script:\n   - apk add --update --no-cache git\n   - hugo\n except:\n   - master\n needs:\n   - test:lint\n\npages:\n tags:\n   - docker\n script:\n   - apk add --update --no-cache git\n   - hugo\n artifacts:\n   paths:\n     - public\n only:\n   - master\n needs:\n   - grab-docs\n   - test:lint\n```\n\nThe first `grab-docs` step runs a custom Ruby script to:\n\n- interrogate our GitLab instance, looping through all groups and projects\n- grab the `README.md` and `/doc` folder\n- add frontmatter for last update date and link to the repo \n- update and fix all markdown paths\n\n```ruby\n#!/usr/bin/env ruby\n\nrequire 'fileutils'\nrequire 'gitlab'\n\n$api = Gitlab.client(endpoint: ENV['PRODUCTION_API_ENDPOINT'], private_token: ENV['GITLAB_API_TOKEN'].to_s)\n$projects = $api.projects(per_page: 50)\n\ndef grab_files(project)\n file = $api.file_contents(project.id, 'README.md')\n return unless file&.start_with?('",[269,1943,2651],"user 
stories",{"slug":2653,"featured":6,"template":683},"gitlab-heroes-unmasked-elevating-my-company-using-gitlab","content:en-us:blog:gitlab-heroes-unmasked-elevating-my-company-using-gitlab.yml","Gitlab Heroes Unmasked Elevating My Company Using Gitlab","en-us/blog/gitlab-heroes-unmasked-elevating-my-company-using-gitlab.yml","en-us/blog/gitlab-heroes-unmasked-elevating-my-company-using-gitlab",{"_path":2659,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":2660,"content":2666,"config":2672,"_id":2674,"_type":16,"title":2675,"_source":18,"_file":2676,"_stem":2677,"_extension":21},"/en-us/blog/devops-in-education-2021-survey-results",{"title":2661,"description":2662,"ogTitle":2661,"ogDescription":2662,"noIndex":6,"ogImage":2663,"ogUrl":2664,"ogSiteName":697,"ogType":698,"canonicalUrls":2664,"schema":2665},"DevOps in Education 2021 Survey results","DevOps and GitLab are helping transform higher education. Here's what we learned.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749668253/Blog/Hero%20Images/pencil2.jpg","https://about.gitlab.com/blog/devops-in-education-2021-survey-results","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"DevOps in Education 2021 Survey results\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Christina Hupy, Ph.D.\"}],\n        \"datePublished\": \"2022-05-04\",\n      }",{"title":2661,"description":2662,"authors":2667,"heroImage":2663,"date":2669,"body":2670,"category":14,"tags":2671},[2668],"Christina Hupy, Ph.D.","2022-05-04","\n\nIn fall 2021 we launched our second annual DevOps in Education Survey. Over 460 respondents from all regions of the world shared insights on how DevOps and GitLab are transforming higher education. 
\n\n## Key findings \n\n- **One platform for the win**: Respondents' enthusiasm for teaching GitLab's single DevOps platform increased 190% over 2020; survey takers also pointed to the way GitLab can tie culture to operations as key (up 189% year over year), and they also value student portfolio management (up 200%). \n\n- **CI/CD success**: Academic institutions reported high rates of adoption of GitLab’s CI/CD features both within the classroom and in all other use cases. \n\n- **Flexibility is key**: Deployment flexibility stands out again as a major advantage of GitLab at institutions of higher education. Security and authentication are the primary drivers. \n\n- **GitLab spreads the DevOps love**: Multiple departments within an academic institution are reporting they’re now using GitLab and 21% of respondents said the ability to install multiple instances across a campus was a GitLab advantage (up 6% from 2020).\n\n- **…and more spread = branching out**: Because GitLab has one complete platform, higher ed. respondents report they’re expanding their DevOps footprint to include additional stages like Secure. The three most used stages in education continue to be Source Control Management, Plan, and Verify. Release and Package are also seeing nearly 30% adoption by respondents. \n\n- **Planning features**: Educators find planning features such as multi-level epics, issue tracking features, labels, and project management highly useful tools. \n\n## Why DevOps belongs in the classroom\n\nThe benefits of teaching or learning GitLab came through clearly in the survey. The fact that GitLab is a single DevOps tool was key for 58% of respondents, up from just 20% in 2020. 
\n\nWhat are the benefits of teaching or learning GitLab?\n\n![Chart of the benefits of teaching or learning GitLab](https://about.gitlab.com/images/blogimages/gleducation2021.png)\n\n## How GitLab in education works\n\nDeployment flexibility is critical to universities because security and server access can be controlled (81%), all while integrating with user authentication systems (54%). The ability to host multiple instances per institution was also a factor for 21% of respondents, up 6% from last year – another sign that cross-campus adoption is growing.\n\nAdvanced features (only available in the Ultimate tier) are used by 35% of respondents, which remained fairly consistent from 2020. Security features including container scanning, SAST, advanced security testing, custom DAST, and compliance management were among the most frequently mentioned. Multi-level epics and free guest users were commonly mentioned as well. \n\n## Use cases and DevOps stages\n\nThe most common use of GitLab in education was source control management with 53% of respondents actively using, followed by Verify (Continuous Integration) at 40%, Plan (issue tracking, labels) 38%,  Manage (authentication, compliance management) at 28%, Package 29% and Release (Continuous Delivery) at 29%. The top four tools other than GitLab used by respondents were GitHub (76%), GitHub Actions (24%), Jenkins (26%), and BitBucket (17%). \n\nFaculty respondents noted the value of bringing industry tools to the classroom. One wrote, “Thank you for the GitLab Program. It makes it possible for us to manage students' software engineering projects in a modern development environment.”\n\n## Leveraging GitLab to boost skills\n\nThe 2021 survey asked an additional question regarding what specific skills are being taught with GitLab in the classroom. The three top skills taught with GitLab are: CI/CD (40%), collaboration and communication (36%), application development and design (30%). 
Other key skills included understanding process flows and analytics, modern computer technology and architectures, and system architectures. \n\n## About the participants\n\nOf the respondents, 35.9% have and use a GitLab subscription while 37% do not. The majority of respondents (78%) were at a university. There were 50 departments listed in the results; 40% were in a Computer Science Department and 32% in Information Technology. Of those respondents using GitLab, 23 departments were represented. These departments ranged across the academic disciplines including biology, economics, physics, business, and engineering. Respondents were 46% faculty and staff, 41% students, and 7% Administrators. We had a range of respondents from around the world: 39% were from North America, 28% from Europe, 18% from Asia, and 9% from South America. \n\n## GitLab for Education\n\nWe believe that *everyone can contribute*. We are committed to bringing DevOps to education institutions around the world. We provide free, unlimited, top-tier licenses to qualifying educational institutions for teaching, learning, and research. [Learn more here](/solutions/education/). 
\nAnd see all the results from the [GitLab for Education 2020 Program Survey report](/solutions/education/edu-survey/edu-survey-2020.pdf).\n",[1128,1069,269],{"slug":2673,"featured":6,"template":683},"devops-in-education-2021-survey-results","content:en-us:blog:devops-in-education-2021-survey-results.yml","Devops In Education 2021 Survey Results","en-us/blog/devops-in-education-2021-survey-results.yml","en-us/blog/devops-in-education-2021-survey-results",{"_path":2679,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":2680,"content":2686,"config":2693,"_id":2695,"_type":16,"title":2696,"_source":18,"_file":2697,"_stem":2698,"_extension":21},"/en-us/blog/gitlab-heroes-unmasked-lessons-from-early-mistakes",{"title":2681,"description":2682,"ogTitle":2681,"ogDescription":2682,"noIndex":6,"ogImage":2683,"ogUrl":2684,"ogSiteName":697,"ogType":698,"canonicalUrls":2684,"schema":2685},"Overcoming coding challenges to become a valued GitLab hero","Niklas van Schrick shares his journey from learning a new language to becoming an active contributor to GitLab.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749668009/Blog/Hero%20Images/wrong-way-2.jpg","https://about.gitlab.com/blog/gitlab-heroes-unmasked-lessons-from-early-mistakes","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"GitLab Heroes Unmasked: How a difficult start in coding led to being a valued contributor\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Jamie Rachel\"}],\n        \"datePublished\": \"2022-04-26\",\n      }",{"title":2687,"description":2682,"authors":2688,"heroImage":2683,"date":2690,"body":2691,"category":14,"tags":2692},"GitLab Heroes Unmasked: How a difficult start in coding led to being a valued contributor",[2689],"Jamie Rachel","2022-04-26","\n\nA key to GitLab’s success is our vast community of advocates. 
Here at GitLab, we call these active contributors [\"GitLab Heroes\"](/community/heroes/). Each hero contributes to GitLab in numerous ways, including elevating releases, sharing best practices, speaking at events, and more.  [Niklas van Schrick](https://gitlab.com/Taucher2003), who currently works as a Developer trainee, has been an active GitLab Hero since November 2021. \n\nNiklas upholds the [GitLab values](https://handbook.gitlab.com/handbook/values/), especially for transparency, by sharing his trials and discoveries with self-hosted instances. We all can learn from each other’s mistakes, which leads to collaborating to improve processes and build a better developer experience.\n\nHis journey to becoming a GitLab hero is detailed below.\n\n## A difficult beginning\n\n**Niklas van Schrick:** Every developer has to start somewhere. My journey started in May 2019 with Java and Minecraft. While development in Minecraft is great for fast results, based on my experience it’s not the best idea for a beginner.\n\nIt introduces the problem of learning a framework instead of the actual language and it delayed my learning process. I was able to write some functionality as plugins for a server, but I didn't know a single principle of the underlying language, Java. That led to code that was not easily maintainable. For each new functionality, I was building quickly without properly thinking about the structure of my code.\n\nWhen I was starting out, I learned from another developer. Although it is great to have someone guide you through the process, it can also lead to issues. My mistake was believing everything my mentor told me and not looking for solutions myself. In this way, I learned anti-patterns of code design, which led to a full refactoring of the project at a later time. 
It also slowed down my learning process further as I was asking for help before searching for solutions myself.\n\n## The importance of joining a developer community\n\n**Van Schrick:** In early 2020, we moved our projects to a self-hosted GitLab instance. It was my first experience with [version control](/topics/version-control/). It was a big advantage because we were able to see previous changes and easily identify the causes of bugs. I recommend using a version control system even in the early stages of your development journey, as it makes many things easier and keeps a history of your work. In the beginning, it is totally fine to just push to master or not even use a remote repository.\n\nA big improvement for me was joining a [developer community](/community/), as there are many developers who are happy to help. I learned many new concepts that were widely used by the developers of that community, and this led to much more maintainable code. You don't even have to actively ask in the community to learn new things.\n\nMost of the time, it is enough to keep up with the messages and read the conversations from others. In a helpful developer community, you always have someone who says, \"Why are you doing it like this? There are better ways to do this,\" and offers suggestions to improve the code.\n\n## Contribute, contribute, contribute\n\n**Van Schrick:** Another big step is to make contributions to open source projects. It allowed me to be part of the code review process from others, and learn from it. My first contribution to an open source project, which was not led by me, was a [typo fix in a GitLab view](https://gitlab.com/gitlab-org/gitlab/-/merge_requests/54834). I was surprised at how well the code review went and how fast it got merged. The positive experience encouraged me to join an open source project as a maintainer, and it has been a great experience. 
I ended up being an active contributor to the GitLab project and a GitLab Hero.\n\n_GitLab welcomes every new contributor and has [many possibilities for your first contribution](/community/contribute/)._\n",[1943,1488,269],{"slug":2694,"featured":6,"template":683},"gitlab-heroes-unmasked-lessons-from-early-mistakes","content:en-us:blog:gitlab-heroes-unmasked-lessons-from-early-mistakes.yml","Gitlab Heroes Unmasked Lessons From Early Mistakes","en-us/blog/gitlab-heroes-unmasked-lessons-from-early-mistakes.yml","en-us/blog/gitlab-heroes-unmasked-lessons-from-early-mistakes",{"_path":2700,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":2701,"content":2707,"config":2713,"_id":2715,"_type":16,"title":2716,"_source":18,"_file":2717,"_stem":2718,"_extension":21},"/en-us/blog/career-spotlight-sre-vs-devops-engineer-vs-devops-platform-engineer",{"title":2702,"description":2703,"ogTitle":2702,"ogDescription":2703,"noIndex":6,"ogImage":2704,"ogUrl":2705,"ogSiteName":697,"ogType":698,"canonicalUrls":2705,"schema":2706},"DevOps careers: SRE, engineer, and platform engineer","Where does an SRE leave off and a DevOps engineer (or platform engineer) begin? 
Here's what you need to know.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749666685/Blog/Hero%20Images/comparing-confusing-terms-in-github-bitbucket-and-gitlab-cover.jpg","https://about.gitlab.com/blog/career-spotlight-sre-vs-devops-engineer-vs-devops-platform-engineer","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"DevOps careers: SRE, engineer, and platform engineer\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Lauren Gibbons Paul\"}],\n        \"datePublished\": \"2022-04-25\",\n      }",{"title":2702,"description":2703,"authors":2708,"heroImage":2704,"date":2710,"body":2711,"category":14,"tags":2712},[2709],"Lauren Gibbons Paul","2022-04-25","Even if you’re totally happy in your current position, it pays to keep an eye on your DevOps career path and learn about emerging roles, especially given [the way the DevOps space evolves so rapidly](https://www.simplilearn.com/is-a-devops-career-right-for-you-article). \n\nFor example, you might be wondering about the role of site reliability engineer (SRE) as opposed to DevOps engineer (and the totally new position called DevOps platform engineer, more on that later). These are all engineering positions requiring tech expertise and coding chops, but they play distinct roles on the DevOps team. Here’s what you need to know:\n\n## SRE: A seasoned role\n\nAs the title suggests, at a high level, SREs focus primarily on reliability, solving operational, scale, and uptime problems. In 2003, Google originated the SRE role to safeguard the uptime of its site, but it has evolved considerably since the advent of cloud native applications and platforms. Today, SREs concentrate on [minimizing the frequency and impact of failures](https://thenewstack.io/the-evolution-of-the-site-reliability-engineer-sre/) that can impact the overall reliability of a cloud\napplication. 
\n\nAccording to Glassdoor, SREs typically require a Bachelor’s or graduate engineering or computer science degree. Salaries range widely, according to Glassdoor, hitting about $120,000 after 2 to 4 years of experience but can reach up to [$300,000 and higher](https://www.glassdoor.com/Salaries/us-site-reliability-engineer-salary-SRCH_IL.0,2_IN1_KO3,28.html) at the senior level.\n\nAt least one blogger feels [the SRE title](https://rootly.com/blog/should-you-be-an-sre-or-a-devops-engineer) carries more prestige and earning potential than DevOps engineers.\n\nTypical SRE responsibilities include everything from designing, developing, installing, and maintaining software solutions to working with engineering teams to refine deployment and release processes. Collaboration and communication are important job skills for the SRE role, as they need to work closely with multiple roles across the organization. At the time of this blog's publication, there were 4,000 SRE jobs on Glassdoor. Indeed had more than 5,000 SRE postings and ZipRecruiter showed [nearly 12,000 posts](https://www.ziprecruiter.com/candidate/search?radius=5000&amp;search=site+reliability+engineer&amp;location=Remote) for remote SRE jobs.\n\nPython, Go, and Java were the [most sought-after SRE skills](https://www.indeed.com/jobs=site%20Reliability%20Engineer&amp;l&amp;vjk=829f6081218e60bd) listed on Indeed.\n\nAccording to Indeed, SREs transition to \"DevOps engineer\" at a high rate.\n\n## DevOps engineers bridge the gap\n\nDevOps engineers, on the other hand, concentrate on removing obstacles to production and automation and [making development and IT work well together](https://harness.io/blog/sre-vs-devops/).\n\nLike SREs, DevOps engineers need to be good at working and communicating with others, eliminating barriers to increase speed and quality of code delivery. 
With typically less need to be on call, the DevOps engineer\nmay have a more favorable work-life balance than an SRE, who can have around-the-clock call.\n\nDevOps engineer work responsibilities include such things as analysis of technology utilized within the company and then developing steps and processes to improve and expand upon them. Project management is another key function, establishing milestones for departmental contributions and establishing processes to facilitate collaboration.\n\nThe educational requirements for the two roles are comparable, with a Bachelor’s degree in computer science or engineering or higher as the usual price of admission.\n\nAccording to Glassdoor, the salary range for DevOps engineers is slightly lower than that of SREs, from a low of about $63,000 up to a high of $234,000 for someone with [2 to 4 years of experience](https://www.glassdoor.com/Salaries/us-devops-engineer-salary-SRCH_IL.0,2_IN1_KO3,18.htm). \n\nDevOps engineer positions are easier to find than SREs. Glassdoor has more than 6,000 DevOps engineer job posts. Indeed has more than 17,000. And ZipRecruiter has [more than 81,000](https://www.ziprecruiter.com/candidate/search?radius=5000&amp;search=devops+engineer&amp;location=Remote) remote DevOps engineer listings.\n\n## New to the game\n\n[Cloud native](/topics/cloud-native) development and the desire to have a unified DevOps platform have brought a new role, the DevOps platform engineer, a position that [works in parallel with the site reliability engineering function](/topics/devops/what-is-a-devops-platform-engineer/).\n\nPlatform engineering teams apply development principles to accelerate software delivery, ensuring app dev teams are productive in all aspects of the lifecycle. Platform engineers focus on the entire software development lifecycle from source to production. 
From this introspective process, they build a workflow that enables application\ndevelopers to [rapidly code and ship software](https://www.getambassador.io/resources/rise-of-cloud-native-engineering-organizations/).\n\nYou can find a helpful description of the roles of SRE vs. DevOps engineer vs. platform engineer [here](https://iximiuz.com/en/posts/devops-sre-and-platform-engineering/).\n\nBut it’s hard to find much career data for this emerging role. Glassdoor, Indeed, and ZipRecruiter do not yet separate out this role from the category of “DevOps engineer,” and consolidated salary and career path data is not available at this time. It is reasonable to conclude this new role will have higher pay based on rarer skill sets and job experience. Suffice to say, this is a hot area and bears watching.\n\n## Benefits of a DevOps career\n\nThe DevOps industry (and technology as a whole) is constantly evolving. And that creates a lot of opportunities. There are lots of job opportunities cropping up based on how technology changes, and this also means that you can have many chances to learn a new skill and score a role where there is an employee shortage. \n\nThere is a high demand for fresh new talent who are also eager to keep learning and adapting to an ever-changing environment. And in this evolving world of DevOps, the more change that happens means there are endless learning opportunities that will help build you up professionally. This makes you a competitive hire in the future, as well as becoming part of a technological landscape that will always be needed. \n\n## Skills required for a DevOps career\n\nWhether you have goals to become an SRE, a full-fledged DevOps engineer, or start slow and figure out where you want to work in the DevOps space, there are both soft and technical skills that definitely are or may require for you to be successful in whichever role you pursue.\n\nSome soft skills include:\n\n1. 
**The ability to be flexible.** Projects can stop and start and change at any time for lots of reasons. Things break and get buggy on the regular.  Being able to go with that flow and maintain good levels of productivity and professionalism will take you far. \n2. **Good communication skills.** DevOps projects are rarely simple and not only require the ability to communicate your thoughts but the patience to listen to others. \n3. **Ability to work collaboratively.** There are multiple people involved with any given DevOps project. Be prepared to have discussions about various projects and be part of the development process as a team, not as an individual.\n\nSome of the more technical skills that can help your job pursuits include (but are by no means limited to):\n\n1. **CI/CD.** Aspiring engineers should look for ways to add CI/CD concepts to existing personal projects and code. Creating your own personal projects involving CI/CD is a good way to test your deployment skills while also creating a good proof of skills reference for job interviews. \n2. **Coding skills.** Familiarity with multiple languages, such as Rust, Java, JavaScript, Ruby, Python, PHP, Bash, and many more is important for a DevOps engineer. You need to be able to write and fix issues in multiple programming languages. \n3. **Cloud computing.** Lots of application infrastructures revolved around cloud technologies, so having a basic knowledge of cloud computing will give you a competitive edge. \n4. **Automation knowledge.** A lot of working in DevOps is being able to automate time-consuming processes that need to happen all at once. Diving into some automation knowledge will help you more easily integrate with a new DevOps role. \n\n## The future of DevOps\n\nAccording to a newer Forrester report, future success in DevOps will need people and their organizations to be open to a mindset and technology shift. 
New tools will come around, common practices may shift, and DevOps teams need to be able to adapt to changes while continuing to work together to deliver top-quality work. \n\nA few trends to keep an eye on as time progresses are serverless computing architecture, [the rise of DevSecOps](/topics/devsecops/), and low-code/no-code development to deploy applications swiftly with higher agility.\n",[1128,1408,792],{"slug":2714,"featured":6,"template":683},"career-spotlight-sre-vs-devops-engineer-vs-devops-platform-engineer","content:en-us:blog:career-spotlight-sre-vs-devops-engineer-vs-devops-platform-engineer.yml","Career Spotlight Sre Vs Devops Engineer Vs Devops Platform Engineer","en-us/blog/career-spotlight-sre-vs-devops-engineer-vs-devops-platform-engineer.yml","en-us/blog/career-spotlight-sre-vs-devops-engineer-vs-devops-platform-engineer",{"_path":2720,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":2721,"content":2727,"config":2731,"_id":2733,"_type":16,"title":2734,"_source":18,"_file":2735,"_stem":2736,"_extension":21},"/en-us/blog/how-a-devops-platform-can-help-solve-5-key-smb-frustrations",{"title":2722,"description":2723,"ogTitle":2722,"ogDescription":2723,"noIndex":6,"ogImage":2724,"ogUrl":2725,"ogSiteName":697,"ogType":698,"canonicalUrls":2725,"schema":2726},"How a DevOps platform can help solve 5 key SMB frustrations","SMBs already wear all of the hats. 
Here are 5 ways a DevOps platform can ease the burden.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749668242/Blog/Hero%20Images/assembly-3830652.jpg","https://about.gitlab.com/blog/how-a-devops-platform-can-help-solve-5-key-smb-frustrations","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How a DevOps platform can help solve 5 key SMB frustrations\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Sharon Gaudin\"}],\n        \"datePublished\": \"2022-04-25\",\n      }",{"title":2722,"description":2723,"authors":2728,"heroImage":2724,"date":2710,"body":2729,"category":14,"tags":2730},[1364],"\n\nStart-ups and small or medium-sized businesses (SMBs) face plenty of challenges, but several of those hurdles can be eased by [adopting a DevOps platform](https://page.gitlab.com/resources-ebook-smb-beginners-guide-devops.html). A DevOps platform can help not only address the issue at hand but the benefits can spread across the company, [helping it grow in a competitive and unpredictable market](/blog/6-ways-smbs-can-leverage-the-power-of-a-devops-platform/).\n\nThe United States alone is home to 32.5 million small businesses, making up 99.9 percent of all companies in the country, according to a [2021 report from the Small Business Administration’s Office of Advocacy](https://cdn.advocacy.sba.gov/wp-content/uploads/2021/08/30143723/Small-Business-Economic-Profile-US.pdf). And all of these companies have a tough road to travel – so tough that 20 percent of U.S. small businesses fail within the first year, according to the [U.S. Bureau of Labor Statistics](https://www.bls.gov/bdm/entrepreneurship/entrepreneurship.htm). By the end of the fifth year, about 50 percent are shuttered.\n\nStressed with common problems like worker overload, finding time for collaboration, and meeting customer and market needs, smaller businesses are under a lot of pressure. 
With SMBs and small or medium-sized enterprises (SMEs) facing such significant challenges, it only makes sense to streamline software development, [speed up deployments](/blog/pipelines-as-code/), automate repetitive tasks and [foster collaboration](/blog/collaboration-communication-best-practices/). Taking all those steps can greatly improve an SMB’s odds of success. \n\nHere’s how a DevOps platform can help take on some major SMB frustrations:\n\n## Ease worker fatigue and improve work/life balance\n\nSMBs, by definition, have fewer employees than their larger, more-established competitors. That means there are fewer people to take on all the tasks that need to be done. And that’s no different for the software development team, which could very well be a team of one. With everyone in an SMB having to wear so many hats and take on so many different jobs, it can be exhausting. That’s not only hard on productivity, it’s hard on employees’ work/life balance, and therefore not good for the business or the workforce.\n\nA DevOps platform offers an environment that fosters communication, collaboration and automation, which help ease the burdens on the IT staff. This will help [get work done more efficiently and faster](/blog/why-improving-continuously-speeds-up-delivery/), leaving employees with more time for other projects.\n\n## Satisfy customers\n\nHow can you find new customers when you’re not a household name? You do it by keeping the buyers you have and pulling in more by satisfying, and even delighting, your customer base. Satisfied consumers stick around, buy more, and give free word-of-mouth marketing.\n\nA DevOps platform helps SMBs create customer satisfaction by automating the customer feedback process and accelerating [software development and deployment](/blog/how-to-keep-up-with-ci-cd-best-practices/). 
\n\n## Increase communication and collaboration\n\nWorkers in start-ups and small businesses often take on a multitude of projects, and try to chip away at their burgeoning workflows. Meetings – within a department or cross-functional – may be either low priority or tough to arrange. A “heads’ down” attitude is understandable, but means different demographics and perspectives often won’t come together to [better innovate](/blog/pipelines-as-code/) and create more well-rounded products for a wider range of consumers. \n\nA DevOps platform promotes collaboration by eliminating barriers not just between IT workers but within an entire company. And that leads to more innovative features and products, improves productivity, and keeps employees happier and more engaged. Collaborative workers also are continuously learning from each other.\n\n## Adapt to the market with speed and agility\n\nEvery market can be unpredictable. New competitors appear. Customer expectations shift. Supply chain problems affect production. SMBs need to be able to change on a dime, to meet or get ahead of new demands and even new competitors.\n\nA DevOps platform [can keep a business of any size agile](/blog/can-an-smb-or-start-up-be-too-small-for-a-devops-platform/) by enabling a tech team to scale development and deployment to quickly and efficiently turning ideas into new features or new products.\n\n## Multiply a small business’ tech muscle\n\nSince small businesses, by definition, have fewer people, they obviously have smaller IT departments. They may even have a department of one. That can make it difficult to design, develop and deploy new software, not to mention come up with new and better ways to serve and communicate with customers and the supply chain. When [project planning is a joint, cross-functional effort](/blog/achieve-devsecops-collaboration/) it’s possible to do more with less. 
And having fewer DevOps tools involved - even having everyone use the same tool - can make a big difference.\n\nA DevOps platform, with automated options for everything from testing to monitoring and [doing GitOps with GitLab](/blog/the-ultimate-guide-to-gitops-with-gitlab/), can lessen the hands-on workload, giving IT people more time for other, more creative, projects.\n",[1128,1528,1428],{"slug":2732,"featured":6,"template":683},"how-a-devops-platform-can-help-solve-5-key-smb-frustrations","content:en-us:blog:how-a-devops-platform-can-help-solve-5-key-smb-frustrations.yml","How A Devops Platform Can Help Solve 5 Key Smb Frustrations","en-us/blog/how-a-devops-platform-can-help-solve-5-key-smb-frustrations.yml","en-us/blog/how-a-devops-platform-can-help-solve-5-key-smb-frustrations",{"_path":2738,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":2739,"content":2745,"config":2751,"_id":2753,"_type":16,"title":2754,"_source":18,"_file":2755,"_stem":2756,"_extension":21},"/en-us/blog/making-remote-work-better",{"title":2740,"description":2741,"ogTitle":2740,"ogDescription":2741,"noIndex":6,"ogImage":2742,"ogUrl":2743,"ogSiteName":697,"ogType":698,"canonicalUrls":2743,"schema":2744},"Tangram Vision engineers succeed at remote work with GitLab","The start-up's developers can collaborate efficiently, handling everything from merge requests to code reviews, and providing a single source of the truth.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749668018/Blog/Hero%20Images/allremote.jpg","https://about.gitlab.com/blog/making-remote-work-better","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"GitLab's DevOps platform enables Tangram Vision's engineering team to succeed at remote work\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Lauren Gibbons Paul\"}],\n        \"datePublished\": \"2022-04-21\",\n      
}",{"title":2746,"description":2741,"authors":2747,"heroImage":2742,"date":2748,"body":2749,"category":14,"tags":2750},"GitLab's DevOps platform enables Tangram Vision's engineering team to succeed at remote work",[2709],"2022-04-21","\n\nOn March 14, 2020, Tangram Vision CEO Brandon Minor flew from Colorado into the Bay Area to meet with COO Adam Rodnitzky. The two had just launched [Tangram Vision](https://www.tangramvision.com/), the company they co-founded to make sensors simpler for robotics, drones, and autonomous vehicles. Their plan was to, each month, alternate working at each other's location. However, that week, the Covid-19 pandemic lockdown began, forcing them to scrap that plan and figure out how to successfully collaborate from afar.\n\n“We didn’t see each other in person again for a very long time. That kicked off our remote work experience,” Minor says.\n\nThe Tangram Vision engineering team started using GitLab's DevOps platform, which enabled them to work together without missing a beat. “GitLab was a key tool that allowed us to work really fluidly in a remote context,” says Minor. “Our engineering team has placed GitLab at the core of our remote workflow because it reinforces our values and perspectives around working well remotely.”\n\nThe Tangram Vision Platform takes care of complex perception tasks like sensor fusion, calibration, and diagnostics built on a scalable data backend that allows engineers to track, optimize, and analyze every sensor in their fleet. Tangram Vision’s SDK includes tools for rapid sensor integration, multi-sensor calibration, and sensor stability, saving robotics engineers months of engineering time.\n\n## Supporting complex collaboration\n\nPerception systems are notoriously hard to get up and running and then maintain over time because of important lower-level activities like sensor integration and calibration. 
“We make sure all the sensors' data is running smoothly, everything's working together perfectly to basically a plug-and-play level. And then we enable the developers working on top of that to monitor and correct their system over time,” Minor says. \n\nTangram Vision has just launched a user hub that functions as a centralized sensor data center. The user hub joins their multi-sensor calibration module, as well as a multiplexing module that maintains stream reliability for all connected sensors. Developers can access a starter set of perception development tools (Tangram Vision Platform - Basic), which will be available on an open-source hub. Much of the initial user feedback will come through and be managed within repositories hosted on GitLab, both public and private, Minor says.\n\n## GitLab as a core for code\n\nThe engineering team has evaluated other platforms, according to Greg Schafer, senior web architect. “We’ve looked around but we've been very turned off by them for one reason or another. We really haven't swayed in wanting to use GitLab as our core for code,” Schafer says. \n\nThe team uses GitLab to manage branches and merge requests (MRs), boosting efficiency and control. “We were having a bit of a struggle early on managing the short-term flow. It was hard to put down tasks to paper. So, I dove deep into GitLab to see how it could help us there. And now that's what we use. GitLab is my product management tool,” Minor says.\n\nThe alternative, siphoning MRs into tools like Notion and Slack, would have been too cumbersome. “Having code-focused discussions in those places would've been very awkward vs. our current orientation of having those discussions in GitLab. Having that history of MRs and threads has been very useful,” Schafer says.\n\nDoing all of the code reviews in the MR itself builds a paper trail of documentation for the future. 
That means the team can look back at exactly when a change was introduced and find any discussion about potential trade-offs next to a change. This gives the engineers confidence in understanding the context behind a change months or years after it has been introduced. “It encourages team members to be able to work asynchronously, as that context is not held in any single individual’s head but instead written and made explicit,” Minor says.\n\n## A host of features and options in GitLab\n\nFor Rodnitzky, what stands out about GitLab is that it has a host of features and options in one place. “It’s not just hosting code and MRs and all those discussions and things around that, but also the [continuous integration/continuous delivery], having that tightly integrated is really helpful,” he says. For example, there are different types of reports that might show up on the MRs. GitLab makes it easy to reference different CI steps in the MRs. \n\n“You're not jumping to different websites or services to do that. It’s all in one place, which is super helpful,” he says.\n\nMinor agrees, and adds, “The amount of oversight I have into every process going on, the transparency that gives me as a product manager to make the next decision has been invaluable.” \n\nIt’s not a stretch to say the transparency enabled by GitLab is reflected in Tangram Vision’s business model. “We’re transparent with our customers and developers,” says Minor. “There are a couple of morsels of code that will be private for a while, but, for the most part, the mission of the company is to make any engineer a computer vision engineer. To do that, a lot of education and openness is required. 
That’s already part of our culture.”\n",[1428,1528,1128],{"slug":2752,"featured":6,"template":683},"making-remote-work-better","content:en-us:blog:making-remote-work-better.yml","Making Remote Work Better","en-us/blog/making-remote-work-better.yml","en-us/blog/making-remote-work-better",{"_path":2758,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":2759,"content":2765,"config":2771,"_id":2773,"_type":16,"title":2774,"_source":18,"_file":2775,"_stem":2776,"_extension":21},"/en-us/blog/how-the-dora-metrics-can-help-devops-team-performance",{"title":2760,"description":2761,"ogTitle":2760,"ogDescription":2761,"noIndex":6,"ogImage":2762,"ogUrl":2763,"ogSiteName":697,"ogType":698,"canonicalUrls":2763,"schema":2764},"How the DORA metrics can help DevOps team performance ","The best DevOps teams measure their results. Here's a deep dive into the DORA metrics that matter.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749676702/Blog/Hero%20Images/data.jpg","https://about.gitlab.com/blog/how-the-dora-metrics-can-help-devops-team-performance","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How the DORA metrics can help DevOps team performance \",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Aathira Nair\"}],\n        \"datePublished\": \"2022-04-20\",\n      }",{"title":2760,"description":2761,"authors":2766,"heroImage":2762,"date":2768,"body":2769,"category":14,"tags":2770},[2767],"Aathira Nair","2022-04-20","\n\n_Accelerated adoption of the cloud requires tools that aid in faster software delivery and performance measurements.  Delivering visibility across the value chain, the DORA metrics streamline alignment with business objectives, drive software velocity, and promote a collaborative culture._ \n\nSoftware delivery, operational efficiency, quality - there is no shortage of challenges around digital transformation for business leaders. 
\n\nCustomer satisfaction, a prominent business KPI, has paved the way for experimentation and faster analysis resulting in an increased volume of change in the software development lifecycle (SDLC). Leaders worldwide are helping drive this culture of innovation aligned with organization goals and objectives. However, it is not always about driving the culture alone; it is also about collaboration, visibility, velocity, and quality. \n\nCloud computing and microservices are driving the cloud-first approach for software delivery, helping to scale them independently, and allowing teams to move faster. But, without DevOps, the team doesn’t have the underlying core to move fast efficiently. DevOps has the power to enable the smallest changes that can have great effects. \n\nThis brings us to the question - how do you measure velocity and impact? Or how do you assess quality, and ensure that it is not hampered by velocity? The latter would be what is commonly referred to as technical debt.\n\n## A continuous journey needs continuous improvement\n\nAny improvement starts with measurement. Measuring and optimizing DevOps practices improves developer efficiency, overall team performance, and business outcomes. DevOps metrics demonstrate effectiveness, shaping a culture of innovation and ultimately overall digital transformation. In the [Accelerate State of DevOps 2021](https://cloud.google.com/blog/products/devops-sre/announcing-dora-2021-accelerate-state-of-devops-report) report by the DevOps Research and Assessment (DORA) team at Google Cloud, which draws insights from 7 years of data collection and research, four metrics are the key to measure software delivery performance.\n\n## What are these metrics?\n\n- Deployment Frequency\n- Lead time for changes\n- Time to restore service\n- Change failure rate\n\n### Deployment Frequency\n\nLet’s start with the velocity of development. 
Deployment frequency measures how often the organization deploys code to production or releases it to end users. This metric borrows from lean manufacturing concepts, wherein small multiple batch sizes are the preferred approach for higher efficiency and more rapid adjustments.\n\n### Lead time for changes\n\nNow comes the extent of automation in your processes. Lead time for changes measures the time needed to take a committed code to successfully run in production. This is one of the two metrics with significant variance in the data. \n\n### Time to restore service\n\nThis represents a business' capacity. Time to restore service measures the time needed to restore services to the level they were previously, in case of an incident. Here too we see significant variance in the data.\n\n### Change failure rate\n\nAnd finally, we take a look at quality. Changes which cause a failure in the system – a deployment failure, an incident, a rollback or a remedy – all contribute to measuring the change failure rate. \n\n## Driving visibility into the DevOps lifecycle\n\nRecently, Zoopla used DORA metrics to boost deployments and increase automation. Understanding the root cause of their problems helped them make informed adjustments in their process workflows, automation, tools, and more. They recognized the value of using a single platform to overcome roadblocks in velocity and innovation. This brought added visibility into their system which helped improve measurement and analytics. \n\nOur [2021 Global DevSecOps Survey](/developer-survey/) shows engineers are happier when they can focus on innovation and adding value than when maintaining integrations. In fact they would rather focus on higher quality documentation which can further amplify results of investments in DevOps capabilities. Documentation and visibility together drives team performance and competitive advantage. 
\n\nVisibility driven through [DORA metrics](https://docs.gitlab.com/ee/user/analytics/#supported-dora-metrics-in-gitlab) can uncover bottlenecks such as a dysfunction in code review, allowing management to identify causes of slowdowns in the DevOps lifecycle, and enable engineering leaders to align with business priorities. This delivers continuous improvement and progress towards business goals, promoting a collaborative culture across the organization.\n\nThe team at Zoopla used the GitLab DevOps platform to obtain metrics for deploy frequency, lead time, change fail rate, and time to onboard. \n\n![VSA-DORA](https://about.gitlab.com/images/blogimages/VSA-DORA.png)\n\nThe metrics helped influence decision making and prioritization at Zoopla. Teams were encouraged to learn from the metrics, and incorporate changes into their planning cycles to keep on the path of continuous improvement. They were successful in measuring improvements and building an efficient engineering team that was flexible in responding to business needs. 
\n\n[Read more on [how Zoopla used DORA metrics for continuous improvement](/blog/how-zoopla-uses-dora-metrics-and-your-team-can-too/) and the [DORA metrics API in GitLab](https://docs.gitlab.com/ee/api/dora/metrics.html#devops-research-and-assessment-dora-key-metrics-api)]\n",[1128,1408,1428],{"slug":2772,"featured":6,"template":683},"how-the-dora-metrics-can-help-devops-team-performance","content:en-us:blog:how-the-dora-metrics-can-help-devops-team-performance.yml","How The Dora Metrics Can Help Devops Team Performance","en-us/blog/how-the-dora-metrics-can-help-devops-team-performance.yml","en-us/blog/how-the-dora-metrics-can-help-devops-team-performance",{"_path":2778,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":2779,"content":2785,"config":2790,"_id":2792,"_type":16,"title":2793,"_source":18,"_file":2794,"_stem":2795,"_extension":21},"/en-us/blog/gitlab-is-now-an-approved-slp-vendor-in-california",{"title":2780,"description":2781,"ogTitle":2780,"ogDescription":2781,"noIndex":6,"ogImage":2782,"ogUrl":2783,"ogSiteName":697,"ogType":698,"canonicalUrls":2783,"schema":2784},"GitLab is now an approved SLP vendor in California","State and local agencies in California can now purchase GitLab licenses at an agreed-upon discount.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749668402/Blog/Hero%20Images/code-gitlab-tanuki.png","https://about.gitlab.com/blog/gitlab-is-now-an-approved-slp-vendor-in-california","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"GitLab is now an approved SLP vendor in California\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"GitLab\"}],\n        \"datePublished\": \"2022-04-19\",\n      }",{"title":2780,"description":2781,"authors":2786,"heroImage":2782,"date":2787,"body":2788,"category":14,"tags":2789},[1524],"2022-04-19","GitLab is now an approved vendor under the Software Licensing Program (SLP) with the state of 
California. This contract allows state and local agencies, including educational institutions in California, to purchase GitLab software licenses at an agreed-upon discount, reducing costs and streamlining the procurement process. Under the contract, agencies will have greater access to GitLab’s complete DevOps solution, which empowers organizations to deliver software faster and more efficiently.\n\nEstablished in 1994, [California’s SLP](https://www.dgs.ca.gov/PD/About/Page-Content/PD-Branch-Intro-Accordion-List/Acquisitions/Software-Licensing-Program) is managed by the Procurement Division of the Department of General Services. The program provides government agencies and institutions with discounted rates for software licenses and upgrades, reducing the need for individual departments to conduct repetitive acquisitions. \n\n“There’s an exciting opportunity for public sector agencies to benefit from automated DevOps practices,” says [Bob Stevens](/company/team/#bstevens1), GitLab’s area vice president for Public Sector Federal. “This contract makes it simpler and more cost-effective for agencies to adopt The DevOps Platform, and deliver more resilient and efficient applications while keeping security at the forefront.”  \n\nGitLab believes that this contract, which makes The DevOps Platform more accessible and cost-effective, will expedite the broader adoption of DevOps in the [public sector](/solutions/public-sector/). GitLab’s single application will enable greater collaboration within public sector agencies, allowing teams to partner on planning, building, securing, and deploying software. \n\nTo streamline the process, GitLab will work with channel partners including [Acuity Technical Solutions](https://www.acuitytechnical.com), [Launch Consulting](https://www.launchconsulting.com) and [Veteran Enhanced Technology Solutions](https://veteranets.com/). 
\n\n“Public sector agencies are under tremendous pressure to transform and streamline their software development processes,” said [Michelle Hodges](/company/team/#mwhodges), GitLab’s vice president of global channels. “We’re proud to extend the power of our platform to a new network of customers via trusted channel partners and to help evolve the ways in which they collaborate on and deliver software.”",[1128,284,750],{"slug":2791,"featured":6,"template":683},"gitlab-is-now-an-approved-slp-vendor-in-california","content:en-us:blog:gitlab-is-now-an-approved-slp-vendor-in-california.yml","Gitlab Is Now An Approved Slp Vendor In California","en-us/blog/gitlab-is-now-an-approved-slp-vendor-in-california.yml","en-us/blog/gitlab-is-now-an-approved-slp-vendor-in-california",{"_path":2797,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":2798,"content":2804,"config":2810,"_id":2812,"_type":16,"title":2813,"_source":18,"_file":2814,"_stem":2815,"_extension":21},"/en-us/blog/how-to-learn-ci-cd-fast",{"title":2799,"description":2800,"ogTitle":2799,"ogDescription":2800,"noIndex":6,"ogImage":2801,"ogUrl":2802,"ogSiteName":697,"ogType":698,"canonicalUrls":2802,"schema":2803},"How to learn CI/CD fast","Continuous integration and continuous delivery (CI/CD) are critical to faster software releases and it's less complicated than it seems to get rolling. 
Here's how to start fast with CI/CD.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749668027/Blog/Hero%20Images/cicd.jpg","https://about.gitlab.com/blog/how-to-learn-ci-cd-fast","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How to learn CI/CD fast\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Mike Vanbuskirk\"}],\n        \"datePublished\": \"2022-04-13\",\n      }",{"title":2799,"description":2800,"authors":2805,"heroImage":2801,"date":2807,"body":2808,"category":14,"tags":2809},[2806],"Mike Vanbuskirk","2022-04-13","\nContinuous integration and continuous delivery (CI/CD) have become the keystone technical architecture of successful DevOps implementations. CI/CD has a reputation for being complex and hard to achieve, but that doesn’t have to be the case. Modern tools enable teams to get started with minimal configuration and infrastructure management. Here’s how you can “start fast” with CI/CD and get some quick, demonstrable performance wins for your DevOps team.\n\n## What does CI/CD mean?\n\n[CI/CD](/topics/ci-cd/) refers to a system or systems that enable software development to have continuous integration and continuous delivery capabilities. The architecture underpinning CI/CD is typically referred to as a pipeline, as software progresses through various stages akin to flowing through a pipe. What does [continuous integration and continuous delivery](/blog/basics-of-gitlab-ci-updated/) actually mean? Taking some time to explore the more granular details will help us set some goals for getting a fast start with CI/CD.\n\nStarting on the left side of the pipeline, continuous integration encompasses a variety of automation that occurs over the course of multiple stages, designed to test and provide quick feedback on different aspects of code quality, functionality, and security. 
CI testing can run the gamut from unit tests and linting run locally on a developer workstation, to full integration testing suites and static analysis. Anyone that's ever seen a small code change cause a significant outage or breakage upon reaching production knows the value of automated, repeatable testing, and the downsides of depending on manual testing.\n\nOnce a code change has passed testing, it's time to deploy. In legacy environments, system administrators and operations staff often had to manually transfer and install updates, and reboot servers to deploy new features. This type of manual work simply does not scale to the demands of the modern application ecosystem, and is error prone to boot. With continuous delivery, that code is automatically deployed to servers in a testable and deterministic way. Code [can be staged in environments](/blog/ci-deployment-and-environments/) with less strict SLAs, such as development, staging, and QA. Once it has been verified, the new features can be launched as production workloads. In some environments, \"continuous delivery\" becomes \"continuous deployment\", in which comprehensive testing automatically deploys new code through to production without human intervention.\n\nWhat's the ultimate goal of all this automation? It's what makes a successful software organization: faster deployment cadence.\n\n## Getting started with CI/CD\n\nWith a little background established, now it's time to focus on the key objective: getting up and running quickly. The primary goal here is to get a quick win with a CI/CD implementation to improve deployment velocity, and hopefully drive a larger effort towards standardizing on widespread and effective CI/CD usage.\n\nGetting started with CI/CD can appear daunting. There is a wealth of tools, services, and platforms available to provide specific functionality and end-to-end solutions for CI/CD. 
Some options like [Jenkins](https://www.jenkins.io) are self-managed; others, including GitLab, have a holistic CI/CD pipeline with integrated version control.\n\n## Build your pipeline\n\nRealistically, there is no magic bullet configuration for CI/CD. Each implementation will be highly dependent on a number of factors: the type of application being deployed, the size and skillset of the engineering team/s, the business requirements, and the scale of the application itself. The design and implementation considerations for an application that might see 100 users per day is vastly different from one that sees 1 million. The same holds true for CI/CD.\n\nBelow are 5 high-level strategies for tackling that first CI/CD pipeline:\n\n### 1. Start small\n\nDon't try to fix everything at once. Attempts to refactor an entire codebase or infrastructure will be a complex process, typically involving multiple layers of approval, discussion, planning, and possible pushback from dependent teams. It's much easier to choose a small subset of the application infrastructure to improve.\n\n### 2. Catch low-hanging fruit early\n\nSome of the simplest and easiest to detect (and fix) errors can end up causing the biggest problems if they make it into production workloads. However, it might not make sense to add unnecessary steps or complexity to the CI/CD pipeline. In this instance, it’s a good choice to configure some automatic testing to take place on developer machines before code is committed. Most Git DVCS providers, including GitLab, allow users to deploy pre-commit hooks. Pre-commit hooks are typically some type of script or automation that are triggered when specific actions occur. For example, when a developer initiates a new commit, a pre-commit hook might check that the code conforms to syntactical and structural standards, and is free from basic syntax errors. 
Other pre-commit hooks might ensure that unit tests are run successfully before a commit is allowed to proceed into the larger pipeline.\n\n### 3. Make security a part of CI/CD\n\nTests shouldn't just be limited to syntax and logic. Catching security issues early in the software development lifecycle (SDLC) means they are much easier, cheaper, and safer to fix. Adding some basic [static code analysis tools](https://docs.gitlab.com/user/application_security/sast/customize_rulesets/) and dependency checkers can vastly improve the security posture of an application by providing fast feedback and early detection of common security problems and potential vulnerabilities.\n\n### 4. Tailor tests to common issues\n\nMost engineering teams that rely on legacy deployment methodologies should be able to easily identify one or two common, recurring issues in deployments. Perhaps copying application code to servers via SCP always results in broken file permissions, or an [NGINX](https://www.nginx.com) frontend is never properly restarted. For the first iteration of [automated testing](/blog/want-faster-releases-your-answer-lies-in-automated-software-testing/), choose these specific issues to address with testing. This serves two purposes; it limits the scope of work and gives the team an achievable [\"definition of done,\"](https://www.leadingagile.com/2017/02/definition-of-done/) and it provides a highly visible success story by fixing the most problematic existing deployment problems. Once a working pipeline has been deployed and there is organizational buy-in, the testing suite can be expanded.\n\n### 5. Automate deployment to lower environments\n\nNew CI/CD implementations should [focus on continuous delivery](/blog/cd-solution-overview/), automatically deploying to a staging environment, and providing a manual decision interface for deploying to production. 
Continuous deployment is generally a step that should be taken further in the DevOps journey when there is more collective knowledge and technical maturity around automated deployments.\n\n## Get a fast start with CI/CD\n\nA good CI/CD implementation can measurably improve software deployment velocity and is a core pillar of a solid DevOps strategy. However, the first attempt at utilizing CI/CD should eschew heavy, complex deployments whenever possible, instead focusing on a \"batteries-included\" approach that provides teams with a short time-to-value cycle.\n\nOnce CI/CD provides that quick win, engineering teams can build on that momentum and buy-in to scale the solution across the entire organization, improving deployment speed and outcomes throughout.\n",[111,1128,1428],{"slug":2811,"featured":6,"template":683},"how-to-learn-ci-cd-fast","content:en-us:blog:how-to-learn-ci-cd-fast.yml","How To Learn Ci Cd Fast","en-us/blog/how-to-learn-ci-cd-fast.yml","en-us/blog/how-to-learn-ci-cd-fast",{"_path":2817,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":2818,"content":2824,"config":2829,"_id":2831,"_type":16,"title":2832,"_source":18,"_file":2833,"_stem":2834,"_extension":21},"/en-us/blog/6-ways-smbs-can-leverage-the-power-of-a-devops-platform",{"title":2819,"description":2820,"ogTitle":2819,"ogDescription":2820,"noIndex":6,"ogImage":2821,"ogUrl":2822,"ogSiteName":697,"ogType":698,"canonicalUrls":2822,"schema":2823},"6 ways SMBs can leverage the power of a DevOps platform","Bringing a DevOps platform into a small business can be a game changer. It can also cut down on the hat wearing. 
Here are the top 6 benefits.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749668224/Blog/Hero%20Images/inside-our-new-development-team-lead-persona.jpg","https://about.gitlab.com/blog/6-ways-smbs-can-leverage-the-power-of-a-devops-platform","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"6 ways SMBs can leverage the power of a DevOps platform\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Sharon Gaudin\"}],\n        \"datePublished\": \"2022-04-12\",\n      }",{"title":2819,"description":2820,"authors":2825,"heroImage":2821,"date":2826,"body":2827,"category":14,"tags":2828},[1364],"2022-04-12","\nA small or medium-sized business (SMB) or enterprise (SME) is likely working with a small staff but facing a big workload and even bigger expectations. Creating applications that will expand the customer base, keep up with a changing market, and take on competitors with deeper pockets can be daunting.\n\nIt’s possible to ease those burdens by choosing a single, end-to-end DevOps platform. Productivity will skyrocket and so will opportunities to [grow the company](https://page.gitlab.com/resources-ebook-smb-beginners-guide-devops.html).\n\nOf course, DevOps offers significant technical benefits, like testing and building at scale with [continuous integration and continuous delivery](/blog/how-to-keep-up-with-ci-cd-best-practices/), a shorter lead time with automated deployment, and [fewer production failures with earlier error detection](/blog/iteration-on-error-tracking/). But a DevOps platform also offers myriad business benefits to help support and expand a start-up or SMB.\n\nHere are six more ways a DevOps platform can help an SMB:\n\n## Improved customer satisfaction\n\nUsing a DevOps platform means iteration can happen faster. And that’s critical for SMBs that need to be able to quickly make changes to meet customer needs. 
DevOps also provides a way to [better monitor users’ feedback](/blog/cd-unified-monitor-deploy/) and makes it easier to respond with more speed and agility. And it reduces Change Failure Rates, increasing application reliability and stability.\n\nAll of this means SMBs will be more able to give clients what they want and need, all while creating an engaging customer experience. Closer customer ties create trust and keep users loyal to products. \n\n## Better security\n\nA DevOps platform embeds security to help seamlessly achieve a DevSecOps approach, a cornerstone of [incorporating security scanning early in the software development lifecycle](/blog/efficient-devsecops-nine-tips-shift-left/). By integrating testing and security reviews earlier in the process, and by using end-to-end automation, there are more opportunities to quickly and efficiently address any security issues. This reduces the time between designing new, higher-quality features and rolling them out into production. That's the beauty of a platform approach to DevOps – security isn't an afterthought. It’s part of the entire process.\n\nDevOps not only speeds production but creates more secure applications. And, simply put, more secure software makes for a more trusted product offering… and for happier, more satisfied customers.\n \n## True collaboration and innovation\n\nCollaboration is one of the basic tenets of DevOps. By [fostering communication and innovation](/blog/collaboration-communication-best-practices/), DevOps not only encourages developers and IT to work together, it also supports collaboration throughout the entire company. This is one area where SMBs have a huge advantage: With fewer employees, who also might be less set in their ways, collaboration and innovation are inherently more inclusive in a small business. [An SMB or start-up is never too small for DevOps](/blog/can-an-smb-or-start-up-be-too-small-for-a-devops-platform/). 
By inviting discussion and assistance from all team members, DevOps creates a culture built around learning from and relying on others’ expertise; it also brings more ideas to the table. \n\n## Happier employees and better retention\n\nThe greatest resource a company has is its people. This is even more true for small companies where the pain of employee dissatisfaction and departure is felt even more acutely. Managers also don’t want projects waylaid because the people driving them are leaving.\n\nTo stop that from happening, it’s critical the workplace [keeps employees happy](/blog/why-software-developer-job-satisfaction-matters-and-how-to-make-it-happen/). \n\nRetaining a tech team isn’t just about perks, like in-office meditation pods, cereal stations, and foosball tables. Companies also need to give developers the processes and tools they need to be efficient, add automation, and make it easier to find and fix security and compliance issues. A single, end-to-end DevOps platform offers a solution for all of those issues. In our [2021 Global DevSecOps Survey](/developer-survey/), more than 13% of respondents said DevOps makes developers happier or makes their team more attractive to potential new employees. \n\n## Improved decision-making\n\nSmall or medium-sized businesses may lack their larger competitors’ resources, but their agility helps them quickly turn a big idea into action that grows the customer base and profits. A DevOps platform has built-in processes and methods to help sustain an SMB’s agile advantage as it grows, so innovative ideas can scale more quickly and smoothly into products, and ultimately new lines of revenue. Automate more and with higher visibility to make fewer and better decisions.\n\n## Wear all the hats\n\nIt might be a cliche, but it’s also true: SMB employees have to wear all the hats. 
Code writing, customer service, trouble-shooting, accounts payable… SMB teams are masters at multitasking, but that’s not always the most productive way to be.\n\nA DevOps platform makes it [easier to reduce context-switching](/blog/want-faster-releases-your-answer-lies-in-automated-software-testing/) and work cross-functionally because everyone is using the same tool. Built-in automation reduces the number of tasks that need to be done manually and aids in collaboration. \n\nAt the end of the day, a complete DevOps platform isn’t a shiny toy, it’s a critical SMB tool. Adopting a platform can make an SMB even more nimble, efficient, and able to scale. DevOps readies an SMB to take on bigger competitors with deeper pockets. And that will enable the business to become what its founders and executives envision.\n",[1128,750,111],{"slug":2830,"featured":6,"template":683},"6-ways-smbs-can-leverage-the-power-of-a-devops-platform","content:en-us:blog:6-ways-smbs-can-leverage-the-power-of-a-devops-platform.yml","6 Ways Smbs Can Leverage The Power Of A Devops Platform","en-us/blog/6-ways-smbs-can-leverage-the-power-of-a-devops-platform.yml","en-us/blog/6-ways-smbs-can-leverage-the-power-of-a-devops-platform",{"_path":2836,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":2837,"content":2843,"config":2848,"_id":2850,"_type":16,"title":2851,"_source":18,"_file":2852,"_stem":2853,"_extension":21},"/en-us/blog/can-an-smb-or-start-up-be-too-small-for-a-devops-platform",{"title":2838,"description":2839,"ogTitle":2838,"ogDescription":2839,"noIndex":6,"ogImage":2840,"ogUrl":2841,"ogSiteName":697,"ogType":698,"canonicalUrls":2841,"schema":2842},"Can an SMB or start-up be too small for a DevOps platform?","It may sound counter-intuitive but even a very small company or startup can take advantage of the power of a DevOps platform. 
Here's how.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749668213/Blog/Hero%20Images/innersourcing-improves-collaboration-within-an-organization.jpg","https://about.gitlab.com/blog/can-an-smb-or-start-up-be-too-small-for-a-devops-platform","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Can an SMB or start-up be too small for a DevOps platform?\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Sharon Gaudin\"}],\n        \"datePublished\": \"2022-04-06\",\n      }",{"title":2838,"description":2839,"authors":2844,"heroImage":2840,"date":2845,"body":2846,"category":14,"tags":2847},[1364],"2022-04-06","\n\nIf you work in an IT team of five people – or maybe you’re even a team of one – it’s easy to think your business is simply too small to use DevOps.\n\nBut that’s not the case. A start-up or small and medium-sized business (SMB) is never too small to take advantage of a DevOps platform. \n\nIn fact, DevOps is [a great fit for a lot of SMBs](https://page.gitlab.com/resources-ebook-smb-beginners-guide-devops.html), or small and medium-sized enterprises (SMEs). Here’s how to understand if it will work for your team or organization and how it could help grow your business in a competitive environment.\n\n## The size of the business isn’t the issue\n\nLet’s be clear. If you are developing software, you need a DevOps platform. Size isn’t really the issue. No matter how small your business and your tech team, if you are iterating on software features, building applications, or automating parts of your product-related systems, then you do need DevOps. DevOps will even work for a team of one.\n\nHere’s how a DevOps platform can help an SMB:\n\n### Start small to foster innovation\n\nOne of the key aspects of DevOps is that it creates a [collaborative atmosphere](/blog/collaboration-communication-best-practices/), even beyond the software and IT teams. 
Adopting a single, end-to-end DevOps platform when your company is small or your start-up is just getting off the ground will enable and encourage everyone – whether they’re in a technical role or work in accounting, sales or as a business manager – to all work together. And that will foster innovation by bringing in ideas from people in a range of demographics and business interests. And innovative ideas will help new businesses get a foot in the door and help all SMBs grow into more successful and bigger companies.\n\n### Optimize your SMB for speed\n\nTo get established in the market, start-ups and small businesses need to deliver compelling products quickly, and be able to efficiently support them. DevOps will enable your team to [move from planning to production](/blog/pipelines-as-code/) faster and with greater ease. A DevOps platform extends through the entire software development lifecycle, from planning all the way through to launching new features, conducting analysis, and gathering feedback. Simply put, DevOps will optimize your organization for speed, which is just what SMBs and SMEs need.\n\n### Use DevOps to take on the “deep pockets”\n\nAs an SMB, you likely don’t have the deep pockets and market penetration of your more-established competitors. How do you boost your odds when taking them on? One way to increase your competitiveness is to use DevOps to boost speed and efficiency as you create new products, new services, and new ways to communicate with your customers. When you can deploy innovative ideas faster than your competitors, you’ll have a definite advantage.\n\n### Decrease your workload with automation\n\nWhen you have fewer hands to take on a huge workload, you need a way to not only speed production but to ease the number of tasks you’re facing – and all the headaches that come along with them. 
The [automation that is part of a DevOps platform](/blog/want-faster-releases-your-answer-lies-in-automated-software-testing/) will mean less manual work when it comes to processes like design, testing, development, deployment, and monitoring. Automation helps small teams free up time to handle all the other projects on their to-do lists. \n\n### Build security into software from the get-go\n\nWhen a company is getting started, it’s the perfect time to use DevOps to help build security into the code and processes from the very beginning. Small companies and startups need to “shift left” and focus on [security at the earliest stages](/blog/want-secure-software-development-our-top-5-tips-to-bring-dev-and-sec-together/). When security is baked in from the start, you won’t have to go back in later on to fix problems that could jeopardize your customers and your business.\n\n### Use DevOps to avoid silos\n\nMaybe your company is small enough that silos aren’t a problem… yet. But as a company grows, people often naturally separate off into [silos or groups that do not communicate with or understand each other](/blog/developing-a-successful-devops-strategy/). And they definitely don’t work well together. By fostering collaboration among IT teams and even non-technical groups across the business, a DevOps platform makes it easier to keep these silos from forming in the first place, and to break them down if they do form. As companies grow from 10 employees to 100 (or more), DevOps will help an organization stay connected and collaborative as it expands.\n\n### Start early to ensure collaboration \n\nIt’s easier to create a collaborative culture from the very beginning – when a company is still a start-up or an SMB – than to overhaul a large, established organization. 
Instilling an environment of [communication and collaboration](/blog/if-its-time-to-learn-devops-heres-where-to-begin/) is less disruptive and easier to manage in a company of 10, 25, or even 100 than in a much larger and complex business that is adding hundreds of employees a year. SMBs have the “nimble” advantage, meaning that change is easier than for larger competitors. \n\nSo there is no company too small to take advantage of a DevOps platform.\n",[1128,1428,1428],{"slug":2849,"featured":6,"template":683},"can-an-smb-or-start-up-be-too-small-for-a-devops-platform","content:en-us:blog:can-an-smb-or-start-up-be-too-small-for-a-devops-platform.yml","Can An Smb Or Start Up Be Too Small For A Devops Platform","en-us/blog/can-an-smb-or-start-up-be-too-small-for-a-devops-platform.yml","en-us/blog/can-an-smb-or-start-up-be-too-small-for-a-devops-platform",{"_path":2855,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":2856,"content":2862,"config":2869,"_id":2871,"_type":16,"title":2872,"_source":18,"_file":2873,"_stem":2874,"_extension":21},"/en-us/blog/kontra-and-gitlab-integrate-vulnerability-education-into-the-devops-workflow",{"title":2857,"description":2858,"ogTitle":2857,"ogDescription":2858,"noIndex":6,"ogImage":2859,"ogUrl":2860,"ogSiteName":697,"ogType":698,"canonicalUrls":2860,"schema":2861},"Integrating vulnerability education into DevOps workflows","Interactive training labs are now available within the GitLab platform from Kontra Application Security, a ThriveDX company.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749668199/Blog/Hero%20Images/KontraCover.png","https://about.gitlab.com/blog/kontra-and-gitlab-integrate-vulnerability-education-into-the-devops-workflow","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Kontra and GitLab integrate vulnerability education into the DevOps workflow\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Gyan 
Chawdhary\"}],\n        \"datePublished\": \"2022-03-31\",\n      }",{"title":2863,"description":2858,"authors":2864,"heroImage":2859,"date":2866,"body":2867,"category":14,"tags":2868},"Kontra and GitLab integrate vulnerability education into the DevOps workflow",[2865],"Gyan Chawdhary","2022-03-31","\n\nInteractive training labs are now available within the GitLab DevOps platform from Kontra Application Security, a ThriveDX company. This integration allows GitLab users to access Kontra’s interactive security training modules from the familiar Merge Request (MR) and pipeline experiences to quickly learn about and fix vulnerabilities reported through automated security scans.\n\nKontra’s content is also available in GitLab’s vulnerability management features, providing the same easy access to training on vulnerabilities identified from these same security scans, as well as other sources such as penetration tests or bug bounty programs. By putting interactivity into our learning simulations, we put the developer first, helping them to understand the risk and impact of a vulnerability from an attacker's perspective.\n\n## So, what is Kontra?\n\nKontra is a scalable Application Security Training platform powered by ThriveDX. This training application was built for modern development teams and it aims to give developers the most advanced security simulations for the best quality training. Kontra works by creating short educational sessions of real-life security incidents to give developers the necessary skills to build and maintain secure application code. \n\nBy going through a simulated security scenario, developers gain better insight into how to get ahead of would-be cyber attackers. 
\n\n## The benefits of interactive developer security education\n\nAs enterprise developers become increasingly responsible for the security and integrity of their applications, they require relevant, actionable, and engaging security education that enables them to:\n\n- quickly understand and resolve security vulnerabilities\n- design controls to proactively prevent security issues\n- confidently communicate and assign security issues within engineering teams\n\nUnfortunately, these skills are almost never taught in academic courses or coding bootcamps. To address this gap, enterprise software developers often undergo annual developer security training, which typically involves consuming a PowerPoint presentation or watching a recorded presentation on software vulnerabilities and issues. The problem with this style of training is that it lacks actionable explanations, is too passive, or contains generic content that doesn't resonate with developers and engineers.\n\nKontra’s short training sessions are designed to be played in less than five minutes, ensuring that the correct explanations are provided to the developer to fully understand the security impact of a reported vulnerability and how to address it. The short sessions also make it easier to apply the security fix to the code.\n\n## The elements of interactive training\n\nThe most important aspect of training and education is how you convey and communicate ideas visually. This requires strong visual design, empathy, aesthetics, and communication with the learner. Kontra’s interactive training tutorials are offered in multiple programming languages and frameworks, ensuring each lesson is relevant to the developer.\n\nKontra’s learning environment consists of many different interactive UI elements which, depending on a specific vulnerability, are dynamically shown to the learner, ensuring that both the context and the impact of a vulnerability are demonstrated. 
\n\n![Kontra learning console](https://about.gitlab.com/images/blogimages/Kontra1.png){: .shadow}\n\n## How developers experience the vulnerability education integration\n\nTo have the highest impact, training is placed prominently, yet unobtrusively, where developers spend time: in MRs and pipelines. Developers can view vulnerabilities found by automated security scans in a dedicated MR security widget as well as a pipeline security tab. Clicking on a vulnerability shows its details such as a description and any identifiers such as a [Common Vulnerabilities and Exposures (CVE)](https://cve.mitre.org/) or [Common Weakness Enumeration (CWE)](https://cwe.mitre.org/). Once enabled, GitLab can now place a link to a relevant training from Kontra right in this details view. The identifier is used to dynamically locate the relevant content. And for security professionals, the same training content is available when viewing vulnerability details pages from GitLab’s Vulnerability Reports. \n\n## How to install and configure Kontra training\n\nKontra’s training is available to all [GitLab Ultimate](/pricing/ultimate/) customers. Simply [enable it](https://docs.gitlab.com/ee/user/application_security/vulnerabilities/#enable-security-training-for-vulnerabilities) for any desired projects.\n\n\n![Kontra security configuration](https://about.gitlab.com/images/blogimages/Kontra3.png){: .shadow}\n\nThen, look at the results from a [GitLab security scan](https://docs.gitlab.com/ee/user/application_security/#security-scanning-tools) (or one of GitLab’s [integration partners](/partners/technology-partners/#security)) in an MR, pipeline security tab, or a vulnerability details page. When you open a vulnerability record, you will see a direct link to training. 
GitLab will pull a training from Kontra that most closely matches the particular security issue and the specific language or framework in which it was detected.\n\n![Kontra predictable pseudorandom number generator](https://about.gitlab.com/images/blogimages/Kontra2.png){: .shadow}\n\n_Chawdhary is head of application security at ThriveDX SaaS._\n",[233,750,1128],{"slug":2870,"featured":6,"template":683},"kontra-and-gitlab-integrate-vulnerability-education-into-the-devops-workflow","content:en-us:blog:kontra-and-gitlab-integrate-vulnerability-education-into-the-devops-workflow.yml","Kontra And Gitlab Integrate Vulnerability Education Into The Devops Workflow","en-us/blog/kontra-and-gitlab-integrate-vulnerability-education-into-the-devops-workflow.yml","en-us/blog/kontra-and-gitlab-integrate-vulnerability-education-into-the-devops-workflow",{"_path":2876,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":2877,"content":2882,"config":2887,"_id":2889,"_type":16,"title":2890,"_source":18,"_file":2891,"_stem":2892,"_extension":21},"/en-us/blog/if-its-time-to-learn-devops-heres-where-to-begin",{"title":2878,"description":2879,"ogTitle":2878,"ogDescription":2879,"noIndex":6,"ogImage":2146,"ogUrl":2880,"ogSiteName":697,"ogType":698,"canonicalUrls":2880,"schema":2881},"It's time to learn DevOps and here's where to begin","DevOps is a unique blend of tech, tools and culture. Take it step-by-step and it's easy to learn. This simple guide shows you how to get started. 
Learn more here!","https://about.gitlab.com/blog/if-its-time-to-learn-devops-heres-where-to-begin","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"It's time to learn DevOps and here's where to begin\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Sharon Gaudin\"}],\n        \"datePublished\": \"2022-03-10\",\n      }",{"title":2878,"description":2879,"authors":2883,"heroImage":2146,"date":2884,"body":2885,"category":14,"tags":2886},[1364],"2022-03-10","\n\nIf you’re fairly new – or really new – to a DevOps team, you’ve made a great career move, but you probably [have a lot to learn](/topics/devops/devops-beginner-resources/). To truly learn DevOps, there are technologies and processes to figure out, phases to understand, and a [whole new mindset to adopt](/blog/soft-skills-are-the-key-to-your-devops-career-advancement/). \n\n## Learn DevOps, where to start?\n\nLearn DevOps? Why? Where?... Since the demand for DevOps professionals is hot and salaries for this [dynamic job sector](/blog/four-tips-to-increase-your-devops-salary/) are on the rise, there are a lot of DevOps beginners trying to figure out what to learn first. But don’t worry: We can help. \n\nWith a lot on [your learn DevOps to-do list](https://learn.gitlab.com/beginners-guide-devops/guide-to-devops), we’ll walk you through where you should start, including figuring out what DevOps is all about, the stages of the DevOps lifecycle, and the uniquely [collaborative culture](/blog/engineering-teams-collaborating-remotely/). \n\n## What DevOps is really all about\n\nIn the past, software development was done using a complicated and confusing jumble of tools and workflows. Both projects and teams often were siloed, which meant they weren’t coordinating efforts or sharing best practices. It was a frustrating and inefficient process that led to deployment traffic jams, costing teams time and money. 
There were a lot of headaches.\n\nThink of DevOps as a way to simplify development and deployment, while making the entire process more efficient. With DevOps, once-siloed teams, tools, and workflows are combined in a software development ecosystem. That ecosystem enables teams to plan, create and deliver more efficiently, securely, and collaboratively. \n\n## What to learn for DevOps\n\nDevOps also puts a focus on automation, shifting security left, and making practices not only repeatable but measurable. That speeds development cycles and slashes the time between designing new features and rolling them out into production.\n\nBecause of this efficiency and the enablement of teamwork, DevOps makes not only your software delivery more agile, it makes your entire company more agile. DevOps enables the business to pivot quickly, answering new and critical customer needs, responding to changes in the market and adjusting to stay ahead of the competition. \n\n## To learn DevOps, collaborate\n\nDevOps is built around a culture of collaboration that encourages teammates to share ideas and help each other. It’s not simply something that’s suggested and it’s not something that’s done in a meeting or two. Collaboration is a [core principle](/blog/4-must-know-devops-principles/) of DevOps. \n\nIt's easy to think that to learn DevOps means focusing on programming languages, security, and CI/CD. Those skills and technologies are critical but don’t dismiss the idea of collaboration. It’s about communication, and working together to create something new and to fix problems. However, DevOps professionals also collaborate with other departments, like security, marketing, and the C-suite. You’re all pulling in the same direction.\n\nIn the [2021 Global DevSecOps Survey](/developer-survey/), survey respondents consistently said communication and collaboration skills were key to their future careers. 
\n\n## The key stages of the DevOps lifecycle\n\nThere’s a definite flow to DevOps, with the process moving from planning and developing all the way through to deployment, monitoring, and feedback. There are three basic stages, or phases – build, test, and deploy. Within these are nine other stages that will help you produce software efficiently, reliably, and with speed and agility.\n\n- Planning focuses on everything that happens before a single line of code is written.\n- Creating is about designing and developing.\n- Verifying checks the quality of the code.\n- Packaging applications and dependencies, managing containers, and building artifacts maintains a consistent software supply chain. \n- Release, or deployment, is all about moving code updates into production as iterations are ready.\n- Configuring is focused on creating, managing, and maintaining application environments.\n- Monitoring is about checking the status of software and networks.\n- Protecting is all about securing your applications and their environment.\n- Managing runs end-to-end through your software development lifecycle, controlling permissions and processes. \n\n## What it means to shift security left\n\nDid you notice that security wasn’t one of the lifecycle stages for DevOps? Well, it’s not a single stage because it’s woven into EVERY stage. Shift left means you don’t wait to incorporate security into software at the end of a build. You consider security beginning with the initial planning stage and continue to focus on it all the way through, giving you more opportunity to avoid or find and address any issues. Shifting left enables you to make sure the code you are developing functions as intended, and that any vulnerabilities and compliance issues are caught and fixed.\n\n## Understand CI/CD\n\nFirst off, CI/CD means continuous integration and continuous delivery. 
Combined continuous development methodologies and practices focus on catching vulnerabilities and errors early in the development lifecycle, ensuring that all the code deployed into production complies with standards the DevOps team has established for the software being created. This helps connect development and operations teams, as well as projects, by using automation for building, testing, and deployment. \n\nCI/CD is all about  incremental code changes being made frequently and reliably – a critical part of how a DevOps platform enables an organization to automatically deliver software multiple times a day. This is key for DevOps teams and the overall business because CI/CD helps to quickly and efficiently move software updates into production, making the organization able to respond faster to customer needs. \n\n## How to get started with DevOps: dig deeper\n\nWant to learn more? Our [Beginner's guide to DevOps](https://page.gitlab.com/resources-ebook-beginners-guide-devops.html) has everything you need to get started.\n",[1128,111,1428],{"slug":2888,"featured":6,"template":683},"if-its-time-to-learn-devops-heres-where-to-begin","content:en-us:blog:if-its-time-to-learn-devops-heres-where-to-begin.yml","If Its Time To Learn Devops Heres Where To Begin","en-us/blog/if-its-time-to-learn-devops-heres-where-to-begin.yml","en-us/blog/if-its-time-to-learn-devops-heres-where-to-begin",{"_path":2894,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":2895,"content":2901,"config":2906,"_id":2908,"_type":16,"title":2909,"_source":18,"_file":2910,"_stem":2911,"_extension":21},"/en-us/blog/developing-a-successful-devops-strategy",{"title":2896,"description":2897,"ogTitle":2896,"ogDescription":2897,"noIndex":6,"ogImage":2898,"ogUrl":2899,"ogSiteName":697,"ogType":698,"canonicalUrls":2899,"schema":2900},"Developing a successful DevOps strategy","Here's what it takes to build a DevOps practice that works for everyone on the 
team.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749667540/Blog/Hero%20Images/devops-team-structure.jpg","https://about.gitlab.com/blog/developing-a-successful-devops-strategy","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Developing a successful DevOps strategy\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"GitLab\"}],\n        \"datePublished\": \"2022-03-09\",\n      }",{"title":2896,"description":2897,"authors":2902,"heroImage":2898,"date":2903,"body":2904,"category":14,"tags":2905},[1524],"2022-03-09","Some 60% of developers are releasing code 2x faster than before, [thanks to DevOps](https://learn.gitlab.com/c/2021-devsecops-report?x=u5RjB), and a majority of respondents to our 2021 Global DevSecOps Survey said their teams develop software using DevOps or DevSecOps.\n\n[DevOps](/topics/devops/) has had a direct impact on many businesses. Here’s what it takes to develop a successful DevOps strategy.\n\n## What is DevOps?\n\nDevOps is a set of practices that combines dev and ops to create safer software faster.\n\nThe main DevOps principles are automation, [continuous integration and delivery](/topics/ci-cd/) and responding quickly to feedback. Others are agile planning, infrastructure as code (IaC), containerization and microservices. Also, building in quality assurance and security with development and operations through the application lifecycle is important. 
Incorporating security into a DevOps team is referred to as [DevSecOps](https://about.gitlab.com/topics/devsecops/).\n\nEnabling the speed of delivery while maintaining high software quality requires an [organizational culture shift](https://www.ibm.com/cloud/learn/devops-a-complete-guide) that automates and integrates the efforts of the development and ops teams – two groups that traditionally practiced separately from each other, or in silos.\nBut the best DevOps processes and cultures extend beyond development and operations to incorporate input from all application stakeholders – including platform and infrastructure engineering, security, compliance, governance, risk management, line-of-business, end users and customers – into the software development lifecycle. \n\n## What are the benefits of a successful DevOps strategy?\n\nA successful DevOps strategy puts the focus on the customer. It’s not enough to focus on developing good software because this approach justifies prolonged development and release deadlines. It also overlooks the most critical factor: the consumer of the software. Your customer doesn’t care much about the process – they just want a quality product that will address their problem.  A successful DevOps strategy puts the team in the consumer’s shoes.\n\nAnother benefit of DevOps is that it allows a variety of teams, such as operations, security or project management, to work in an [Agile](/topics/agile-delivery/) setting. While development teams have become more Agile over the years, this occurred in isolation; operations teams have found it challenging to keep up and cannot release software at the same rate. 
DevOps brings these teams together and accelerates the delivery of software, while keeping the quality high.\n\nShorter development cycles with DevOps produce more frequent code releases, which in turn, makes it easier to spot code defects.\n\n## What key elements make DevOps successful?\n\nLike in most situations, **communication** is key to making a DevOps strategy successful. No business team can function without it, and that goes for a DevOps team. A good DevOps strategy incorporates feedback from developers, co-workers, and key stakeholders when building new systems.\nIT roles used to be more structured and defined, and as mentioned, professionals became used to working in silos. But DevOps has changed that model and work has become more **collaborative**. Teams now need to clearly communicate expectations, requirements and deadlines.\n\nDevOps is about a willingness to **change**. Teams must let go of some of their traditional practices and be open-minded to shifting their focus away from one deliverable and onto the next as business needs and capabilities evolve and change.\n\nTeams must also **accept failure** but not get discouraged by it. Some failure is to be expected, and the concept of [“fail fast”](https://docs.gitlab.com/ee/ci/testing/fail_fast_testing.html) (so you know there’s a problem soon enough to fix it easily) is at the heart of DevOps. They should embrace the possibilities that come from trying new techniques, and not be afraid to get creative. The top teams are those that work together, exchange ideas and push the boundaries of how they work and write more creative code.\n\n## Tips for creating a DevOps roadmap\n\nHaving a standard roadmap provides a DevOps team with a high-level, strategic blueprint of what the company envisions for the product. It’s a valuable reference point for any stakeholder during the software lifecycle. 
A roadmap also lets ops know when the development team will have a piece of code ready for testing.\n\nWhen creating a DevOps roadmap, make sure to clearly define the objectives and goals. Ask the team what [the collective purpose is for the roadmap](https://www.productplan.com/learn/create-a-devops-roadmap/). Objectives might include:\n\n- Improving engineering and ops teams coordination\n- Creating a single source of truth\n- Building an archive of development and release practices that people can refer to over time that are based on the most effective processes. This will help improve DevOps efforts going forward.\n\nFocused, short-term goals and plans should be established. Organizations typically plan their product roadmaps between 2 and 6 months out.\n\nA common mistake businesses make when building roadmaps is to use text only. By just using word processing documents or spreadsheets, stakeholders won’t get a clear understanding of what’s a high priority, which initiatives are dependent on others and who’s responsible for what.\n\nVisual roadmaps, complete with color-coding and bars, helps stakeholders more easily understand product plans. Roadmaps should also be kept current to reflect changes within the company’s culture and business model.\n\n## What are some common challenges associated with DevOps?\n\nChange isn’t easy and the merging of development and operations may cause a few clashes, but those involved must keep in mind that building a successful DevOps team requires this integration and collaboration between both sides. \nMake a gradual move into DevOps by starting with a small product or component and build from there.\n\nThere can also be challenges with deciding what tools to use, since there are so many available. This makes selecting a tool hard, especially if there’s a lack of knowledge about the technology behind it. 
Using a [DevOps platform](/topics/devops-platform/) can streamline all these choices as all of the moving parts of DevOps will be available and integrated in one single offering. \n\n[Momentum for DevOps](/blog/a-snapshot-of-modern-devops-practices-today/) is clearly growing because organizations are eager to take advantage of delivering software in shorter development cycles, while enhancing innovation in more stable operating environments and with performance-driven employee teams.",[1128,1428,1466],{"slug":2907,"featured":6,"template":683},"developing-a-successful-devops-strategy","content:en-us:blog:developing-a-successful-devops-strategy.yml","Developing A Successful Devops Strategy","en-us/blog/developing-a-successful-devops-strategy.yml","en-us/blog/developing-a-successful-devops-strategy",{"_path":2913,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":2914,"content":2920,"config":2925,"_id":2927,"_type":16,"title":2928,"_source":18,"_file":2929,"_stem":2930,"_extension":21},"/en-us/blog/the-best-of-gitlabs-devops-platform-2021",{"title":2915,"description":2916,"ogTitle":2915,"ogDescription":2916,"noIndex":6,"ogImage":2917,"ogUrl":2918,"ogSiteName":697,"ogType":698,"canonicalUrls":2918,"schema":2919},"The best of GitLab's DevOps Platform 2021","Some highlights from last year, and what to expect from 2022.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749667509/Blog/Hero%20Images/continuous-integration-from-jenkins-to-gitlab-using-docker.jpg","https://about.gitlab.com/blog/the-best-of-gitlabs-devops-platform-2021","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"The best of GitLab's DevOps Platform 2021\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Brendan O'Leary\"}],\n        \"datePublished\": \"2022-02-18\",\n      
}",{"title":2915,"description":2916,"authors":2921,"heroImage":2917,"date":2922,"body":2923,"category":14,"tags":2924},[2509],"2022-02-18","\nBefore we get too far into 2022, we wanted to take a look back at the most exciting additions to our [DevOps Platform](/topics/devops-platform/) over the last year. Since we release every month on the 22nd, there were lots of new features to consider, but these stood out to me.\n\n## Epic Boards\n\nIn [GitLab 14.0](/releases/2021/06/22/gitlab-14-0-released/#epic-boards), we made it easy to keep track of all epics in one place through Epic Boards. Our Epic Boards are customizable with a simple “drag and drop” interface accessible to all teammates, not just the technical ones. Now it’s painless to create general or DevOps-focused workflow states. And teams aren’t just more efficient, they can actually be predictable.\n\nExplore our [Epic Boards](https://docs.gitlab.com/ee/user/group/epics/epic_boards.html).\n\n## Integrations with VS Code and Gitpod\n\nFans of Visual Studio Code got a much tighter integration with GitLab in 2021. The [GitLab Workflow Extension](https://docs.gitlab.com/ee/user/project/repository/vscode.html) reduces context switching and improves productivity. And we rounded up [8 ways to get the most out of VS Code and GitLab](/blog/vscode-workflows-for-working-with-gitlab/).\n\nGitLab also created a tighter integration with Gitpod. Developers can now set up environments as code, greatly [speeding up the process](/blog/teams-gitpod-integration-gitlab-speed-up-development/). I think this Gitpod integration is so slick I used it to [code, build and deploy from an iPad](/blog/how-to-code-build-and-deploy-from-an-ipad-using-gitlab-and-gitpod/). Gitpod and its features give developers an opportunity to think outside the box.\n\n## So much security\n\nIn 2021 we gave security pros a true “home” in GitLab with our security dashboard. 
Teams can now [see vulnerabilities in a pipeline](https://docs.gitlab.com/ee/user/application_security/security_dashboard/) and easily slice and dice that data as necessary.\n\nStatic application security testing (SAST) also got an upgrade last year. We now have nextgen SAST that will [reduce Ruby false positives](/releases/2021/09/22/gitlab-14-3-released/#next-generation-sast-to-reduce-ruby-false-positives) as well as the ability to automatically test [Infrastructure as Code](/releases/2021/11/22/gitlab-14-5-released/) for the first time.\n\n## Praefect for Gitaly\n\nCustomers who want high availability on their own instances now can use Praefect, [our Gitaly clustering solution](/blog/high-availability-git-storage-with-praefect/), that allows Git to scale. Here’s what you [need to know](https://docs.gitlab.com/ee/administration/gitaly/praefect.html) about configuring a Gitaly cluster.\n\n## A visual pipeline editor\n\nIt’s hard to build it if you can’t see it, and that’s where our Pipeline Editor comes in. Use Pipeline Editor to [quickly set up CI/CD](/blog/pipeline-editor-overview/) because it’s now easy to see configurations and dependencies between jobs. Validate and visualize [all parts of the pipeline](https://docs.gitlab.com/ee/ci/pipeline_editor/) without feeling overwhelmed by the complexity.\n\n## Working with (and on) OpenShift\n\nIt’s now possible to set up a GitLab Runner for [Red Hat’s popular OpenShift infrastructure](https://docs.gitlab.com/runner/install/operator.html). Organizations relying on OpenShift can now use [the GitLab Operator](https://about.gitlab.com/blog/open-shift-ga/) to easily tap into the power of GitLab’s DevOps Platform.\n\n## The GitLab Agent for Kubernetes\n\nLast fall we announced an easier way to tackle GitLab and Kubernetes integrations in a secure and cloud-friendly way: [The GitLab Agent for Kubernetes](https://docs.gitlab.com/ee/user/clusters/agent/). 
We call this `agentk` and here’s [everything you need to know](/blog/setting-up-the-k-agent/) about set up.\n\n## 2021 and 2022\n\nIf I had to sum it up, I’d say that in 2021 we doubled down on security. And this year, expect us to double down on operations, specifically observability, thanks to our [acquisition of Opstrace](/press/releases/2021-12-14-gitlab-acquires-opstrace-to-expand-its-devops-platform-with-open-source-observability-solution.html). It’s going to be an exciting ride!\n",[1128,750,1428],{"slug":2926,"featured":6,"template":683},"the-best-of-gitlabs-devops-platform-2021","content:en-us:blog:the-best-of-gitlabs-devops-platform-2021.yml","The Best Of Gitlabs Devops Platform 2021","en-us/blog/the-best-of-gitlabs-devops-platform-2021.yml","en-us/blog/the-best-of-gitlabs-devops-platform-2021",{"_path":2932,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":2933,"content":2939,"config":2944,"_id":2946,"_type":16,"title":2947,"_source":18,"_file":2948,"_stem":2949,"_extension":21},"/en-us/blog/devops-and-the-scientific-process-a-perfect-pairing",{"title":2934,"description":2935,"ogTitle":2934,"ogDescription":2935,"noIndex":6,"ogImage":2936,"ogUrl":2937,"ogSiteName":697,"ogType":698,"canonicalUrls":2937,"schema":2938},"DevOps and the scientific process: A perfect pairing","Research teams have taken to DevOps principles and practices. 
Find out why and how to adopt DevOps in your organization.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749668378/Blog/Hero%20Images/hans-reniers-lQGJCMY5qcM-unsplash.jpg","https://about.gitlab.com/blog/devops-and-the-scientific-process-a-perfect-pairing","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"DevOps and the scientific process: A perfect pairing\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Christina Hupy, Ph.D.\"}],\n        \"datePublished\": \"2022-02-15\",\n      }",{"title":2934,"description":2935,"authors":2940,"heroImage":2936,"date":2941,"body":2942,"category":14,"tags":2943},[2668],"2022-02-15","\nThe scientific process and the DevOps lifecycle. At first glance, it’s hard to imagine a connection. Yet, if you look at how some of GitLab’s customers and community members are marrying the two, it makes perfect sense.\n\nTake, for example, the European Space Agency (ESA), which uses GitLab extensively for a variety of purposes, including version control, enabling collaboration, increasing security, and coordinating the intellectual resources of its 22 member states. ESA  has more than 140 groups and 1,500 projects stored on its GitLab instance. In the first year of using the DevOps Platform, ESA ran more than 60,000 pipeline jobs, allowing the organization to deploy code faster and to simplify its toolchain. The projects range from mission control systems, onboard software for spacecraft, image processing, and monitoring tools for lLabs. The ESA IT Department also uses GitLab to host its code tools and configurations infrastructure. Since adopting GitLab, ESA has enjoyed a culture of collaboration that is increasing around the organization.\n\nAs you can see with the ESA example, the connection between research and DevOps is powerful. 
Let’s examine why this combination works so well.\n\nThe scientific process moves through stages: asking a question, conducting background research, constructing a hypothesis, testing your hypothesis by doing an experiment, analyzing data, and reporting results. This process is very often iterative as new information is discovered throughout. It also is very collaborative as researchers work together to formulate hypotheses, gather data, and analyze the data.  Many artifacts are generated throughout the process, including data, analysis scripts, results, and research papers. Often, software itself is built to run equipment, labs, or process data.\n\nDevOps, the set of practices and tools that combines software development and information technology operations, also moves through stages. [These stages](/stages-devops-lifecycle/) include manage,  plan, create, verify, and release. DevOps is also very iterative and collaborative and many different types of artifacts are generated along the way.\n\n![Alt text for your image](https://about.gitlab.com/images/blogimages/devopsinscience1.png)\n\n## How the scientific process and the DevOps lifecycle align\n\nWe aren’t the only ones who noticed the similarities! As researchers were looking for tools to help them organize their plans, data, scripts, and results in a way that allowed them to work collaboratively and efficiently, they started using source control management. Storing their artifacts in a central repository had immediate benefits for collaboration.  It was a natural progression from there expanding across the DevOps lifecycle. As the shift happened and scientists began using the DevOps lifecycle for the scientific process, the results were transformational. 
Shifting the approach of science to follow the DevOps lifecycle resulted in increased transparency, collaboration, reproducibility,  speed to results, and data integrity.\n\nIn this transformation, the first stages of the scientific method – observing and hypothesizing – equate to the DevOps plan stage.  Hypotheses and research tasks can be managed and documented in issue tracking systems. Issues define what work needs to be done and progress can be tracked with milestones and labels. No information is lost in separate email threads or local documents. Assigning issues to users, along with approver and reviewer features, can make the research process highly efficient among collaborators, graduate students, and mentors.\n\nData collected during the testing stage is stored in a central repository where source control management (SCM) keeps them safe and accessible.  [Git technology](/topics/version-control/what-is-centralized-version-control-system/) allows all changes to be controlled, tagged, versioned with branches, and peer-reviewed through merge requests.  Analysis scripts are also stored in [source code management](/solutions/source-code-management/) as well and run using [continuous integration](/solutions/continuous-integration/)(CI), a.k.a. the verify stage. Containerization is used to replicate computing environments and ensure reproducible results.\n\n## The role of documentation\n\nDevOps platforms are able to transform the scientific research process because the whole research lifecycle can be documented with a single source of truth in a repository,  open, shared, and accessed. Where, currently, only final results are reviewed and published in the form of papers, leaving the rest of the process mostly opaque to reviewers and the public, the DevOps workflow allows access to and collaboration on all stages of the scientific lifecycle. As this one repository hosts all stages of the scientific process, metrics can be generated on all contributions. 
Researchers around the world can use the same containers, environment, and analysis on their own data ensuring reproducible science.\n\n## Breaking down research silos\n\nMost research today is happening sequentially, with locally optimized research groups working in silos. We often see duplication of work, incomplete documentation of results, and intransparent data and analysis. The DevOps transformation is shifting science to concurrent science where researchers are working collaboratively, with full transparency for reviewers.\n\n![Alt text for your image](https://about.gitlab.com/images/blogimages/devopsinscience2.png)\n\n## Examples of the Research-DevOps alliance\n\nLet’s take a look at some examples, in addition to ESA mentioned at the outset. Researchers at MathWorks use DevOps tools workflows to perform requirements-based testing on an automotive lane-following system with Model-Based Design, as mentioned in this article [“Continuous Integration for Verification of Simulink Models”](https://www.mathworks.com/company/newsletters/articles/continuous-integration-for-verification-of-simulink-models-using-gitlab.html).\n\nData and code are stored in an SCM and then  are forked to a testing branch.  CI pipelines are used to run various experiements and  tests on the code. When a test-case failure is detected in a GitLab CI pipeline, the researchers create an Issue to track and discuss the bugfix. The bug is reproduced locally in MATLAB, the issue is fixed, and the tests are run locally. The changes are reviewed on the testing branch. These changes can be committed to the testing branch where the verify, test, and build process is repeated. Researchers then create a merge equest to send the changes of the test branch into the master branch and close the corresponding Issue.\n\nAccording to the authors, “CI is gaining in popularity and becoming an integral part of Model-Based Design”.  
The benefits of using CI cited by the researchers include: repeatability, quality assurance, reduced development time, improved collaboration, and audit-ready code.\n\nThe Square Kilometre Array Organisation (SKAO) is leading the design of the globally distributed radio telescope SKA, using GitLab SCM and CI for scientific collaboration, development efficiency, and transparency. According to Lead Software Architect Marco Bartolini, “The large success is having been able to onboard code and software projects from many different organizations and with very different tools and technology into one single platform, easily. It was not a pain, and now we got it all under control. So that's brilliant.”\n\nThe sky is the limit for how DevOps is transforming the scientific research process – perhaps it could transform yours.  Vist [GitLab for Education Program](/solutions/education/) to learn more and watch our “GitLab for Scientific Research” video below.\n\n\u003C!-- blank line -->\n\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/4PRFhDIV_4Q\" frameborder=\"0\" allowfullscreen=\"true\">\u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\nCover image by [Hans Reiners](https://unsplash.com/photos/lQGJCMY5qcM) on [Unsplash](https://unsplash.com/)\n",[1128,269,1528],{"slug":2945,"featured":6,"template":683},"devops-and-the-scientific-process-a-perfect-pairing","content:en-us:blog:devops-and-the-scientific-process-a-perfect-pairing.yml","Devops And The Scientific Process A Perfect 
Pairing","en-us/blog/devops-and-the-scientific-process-a-perfect-pairing.yml","en-us/blog/devops-and-the-scientific-process-a-perfect-pairing",{"_path":2951,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":2952,"content":2958,"config":2964,"_id":2966,"_type":16,"title":2967,"_source":18,"_file":2968,"_stem":2969,"_extension":21},"/en-us/blog/the-devops-platform-series-building-a-business-case",{"title":2953,"description":2954,"ogTitle":2953,"ogDescription":2954,"noIndex":6,"ogImage":2955,"ogUrl":2956,"ogSiteName":697,"ogType":698,"canonicalUrls":2956,"schema":2957},"The DevOps Platform series: Building a business case","Understanding the need for a DevOps platform is key to realizing the business value of DevSecOps. This is the first in a three-part series.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749668185/Blog/Hero%20Images/Chorus_case_study.png","https://about.gitlab.com/blog/the-devops-platform-series-building-a-business-case","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"The DevOps Platform series: Building a business case\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Lee Faus\"}],\n        \"datePublished\": \"2022-02-03\",\n      }",{"title":2953,"description":2954,"authors":2959,"heroImage":2955,"date":2961,"body":2962,"category":14,"tags":2963},[2960],"Lee Faus","2022-02-03","\n\n\n_This is the first in a three-part series._\n\nOver the past five years, I’ve spent a lot of time with executives having them question me about how other companies and their competitors are navigating [DevSecOps](https://about.gitlab.com/blog/gitlab-is-setting-standard-for-devsecops/). 
This series shares how to introduce a DevOps platform into your organization to support DevSecOps.\n\n## Realizing the need for DevOps\n\nWhen I was at GitHub, I had a partner at Accenture who provided me with a great definition of DevOps that I still use today: “It is the combination of agility, collaboration, and automation that drives DevOps.” This struck a chord with me because at the time execs were “promoting” DevOps with Jira, GitHub, and Jenkins. They just needed to form a DevOps team to manage these products and provide the integration between them. Then they could mark DevOps off their KPIs for the next year and move on to the next set of challenges.\n\nUnfortunately, this only created more challenges. Tying these products together required a significant amount of work and the people in charge of this integration were usually operations folks or consultants who were more familiar with business continuity plans, standard operating procedures, high availability, and disaster recovery than writing custom code to provide a better experience for users.\n\nThey needed functionality that would capture metrics for team leads, managers, and executives so they could understand how the platform being built was driving customer adoption, increasing revenue or reducing costs. But capturing metrics meant engineering work that led to internal dogfooding. Also, people new to the field of platform engineering were not happy with the solutions they were presented with.\n\nThe entire process led to new evaluations of different products and additional stakeholders. We started managing infrastructure as code, testing tools, security, and deployment tools as part of this solution. 
We were no longer just building integrations, we were building a developer platform or what most of the executives I talked to called a “Developer Self-Service Platform.”\n\n## The cost of disparate DevOps tools \n\nThe executives I talk to now know this story as they were probably in charge of building this platform five years ago and have since been promoted to own more than just the tooling: Today they’re in charge of site reliability, cloud adoption, or platform engineering. Their teams have anywhere from 10 to 50 different tools, each with their own unique use case. The challenge now is to leverage these tools in a way that they were never intended to be used.  \n\nExecutives need data and analytics to optimize their business. This means collecting the data from all of the tools in a meaningful way where they can build analytical models for them to budget, plan, and report on the state of the business. I know of five Fortune 100 companies that have been on this path for more than three years and are still waiting to provide the first dashboard to their executive teams.\n\nThese companies have easily invested “eight figure” budgets to account for the people, process, culture, and tooling changes required to try and make this work. The total cost of ownership could total more than $10 million when you look at the department's profit and loss statements. The return on investment would take over five years *if* the team is able to generate or reduce costs by an additional $5 million a year in new revenue with a fully integrated platform that is live today. \n\nUnfortunately, people realize quickly that just extracting the data from the tools is not enough. There is context in the conversations that drives real-time decisions and those nuances often don’t make it into the analytical stores. 
The end result can be knee-jerk business decisions that can be detrimental to the business for years to come.\n\n## Understanding DevOps platform requirements\n\nLet’s begin with a simple question: Do you have requirements for this platform? You are about to embark on building an internal platform for your company. You want to leverage your intellectual capital and experience to drive innovation and create efficiencies that allow you to more effectively run your business for years to come.\n\nIf you don’t have requirements, you should start with a [value stream assessment](/handbook/customer-success/solutions-architects/sa-practices/value-stream-assessments/) to create them. This value stream assessment looks at your lines of business, the processes used to create artifacts, and the time it takes to get those artifacts to a place where they can be used to generate revenue, retain existing customers, or generate business efficiencies.\n\nOnce you have these metrics, measure the time from initial sourcing of the need to the number of people required to touch the artifact and ensure the quality and security of it as it moves across different environments. Divide that result by the time it takes to go from initial change to a production environment.\n\nThe more touchpoints in this process, the more costs will increase and so will the risk of software supply chain issues. Think of this like an assembly line: The hand-offs between touchpoints will require you to implement quality frameworks like [supply chain levels for software artifacts](https://slsa.dev/) to ensure chain of custody for audit and compliance needs. These challenges just keep getting bigger as you try to add additional tools to this platform.\n\nIs it possible to meet all of your requirements with a single product? Try GitLab to see what requirements we can help you meet. The only way to start capturing ROI is to stop building *your* platform today. 
If you don’t stop building it, you are still capturing costs and every tool you add chips away at the economies of scale. You can think of every new product as one input and one output. This means to go from four tools to five tools you are adding an additional 100 connections to be integrated together. It takes engineering time to properly integrate a product into the overall software supply chain instead of consuming the capabilities as a platform-enabled service that you can use out of the box to meet your requirements. \n\nIf you are interested in learning more about doing a value stream assessment – or if you have done one and looking for ways to build efficiencies inside your organization – please let me know and we can work together to help make the best choice for your organization, even if it means continuing with a platform you already started building.\n\n_In the next part of this series, we will look at how different vendors define the term “platform” and their motivation behind helping you achieve your requirements._\n",[1128],{"slug":2965,"featured":6,"template":683},"the-devops-platform-series-building-a-business-case","content:en-us:blog:the-devops-platform-series-building-a-business-case.yml","The Devops Platform Series Building A Business Case","en-us/blog/the-devops-platform-series-building-a-business-case.yml","en-us/blog/the-devops-platform-series-building-a-business-case",{"_path":2971,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":2972,"content":2978,"config":2984,"_id":2986,"_type":16,"title":2987,"_source":18,"_file":2988,"_stem":2989,"_extension":21},"/en-us/blog/how-to-build-out-your-devops-team",{"title":2973,"description":2974,"ogTitle":2973,"ogDescription":2974,"noIndex":6,"ogImage":2975,"ogUrl":2976,"ogSiteName":697,"ogType":698,"canonicalUrls":2976,"schema":2977},"How to build out your DevOps team","Hiring the right DevOps roles put you on the path to success. 
","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749664007/Blog/Hero%20Images/devopsroles.jpg","https://about.gitlab.com/blog/how-to-build-out-your-devops-team","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How to build out your DevOps team\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Johanna Ambrosio\"}],\n        \"datePublished\": \"2022-01-25\",\n      }",{"title":2973,"description":2974,"authors":2979,"heroImage":2975,"date":2981,"body":2982,"category":14,"tags":2983},[2980],"Johanna Ambrosio","2022-01-25","\nGetting started with modern software development can feel overwhelming, particularly if you're trying to build a DevOps team from scratch. Hiring the right DevOps roles may require a blend of art, science, and luck, but it is doable. Here's our best advice on key DevOps roles, and the skills each position needs to make your DevOps team function like a well-oiled machine.\n\n- **Developers:** DevOps is a team sport nowadays. Devs test code, act as [security champions](/blog/why-security-champions/), provision infrastructure, and write automation scripts… just to name a few of the job requirements. They use scrum, Kanban, or other Agile methods to work in short iterations with regular feedback from the business side or from other clients. The dev role has changed dramatically over the past few years and will likely continue to adopt elements of other roles from UX to business-side subject matter expert. They want to continue to stretch themselves, so keep that in mind. In our [2021 Global DevSecOps Survey](/developer-survey/), developers said understanding AI/ML is the most important skill for their future careers.   \n\n- **Operations engineer/systems administrator:** In Olden Times, this is the person who ensured the software could and did run smoothly in production and sent out alarms if it didn't. 
But on a DevOps team, ops will manage the cloud, help create monitoring and analytics that are integrated into code, manage the tools, deal with the tools, and, of course, help resolve problems. Like the dev role, operations pros need new and emerging skills to stay relevant, including advanced programming languages, subject matter expertise, and a deeper understanding of security, according to our survey.\n\n- **Evangelist:** Someone needs to make sure the rest of the company knows what your team is up to, sing its praises, and communicate what the business's most pressing needs are. Ideally, this is a senior-level person who sits on the company's Executive Committee or board. More than just a cheerleader, an evangelist on a DevOps team should get everyone in the company involved in DevOps, committed to its success, and happy to spend budget on the endeavor. \n\n- **Project manager/release manager:** This DevOps role tracks the team's progress against business objectives, sets goals and timelines, and tries to keep everything running on time. Solving problems with cost, project scope, schedule, and client satisfaction are also squarely in this job description.\n\n\n- **QA tester/automation engineer:** A testing professional plays a critical role on a DevOps team, even with the advent of \"devs who test\" and test automation. Testing pros look at the big picture of the entire software pipeline and at snippets of code. From choosing or creating the right tests to driving test automation, this DevOps role needs out-of-the-box thinking, flexibility, and the ability to pivot at a moment's notice. \n\n\n- **Security engineer:** It's critical to build in security and compliance from the start, rather than trying to tack it on at the end when fixing problems becomes most expensive. A security engineer on a DevOps team must be strategic and hands-on. 
Security has a lingering tarnished reputation as a top-down problem that devs literally don't have the tools to solve, but are asked to. So for this DevOps role, it's critical to hire someone who can meet dev and ops where they are, explain the challenges and technologies, and work together collegially.\n\n- **User experience (UX) professional:** This DevOps role is the end-user advocate, the person who is totally focused on how the software looks and works from the client's perspective. Think of the UX pro as the person who brings the client and the client's needs right into the development process. In this era of modern software development, [a UX role](/blog/the-evolution-of-ux-at-gitlab/) is a must-have rather than a nice-to-have.\n\nThose are just the \"getting started\" DevOps roles. Other titles to consider include a site reliability engineer or a DevOps platform engineer, an infrastructure engineer, project and product managers, systems engineers and architects, and software architects. Keep in mind that, especially now with the Great Resignation, [hiring talent for any of these DevOps roles](/blog/have-devops-jobs-to-fill-try-these-3-strategies-to-hire-and-retain/), and pretty much anything IT-related in general, can take months.\n\nReskilling is an excellent option, though. The DevOps Institute [offers trainings](https://www.devopsinstitute.com/skilup-days/), which it calls SKILup Days, on topics such as site reliability engineering and how to create a CI/CD pipeline. And when thinking about reskilling, don't forget [the importance of soft skills to a DevOps team](/blog/soft-skills-are-the-key-to-your-devops-career-advancement/). 
If ever there's a place where collaboration and communication matter, it's in DevOps.\n\n_Johanna Ambrosio is a freelance technology writer._\n\nCover image by Hans-Peter Gauster on [Unsplash](https://www.unsplash.com)\n{: .note}\n",[1128,1408,1428],{"slug":2985,"featured":6,"template":683},"how-to-build-out-your-devops-team","content:en-us:blog:how-to-build-out-your-devops-team.yml","How To Build Out Your Devops Team","en-us/blog/how-to-build-out-your-devops-team.yml","en-us/blog/how-to-build-out-your-devops-team",{"_path":2991,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":2992,"content":2998,"config":3003,"_id":3005,"_type":16,"title":3006,"_source":18,"_file":3007,"_stem":3008,"_extension":21},"/en-us/blog/utilize-the-gitlab-devops-platform-to-avoid-cloud-migration-hazards",{"title":2993,"description":2994,"ogTitle":2993,"ogDescription":2994,"noIndex":6,"ogImage":2995,"ogUrl":2996,"ogSiteName":697,"ogType":698,"canonicalUrls":2996,"schema":2997},"Utilize the GitLab DevOps platform to avoid cloud migration hazards","The GitLab modern DevOps platform can simplify and accelerate planning, managing, moving, and modernizing applications and infrastructure as companies adopt a cloud-first posture on AWS and Google Cloud.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749665811/Blog/Hero%20Images/daytime-clouds.jpg","https://about.gitlab.com/blog/utilize-the-gitlab-devops-platform-to-avoid-cloud-migration-hazards","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Utilize the GitLab DevOps platform to avoid cloud migration hazards\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Nima Badiey\"}],\n        \"datePublished\": \"2022-01-25\",\n      }",{"title":2993,"description":2994,"authors":2999,"heroImage":2995,"date":2981,"body":3001,"category":14,"tags":3002},[3000],"Nima Badiey","\nThese unprecedented times have been an unexpected catalyst driving 
companies to finally get serious about moving to the cloud. The adoption wave started in retail and banking by consumers who were unable to shop and bank in-person and were forced instead to drastically increase their online purchases.\n\nAs a result, many e-commerce sites hosted on public clouds experienced a Cambrian explosion of activity and business. The impact of the pandemic soon crossed every industry and segment from healthcare and education to hospitality and food services, as more and more companies closed their offices in favor of remote work. With closed buildings came closed data centers and other short-staffing of business-critical services.\n\nCoupled with supply chain disruptions of compute, networking, and storage gear, many IT teams were faced with mounting business continuity challenges, which impacted service level agreements, product quality, and ultimately customer satisfaction.\n\nThe answer to these challenges is to move applications, data, and infrastructure from on-premises to the cloud, with hosting provided by large public cloud providers like Amazon Web Services (AWS) and Google Cloud – both of which are better suited to support business-critical services. \n\nAs businesses continue to define their new processes and procedures, one condition is likely to become permanent: Cloud adoption is expected to accelerate and spread across all industries. 
[IDC FutureScape](https://www.businesswire.com/news/home/20191029005144/en/IDC-FutureScape-Outlines-the-Impact-Digital-Supremacy-Will-Have-on-Enterprise-Transformation-and-the-IT-Industry) predicts that by 2024 more than 50% of all IT spending will go toward digital transformation and cloud-first innovation projects.\n\nDespite this immutable momentum, many CIOs remain reticent as 80% are still concerned that cloud adoption initiatives alone won’t deliver the expected business agility they need, according to [a McKinsey report](https://www.mckinsey.com/business-functions/mckinsey-digital/our-insights/unlocking-business-acceleration-in-a-hybrid-cloud-world).\n\nOne reason for this is that migrating and modernizing applications simultaneously to the cloud takes more effort and experience than organizations can afford. To be successful, organizations need to adopt new software development strategies and DevOps tools to support hybrid and multi-cloud models. These teams often lack the consistent methodology and toolchains to plan, prioritize, automate, and track the progress of cloud migration projects. Adding to the risks, many companies are hampered with legacy software development workflows, disconnected processes, and siloed tools. They are further burdened with a complicated inventory of mismatched legacy hardware, aging networks, security, and application stacks that are poorly suited to cloud-native architectures.\n\nUltimately, successful cloud migrations require mastering the basics by adopting proven, repeatable, and reliable processes such as breaking big initiatives into manageable workstreams. Consistency and structured repeatability have a greater impact on project success than executive sponsorship, funding, or upgrading the company culture to an “agile” mindset. GitLab plays a critical role in the successful deployment and delivery of these cloud migration projects. 
\n\n## DevOps: The first logical step in cloud adoption\n\nGitLab is a modern DevOps platform used by startups as well as midsize and Fortune 500 companies to build and deliver software through an integrated toolset. In simple terms, it’s Git for source code management with a built-in CI/CD pipeline that includes security, code scanning, and monitoring. GitLab is an all-in-one integrated platform. No need to digitally piece multiple solutions together and no more switching between different tools and apps just to deploy software code. \n\nAs enterprises plan to migrate apps, services, data, and/or infrastructure to the cloud this year, these projects will benefit from new ways to plan, manage, and deliver value from their cloud investments.\n\nTo get started, GitLab, together with AWS and Google Cloud, has chronicled this journey with valuable guidance to help cloud teams embrace the cultural shift necessary for modern agile teams. In these guides, we map out an approach that empowers cross-functional teams to work together concurrently during migrations, refactorization, and adoption of new cloud services.\n\nWith GitLab, users can define custom assessment methodologies, create repeatable task lists for application migration, store app code and Terraform configuration scripts in Git, and set security protocols easily through simple merge requests. GitLab can also automate the process of testing, scanning, monitoring, and deploying business apps. By embracing next-gen DevOps, cloud migration projects can be more successful with proven, repeatable, and reliable processes all managed on the GitLab DevOps platform. 
\n\n### Learn more:\n- [Migration to Google Cloud and adopting cloud native](https://learn.gitlab.com/gitlab-google-cloud)\n- [Accelerate your migration to AWS using a DevOps model](https://learn.gitlab.com/gitlab-aws-microsite)\n\n",[1128,1368,2096],{"slug":3004,"featured":6,"template":683},"utilize-the-gitlab-devops-platform-to-avoid-cloud-migration-hazards","content:en-us:blog:utilize-the-gitlab-devops-platform-to-avoid-cloud-migration-hazards.yml","Utilize The Gitlab Devops Platform To Avoid Cloud Migration Hazards","en-us/blog/utilize-the-gitlab-devops-platform-to-avoid-cloud-migration-hazards.yml","en-us/blog/utilize-the-gitlab-devops-platform-to-avoid-cloud-migration-hazards",{"_path":3010,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":3011,"content":3017,"config":3022,"_id":3024,"_type":16,"title":3025,"_source":18,"_file":3026,"_stem":3027,"_extension":21},"/en-us/blog/gitlab-value-stream-analytics",{"title":3012,"description":3013,"ogTitle":3012,"ogDescription":3013,"noIndex":6,"ogImage":3014,"ogUrl":3015,"ogSiteName":697,"ogType":698,"canonicalUrls":3015,"schema":3016},"The role of Value Stream Analytics in GitLab's DevOps Platform","Better DevOps teams start with value stream management. 
Here's how to get the most out of GitLab's Value Stream Analytics.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749668041/Blog/Hero%20Images/Understand-Highly-Technical-Spaces.jpg","https://about.gitlab.com/blog/gitlab-value-stream-analytics","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"The role of Value Stream Analytics in GitLab's DevOps Platform\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Itzik Gan Baruch\"}],\n        \"datePublished\": \"2022-01-24\",\n      }",{"title":3012,"description":3013,"authors":3018,"heroImage":3014,"date":3019,"body":3020,"category":14,"tags":3021},[702],"2022-01-24","\n\n***\"Whenever there is a product for a customer, there is a value stream. The challenge lies in seeing it!\"*** *Learning to See - Shook & Rother*\n\nEvery company today is a software company so the level of innovation and delivery has a direct impact on revenue generation. In order to be successful, businesses must deliver an amazing digital experience, keep up with the latest technologies, deliver value at the speed demanded by customers, and do it all with zero tolerance for outages or security breaches. That's where value stream management comes into play.\n\n*“If you can’t describe what you are doing as a value stream, you don’t know what you’re doing.”* *(Martin, K. & Osterling, M. (2014). Value Stream Mapping. McGraw-Hill, p. 15.)*\n\nValue stream management(VSM) is a change in development mindset that puts the customer at the center. VSM allows teams to measure and improve the software delivery and value flow to customers. The development process is outlined from ideation until customer value realization. 
The focus is no longer on features and functionality – instead, organizations ensure the efforts and resources invested to deliver value to customers will improve flows that are causing bottlenecks, optimizing the cycle and shortening time to market. \n\nYou can learn more about [Value Stream Mapping](/topics/devops/value-stream-mapping/) here.\n\n## An overview of GitLab's Value Stream Analytics \n\nAs part of [GitLab's DevOps Platform](/solutions/devops-platform/), Value Stream Analytics provides one shared view of the team's velocity. With insights into how long it takes the team to move from planning to monitoring, it's possible to pinpoint areas for improvement. Value Stream Analytics measures the time spent for each project or group. It displays the median time spent in each stage of the process by measuring from its start event to its end event. It helps identify bottlenecks in the development process, enabling management to uncover, triage, and identify the root cause of slowdowns in the software development life cycle and to quickly act on them to improve efficiency.\n\n![vsa](https://about.gitlab.com/images/blogimages/vsa/vsa_1.png)\n\n## Why are Value Stream Analytics important? \n\nThe process of efficient software delivery starts by understanding where the slowest parts are, and what are the root causes behind them. With this information it's possible to build a plan for optimization.  \n\n## Which DevOps stages are tracked? \n\nThe stages tracked by Value Stream Analytics by default represent GitLab's DevOps Platform flow - \n**Issue**, **Plan**, **Code**, **Test**, **Review** and **Staging**.  \n\n![vsa](https://about.gitlab.com/images/blogimages/vsa/vsa_stages.png)\n\n## How to customize GitLab's Value Stream Analytics \n\nNote: The stages can be customized in group level Value Stream Analytics; currently no customization is available in the project level. 
\n\nClick Edit in the Value Stream Management \n\n![vsa](https://about.gitlab.com/images/blogimages/vsa/vsa_4.png)\n\nClick Add another stage \n\n![vsa](https://about.gitlab.com/images/blogimages/vsa/vsa_5.png)\n\nDefine stage name, and select start event and end event from the list. \n\n![vsa](https://about.gitlab.com/images/blogimages/vsa/vsa_6.png)\n\n![vsa](https://about.gitlab.com/images/blogimages/vsa/vsa_7.png)\n\n## The key metrics \n\nThe dashboard includes useful key metrics which help to understand the team performance. If, for example, the values of **new issues**, **commits** and **deploys** are high, it's clear a team is productive. The DevOps metrics are commonly known as the **DORA (DevOps Research and Assessment) 4**. The [DORA 4 metrics](https://cloud.google.com/blog/products/devops-sre/using-the-four-keys-to-measure-your-devops-performance) show the value the team delivered to customers.\n\n**Deployment Frequency** shows how often code is deployed to production and brings value to end users. **Lead time for changes** measures how long it takes a change to get into production. Like deployment frequency, this metric measures team velocity.\n\n![vsa](https://about.gitlab.com/images/blogimages/vsa/vsa_metrics.png)\n\n## The importance of Value Stream Analytics within GitLab\n\nGitLab is a complete DevOps Platform, delivered as a single application. As such, teams use the same application during the development process from planning to monitoring. One of the benefits of being a single application for the entire DevOps lifecycle is that the data flows from all DevOps stages and is available for analysis, so Value Stream Analytics correlates and identifies how teams are spending their time without the need to integrate with an external tool. 
\n\nLearn more about [Value Stream Analytics for projects](https://docs.gitlab.com/ee/user/analytics/value_stream_analytics.html) and [Value Stream Analytics for groups](https://docs.gitlab.com/ee/user/group/value_stream_analytics/).\n\nTake a deeper dive into what DORA calls [elite DevOps teams](/blog/how-to-make-your-devops-team-elite-performers/).\n\n\n\n\n\n\n\n\n\n\n",[1128,728,727],{"slug":3023,"featured":6,"template":683},"gitlab-value-stream-analytics","content:en-us:blog:gitlab-value-stream-analytics.yml","Gitlab Value Stream Analytics","en-us/blog/gitlab-value-stream-analytics.yml","en-us/blog/gitlab-value-stream-analytics",{"_path":3029,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":3030,"content":3036,"config":3041,"_id":3043,"_type":16,"title":3044,"_source":18,"_file":3045,"_stem":3046,"_extension":21},"/en-us/blog/introducing-modelops-to-solve-data-science-challenges",{"title":3031,"description":3032,"ogTitle":3031,"ogDescription":3032,"noIndex":6,"ogImage":3033,"ogUrl":3034,"ogSiteName":697,"ogType":698,"canonicalUrls":3034,"schema":3035},"Adopt ModelOps within DevOps to solve data science challenges","The ModelOps stage of DevOps applies AI and ML to address complex data science challenges.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749668437/Blog/Hero%20Images/faster-cycle-times.jpg","https://about.gitlab.com/blog/introducing-modelops-to-solve-data-science-challenges","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Adopt ModelOps within DevOps to solve data science challenges\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Taylor McCaslin\"}],\n        \"datePublished\": \"2022-01-21\",\n      }",{"title":3031,"description":3032,"authors":3037,"heroImage":3033,"date":3038,"body":3039,"category":14,"tags":3040},[2548],"2022-01-21","\nIn a [recent blog post](/blog/the-road-to-smarter-code-reviewer-recommendations/) discussing the 
progress of integrating novel machine learning (ML) algorithms into GitLab, we introduced our new [ModelOps stage](/direction/modelops/). This stage is focused on enabling and empowering data science workloads on GitLab. GitLab ModelOps aims to bring data science into GitLab within existing features to make them smarter and more intelligent and to empower GitLab customers to build and integrate data science workloads within GitLab.\n\nAn interesting question we hear a lot is how will this be useful for DevOps professionals? So we wanted to dive into who exactly we’re building ModelOps features for and why. To begin, here is an overview of how we’ve chosen to structure our new ModelOps stage. \n\n## ModelOps: Enabling and empowering data science workloads\n\n![Chart of ModelOps stages](https://about.gitlab.com/images/blogimages/Screen_Shot_2022-01-19_at_1.11.36_PM.png){: .shadow}\n\nModelOps is about taking all the best practices we’ve learned building a DevOps platform and applying them to the unique challenges of AI and ML workloads. Our ModelOps stage is divided into three primary groups: DataOps, MLOps, and AI Assisted. Each group has specific jobs to be done and challenges. Part of the reason we chose this organization model is due to the different user personas we’re trying to solve problems for in each of these areas. Now let’s dive into the people in each group, as well as the challenges each group aims to solve. \n\n## DataOps: Get the data, clean it, and process it\n\nDataOps is focused on everything required to process data workloads, including fetching data, cleaning it, and processing it. You may have heard this called ELT, or Extract, Load, Transform, of data. But DataOps is more than just the ELT; there are lots of other problems that come with data sources. For example, data located in many disparate systems in many formats and lacking common data definitions. 
Most data sources require a lot of processing to access, move, clean, and interpret data. We have specialists whose entire job is [all of the work to get data into usable states](https://online.hbs.edu/blog/post/data-life-cycle) so organizations can do something of business value with it. \n\nDepending on the organization, these data professionals may have different titles such as data engineer, data architect, or data analyst.  These data wranglers have many assorted jobs: aggregating disparate data sources, cleaning and shaping data into usable formats, making data available to the business, and even analyzing data and answering business questions.\n\nThe data experts leverage many tools such as ELT platforms, big data warehouses, data pipelines, and database technologies like SQL and Elasticsearch. Data management tooling can be an extremely complex series of connections piping data in and out of various platforms. These challenges are the heart of the problems we’re aiming to solve.\n\n## MLOps: Do something useful with the data\n\nNext is MLOps, which is what most people associate with data science. MLOps aims to enable customer data science use cases, including accessing and interacting with data, AI/ML toolchain integrations, and compute environment integrations. Basically, everything that is required to build, test, train, and deploy AI/ML models into production systems. MLOps leverages math to solve problems using computing power to find patterns in the data that we just discussed with DataOps. \n\nData science teams feature professionals with titles such as data scientists, ML engineers, or ML specialists. These experts usually have a mix of higher-level math and statistics skills, software engineering, and basic DevOps skills. They can cobble together environments to build, train, test, and explore data science models to solve specific business problems.\n\nThe work data scientists do is more than just building ML models. 
They have to understand the business data and problems they are trying to leverage data science to solve. It’s usually very experimental and requires a lot of iteration to find a solution that solves a particular business problem in a useful way. It’s common for data scientists to spend a lot of time exploring and understanding datasets and the business problems organizations are hoping data science can solve. They then build and train AI/ML models, evaluate model output, and then iterate their models.\n\nAmong the common tools these data scientists use are Python notebooks, which allow them to leverage scripting to explore and manipulate data and try different modeling techniques. They also may use many open source ML and data science frameworks, as well as special data science platforms that help manage, version, interpret, and monitor models. Most of this work almost never happens in production environments. It happens on local machines or in cloud computing platforms where data scientists can leverage highly specialized compute, optimized for running data science models. That leaves an interesting challenge of how do you deploy their work to production systems.  Our last use case, DevOps, provides the solution. \n\n## AI Assisted: Leverage data to solve business problems \n\nWhile our AI Assisted group isn't specifically focused on any one user persona, we are planning to enrich existing GitLab features with ML. Our goal is to take features that require manual work to leverage and apply ML to automate these tasks. Tasks like assigning and labeling issues, choosing code reviewers, and even triaging and fixing security vulnerabilities. You can read more about our AI Assisted plans on our [direction page](/direction/ai-powered/) or check in on the status of our first Applied ML feature, [suggested reviewers](/blog/the-road-to-smarter-code-reviewer-recommendations/). 
Now that we've touched on improving GitLab for everyone, let's go back to GitLab's main persona, DevOps engineers.\n\n## DevOps: Build, test, and deploy software \n\nDevOps is probably the most understood use case that we’re trying to solve with our ModelOps stage. However, we’re focused on the intersection of DevOps and data science workloads. Specifically what happens when you need to deploy a data science model to a production system. GitLab’s DevOps platform is already an established and mature platform for building, testing, and deploying traditional software applications. But the software stacks of modern organizations are evolving and becoming more sophisticated, including leveraging ML. We’ve described some of the challenges and new personas that are involved with the development of data science workloads, but what happens when it’s time to go to production?\n\nToday, data science teams and DevOps engineers work in separate silos with very different skill sets and technology challenges. So when a data science team has a new ML model they want to push into a production software environment and integrate into a running application, in walks a whole new set of challenges. \n\nJust about every software company now has DevOps teams focused on repeatability, stability, and velocity of software development lifecycles. Everything relating to the design, build, testing, deployment, security, and monitoring of software from idea to deploy into a production system. These teams are usually composed of software engineers and DevOps engineers. The people who write, build, and test code with repeatable CI/CD, allowing software teams to seamlessly develop software applications. \n\n## Helping them all work together\n\nOur goal with ModelOps is to help all of these people work together to build and deploy data-rich modern applications leveraging novel ML workloads. 
We want to bring data science into GitLab within existing features to make them smarter and more intelligent and to empower GitLab customers to build and integrate data science workloads in their own applications built and deployed with GitLab. Each of these groups has unique challenges and use cases that are interconnected. That’s part of what makes data science difficult. It has a lot of moving parts and crosses every aspect of modern software development lifecycles with very unique challenges. \n\nIf all of this is interesting to you, you may also enjoy watching our recent Contribute session, where we discuss more about what we plan to accomplish with our ModelOps stage, which you can watch on YouTube.\n\n\u003C!-- blank line -->\n\n\u003Cfigure class=\"video_container\">\n\n  \u003Ciframe src=\"https://www.youtube.com/embed/C08QVI99JLo\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\n\u003C/figure>\n\n\u003C!-- blank line -->\n\n_This blog post contains information related to upcoming products, features and functionality._\n\n_It is important to note that the information in this blog post is for informational purposes only. Please do not rely on this information for purchasing or planning purposes._\n\n_As with all projects, the items mentioned in this blog post and linked pages are subject to change and delay. 
The development, release, and timing of any products, features, or functionality remain at the sole discretion of GitLab Inc._\n",[1128,1069,727],{"slug":3042,"featured":6,"template":683},"introducing-modelops-to-solve-data-science-challenges","content:en-us:blog:introducing-modelops-to-solve-data-science-challenges.yml","Introducing Modelops To Solve Data Science Challenges","en-us/blog/introducing-modelops-to-solve-data-science-challenges.yml","en-us/blog/introducing-modelops-to-solve-data-science-challenges",{"_path":3048,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":3049,"content":3054,"config":3059,"_id":3061,"_type":16,"title":3062,"_source":18,"_file":3063,"_stem":3064,"_extension":21},"/en-us/blog/what-will-devops-do-for-your-team-in-2022",{"title":3050,"description":3051,"ogTitle":3050,"ogDescription":3051,"noIndex":6,"ogImage":2762,"ogUrl":3052,"ogSiteName":697,"ogType":698,"canonicalUrls":3052,"schema":3053},"What will DevOps do for your team in 2022?","DevOps brings the technical wins but business is winning too, thanks to this modern software development strategy. Here's what our latest DevOps assessment found.","https://about.gitlab.com/blog/what-will-devops-do-for-your-team-in-2022","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"What will DevOps do for your team in 2022?\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Valerie Silverthorne\"}],\n        \"datePublished\": \"2022-01-19\",\n      }",{"title":3050,"description":3051,"authors":3055,"heroImage":2762,"date":3056,"body":3057,"category":14,"tags":3058},[1859],"2022-01-19","\n\nOver the last six months, we’ve asked teams and individual contributors to assess their DevOps platform practices by answering a 20-question quiz. To date, more than 600 people have shared their experiences, providing a clear, and somewhat surprising, snapshot of DevOps as it’s done _today_. 
There are obvious technical wins, of course, but there are also glimpses of how DevOps and modern software development are driving business change. \n\nHere are some of the key takeaways:\n\n### DevOps is a stand up (and out) choice\t\n\nAlmost 35% of respondents say they’ve been doing DevOps for between one and three years, while 22% report they’ve been at DevOps less than a year. And 16% are in that DevOps sweet spot of between three and five years, while 15% are seasoned DevOps pros with more than five years of experience. \n\nDevOps, of course, enables faster and safer software development and it’s clearly taking teams and entire organizations along for the ride, with much greater levels of collaboration/planning and a commitment to cross-functional processes. Nearly one-quarter of respondents say everyone in their organization considers themselves to be part of the DevOps team. And 17% say security, test, and design have joined dev and ops to create their DevOps teams. \n\nBig changes are happening within those teams as well. Just shy of 30% say the traditional roles of “dev” and “ops” are definitely blurring and 16% report everyone on their team is “cross-functional”. Nearly 15% say dev, sec, ops, and test are all seeing roles change and blend together.\n\nWhen asked how teams handle planning and collaboration, 50% say their processes were either “long-established and effective” or “completely seamless and baked into everything.” Meanwhile, 43% are either just starting a planning/collaboration process or are well underway. \n\nTo put it another way, it appears DevOps drives faster releases *and* better planning and collaboration almost in equal measure. \n\n### A DevOps platform in 2022\n\nJust shy of 36% of quiz takers use an “out of the box” [DevOps platform](/solutions/devops-platform/), while only 7% are considering one. 
Nearly one-third of respondents say their DevOps platform is a “hybrid” affair of homegrown and purchased solutions, or what GitLab refers to as [DIY DevOps](/blog/welcome-to-the-devops-platform-era/#phase-3",[1128,1428,269],{"slug":3060,"featured":6,"template":683},"what-will-devops-do-for-your-team-in-2022","content:en-us:blog:what-will-devops-do-for-your-team-in-2022.yml","What Will Devops Do For Your Team In 2022","en-us/blog/what-will-devops-do-for-your-team-in-2022.yml","en-us/blog/what-will-devops-do-for-your-team-in-2022",{"_path":3066,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":3067,"content":3073,"config":3078,"_id":3080,"_type":16,"title":3081,"_source":18,"_file":3082,"_stem":3083,"_extension":21},"/en-us/blog/how-to-begin-your-devops-journey",{"title":3068,"description":3069,"ogTitle":3068,"ogDescription":3069,"noIndex":6,"ogImage":3070,"ogUrl":3071,"ogSiteName":697,"ogType":698,"canonicalUrls":3071,"schema":3072},"How to begin your DevOps journey","So you want a career in DevOps? These easy and affordable opportunities will let you get started today.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749663736/Blog/Hero%20Images/a-deep-dive-into-the-security-analyst-persona.jpg","https://about.gitlab.com/blog/how-to-begin-your-devops-journey","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How to begin your DevOps journey\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Christina Hupy, Ph.D.\"}],\n        \"datePublished\": \"2022-01-13\",\n      }",{"title":3068,"description":3069,"authors":3074,"heroImage":3070,"date":3075,"body":3076,"category":14,"tags":3077},[2668],"2022-01-13","\n[DevOps](/topics/devops/) is a hot career track. The DevOps industry is projected by IDC to be at [$17.7 billion in revenue by 2024](https://www.idc.com/getdoc.jsp?containerId=US45188520). Such growth requires more DevOps practitioners in all realms. 
Yet, due to the speed of change in DevOps, students are generally not learning DevOps skills and workflows while in a degree program. That doesn’t mean you have to wait to gain these critical skills. We share how to get the skills you need now.\n\n## Why early exposure to DevOps is important\n\nBy learning DevOps early on in their education, students can drastically shorten the typical six-years-or-more timeline to becoming a DevOps professional. In our [GitLab for Education Survey](/solutions/education/edu-survey/), 40% of student respondents answered that DevOps is critical for workforce readiness and 45% viewed the ability to build a portfolio and record of contributions as a top benefit of using DevOps while in school.\n\nStudents and young professionals learning to code with the same approach they will use in the industry gives them a jump on their careers and makes the transition from the classroom to a DevOps culture that much easier. It can also help to accelerate the digital transformation as newly onboarded employees begin to spread the benefits of iterating faster, innovating together, and increasing deployment velocity.\n\nHere’s how to get a headstart on learning DevOps.\n\n### 1. Bring DevOps to your classroom\n\nIf your university and professors are not currently teaching DevOps or using DevOps tools in your classes, don't worry, we've got a blog post that covers [5 easy ways to bring DevOps into your classroom](/blog/5-ways-to-bring-devops-to-your-campus/). Learn about how our GitLab for Education team can visit your classroom and give a guest lecture on DevOps or a workshop. And how our GitLab for Education Program offers free, top-tier, unlimited licenses to qualifying institutions. Students can also sign up individually for [GitLab’s free tier](/pricing/).\n\n### 2. Explore DevOps on your own\n\nExploring DevOps on your own is a great way to extend your knowledge, gain different perspectives, and build on top of your degree. 
\n\nDevOps as a discipline, platform, and culture is ever-evolving. With social media, tech publications, case studies, and blog posts there is no shortage of content for you to access. It is easy to tune into industry conversations on Twitter and elsewhere to stay on the cutting edge. We recommend getting started by reading some of our [GitLab blogs](/blog/) or blogs from other organizations in the DevOps space that catch your eye.\n\nFor instance, follow Developer Evangelists or Developer Relations professionals, known as DevRels, from your favorite organizations, and see what they are sharing. Don’t worry about understanding all the details at first, just look for the high-level points, the tools they discuss, and general industry trends. Follow [Michael Friedrich](https://gitlab.com/dnsmichi), GitLab Developer Evangelist, to learn about all things DevOps, especially CI/CD, monitoring, and observability, and follow [Abubakar Siddiq Ango](https://gitlab.com/abuango), GitLab Developer Evangelism Program Manager, to learn about DevSecOps with a focus on the Cloud Native Ecosystem. \n\n### 3. Start networking\n\nThere is no better way to get excited about DevOps and its potential than through networking with other DevOps professionals and enthusiasts. \n\n- Meetups. Tech companies in the DevOps space host monthly meetups (in-person and virtual), where professionals and community members alike listen to a short talk and then engage in a Q&A. These meetups provide opportunities for networking as well. At GitLab, you can see our [upcoming events](https://www.meetup.com/pro/gitlab) and register for free or sign up to host one for your classmates or teammates. [We are here to help](/community/meetups/) and get you started. \n\n- Conferences. GitLab’s annual user conference, GitLab Commit, showcases amazing presentations from customers across all industries and community members from all over the world, along with breakout sessions so you can network. 
Keep an eye out for the next one in September and [view the playlist](https://www.youtube.com/c/Gitlab/playlists?view=50&sort=dd&shelf_id=1) from GitLab Commit 2021. Also [DevOps Days](https://devopsdays.org/), a series of free technical conferences around the world, lets you mingle with DevOps professionals and learn more about the industry.\n\n### 4. Get hands-on with DevOps tools and platforms\n\nReady to jump in? Gaining hands-on experience is the fastest way to start your journey, and you don’t need an internship or job to access tools. If you are a current student or early professional, you can begin to build a portfolio of projects on GitLab or your [platform of choice](https://about.gitlab.com/topics/devops/beginner-devops-platform/). Even simple projects, such as creating a Twitter bot or Python script, can be done in a source control management system like GitLab. \n\nStore relevant homework, course projects, capstone projects, and side projects in one central repository and your future employers will be able to see your portfolio and how your skills have progressed over time. With [GitLab pages](https://docs.gitlab.com/ee/user/project/pages/), you can even publish your resume and keep a journal of blog posts documenting your journey in DevOps. \n\nAs an example, check out [the profile page of PJ Metz](https://gitlab.com/PjMetz), GitLab Education Evangelist. Notice everything he’s worked on is right there and you can click to see his commits and merge requests. The earlier you start to build a portfolio, the more you’ll have to share with potential employers.\n\n### 5. Contribute to the open source community\n\nAnother great way to gain experience is to contribute to open source projects. Students and young professionals often aren't aware of the value of contributing to open source projects, haven’t considered it, or maybe think that you need high-level developer skills to contribute. 
\n\nBy nature, anyone who has very basic technical skills can contribute to an open source project at some level. Most open source projects have resources available for new contributors or first-time contributors, including a “Getting Started” guide or a list of contributions needed. Contributions aren’t limited to expert coders; open source communities accept input from a variety of skill levels and experience. For example, new contributors can work on documentation and language translation. Minor UX changes or bug fixes are also great first contributions. \n\nAdditionally, many open source projects often have engaged communities that are invested in helping new contributors learn and grow their skills. This set of unique characteristics makes contributing to open source projects a great starting point for people from diverse backgrounds.  \n\nGitLab is an open core platform with a vibrant community. We have over 10,000 merge requests from the wider community with an average of 250 contributors per month. You can contribute to GitLab in [three ways](/community/contribute/):\n\n- Fix bugs\n- Add to documentation\n- Translate our docs and products to different languages\n\nWe make contributing very easy and accessible to first-time contributors. We even label each issue with `quick win`.  Our [quarterly hackathons](/community/hackathon/) enable you to network with our community, meet merge request coaches, attend meetups, and win sweet swag prizes. For more, check out our #contributors channel on [Discord](https://discord.gg/gitlab).\n\n### 6. Earn some industry credentials \n\nAfter getting your feet wet and building skills on your own, you may also be interested in adding some more formal credentials to your resume. Courses and certificate programs are a great way to add to your degree or work on professional development early in your career. Certifications are generally achieved after gaining some hands-on experience and working in the field. 
\n\n- DevOps courses. Most online learning platforms, such as Coursera, Udemy, and LinkedIn Learning have some form of DevOps course. For example, LinkedIn Learning has a free [DevOps foundations course](https://www.linkedin.com/learning/devops-foundations/development-and-operations-2?autoAdvance=true&autoSkip=false&autoplay=true&resume=true&u=2255073).\n\n- DevOps certifications. If you have some experience under your belt and are interested in a more formal path, a DevOps certification could be of interest to you. DevOps certification is an accredited credential that is earned by demonstrating some specific skills and subject matter that are required to work in the DevOps profession. These credentials are earned by taking courses, passing assessments, and participating in performance reviews, or providing work samples. DevOps certifications can be specific to a certain tool, such as the Docker Certified Associate or Kubernetes Certification. Amazon Web Services, or AWS, also offers a Certified DevOps Engineer Exam. Some DevOps certifications are more tool- and platform-agnostic such as those offered by the [DevOps Institute](https://www.devopsinstitute.com/certifications/). \n\nGitLab has a learning platform with several courses and certification pathways, including a GitLab Certified Associate, GitLab Certified CI/CD Specialist, and GitLab DevOps Professional. See our [full list](/learn/certifications/public/) or [sign up to learn more](https://gitlab.edcast.com/). 
\n\nWherever you are on your journey to becoming a DevOps professional, these resources should help you move forward and learn more about this exciting aspect of software development.\n",[1231,1408,269],{"slug":3079,"featured":6,"template":683},"how-to-begin-your-devops-journey","content:en-us:blog:how-to-begin-your-devops-journey.yml","How To Begin Your Devops Journey","en-us/blog/how-to-begin-your-devops-journey.yml","en-us/blog/how-to-begin-your-devops-journey",{"_path":3085,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":3086,"content":3092,"config":3096,"_id":3098,"_type":16,"title":3099,"_source":18,"_file":3100,"_stem":3101,"_extension":21},"/en-us/blog/new-year-new-programming-language",{"title":3087,"description":3088,"ogTitle":3087,"ogDescription":3088,"noIndex":6,"ogImage":3089,"ogUrl":3090,"ogSiteName":697,"ogType":698,"canonicalUrls":3090,"schema":3091},"New year, new programming language","Use the calendar turnover as an excuse to spark your curiosity and learn one of the hottest programming languages.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749668524/Blog/Hero%20Images/closeup-photo-of-black-and-blue-keyboard-1194713.jpg","https://about.gitlab.com/blog/new-year-new-programming-language","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"New year, new programming language\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Valerie Silverthorne\"}],\n        \"datePublished\": \"2022-01-13\",\n      }",{"title":3087,"description":3088,"authors":3093,"heroImage":3089,"date":3075,"body":3094,"category":14,"tags":3095},[1859],"\nIt’s 2022, time to learn something new. So how about studying a new programming language? 
Keeping your [dev skills sharpened](/blog/the-top-skills-you-need-to-get-your-devops-dream-job/) and gaining fluency in more than one language is ideal for DevOps pros, according to DevOps Institute’s [2021 Upskilling Report](https://info.devopsinstitute.com/2021-upskilling-report-download).\n\nBut with all the new programming languages around, it can be tricky to know where to begin. [Stack Overflow’s 2021 Survey](https://insights.stackoverflow.com/survey/2021) found devs were most interested in learning Python, JavaScript, and Go. JavaScript is a fairly ubiquitous language, so let’s look instead at tutorials and advice for some up-and-coming languages, including Python, Go, Rust, Groovy, and Kotlin.\n\n## The promise of Python\n\nPython is a very popular second or third language for websites, analytics, and all things DevOps. It’s also very easy to [start learning](/blog/beginner-guide-python-programming/). Python.org offers [a free tutorial](https://www.python.org/about/gettingstarted/). There is also an [interactive option](https://www.learnpython.org).\n\n## Go for the gold\n\nAnother language to consider is Go because its proponents say it’s incredibly easy to learn and use. Go is so interesting that GitLab Staff Developer Evangelist [Brendan O’Leary](/company/team/#brendan) is going to learn it this year [and plans to share the journey on his blog](https://boleary.dev/blog/2022-01-10-new-year-new-language.html). Learn the [basics of Go](https://go.dev/doc/tutorial/getting-started) and then consider diving into [Go by Example](https://gobyexample.com).\n\nOnce you’re done hitting the books, tackle a real-world challenge, like [using Go for CI](/blog/go-tools-and-gitlab-how-to-do-continuous-integration-like-a-boss/).\n\n## All about Rust\n\nIt’s safe to say the devs who know and use Rust _love_ Rust. But, to be fair, it’s not necessarily the easiest language to learn. 
That said, if the goal is secure code, [Rust is a solid choice](/blog/rust-programming-language/). To try Rust on for size, devs can either [read The Rust Programming Language book](https://doc.rust-lang.org/book/) or try [the Rustlings course](https://github.com/rust-lang/rustlings/). Overachievers might want to [learn how to fuzz Rust code](/blog/how-to-fuzz-rust-code/). \n\n## Feeling Groovy\n\nGroovy is all about scripting and, as such, is ideal for those wanting to learn automation. Also, Groovy works side-by-side with Java, meaning it’s going to be a language that comes easily to those devs. [Get started with Groovy](https://www.guru99.com/groovy-tutorial.html). For a deeper dive, here’s [a list of books about Groovy](https://groovy-lang.org/learn.html).\n\n## Create with Kotlin\n\nApparently Kotlin is a programming language that [makes developers happier](https://kotlinlang.org) and is ideal for data science projects and Android apps. If you want to be a happier developer, too, [here’s how to get started with Kotlin](https://www.codecademy.com/learn/learn-kotlin/modules/learn-kotlin-introduction-to-kotlin). 
Google also offers a [bootcamp for Kotlin developers](https://www.udacity.com/course/kotlin-bootcamp-for-programmers--ud9011).\n\n## Bonus round: Use Python and Rust together\n\nBecause there is no point in learning a new programming language unless you can use it, here’s a step-by-step guide to bringing your application idea to production [using Python, Rust, and GitLab CI](/blog/python-rust-and-gitlab-ci/).\n",[1128,1408],{"slug":3097,"featured":6,"template":683},"new-year-new-programming-language","content:en-us:blog:new-year-new-programming-language.yml","New Year New Programming Language","en-us/blog/new-year-new-programming-language.yml","en-us/blog/new-year-new-programming-language",{"_path":3103,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":3104,"content":3109,"config":3114,"_id":3116,"_type":16,"title":3117,"_source":18,"_file":3118,"_stem":3119,"_extension":21},"/en-us/blog/16-ways-to-get-the-most-out-of-software-documentation",{"title":3105,"description":3106,"ogTitle":3105,"ogDescription":3106,"noIndex":6,"ogImage":2204,"ogUrl":3107,"ogSiteName":697,"ogType":698,"canonicalUrls":3107,"schema":3108},"How to get the most out of software documentation","Want to get even more mileage out of your DevOps platform? Better software documentation is the answer. 
Here are tips to help you get started.","https://about.gitlab.com/blog/16-ways-to-get-the-most-out-of-software-documentation","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How to get the most out of software documentation\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Sharon Gaudin\"}],\n        \"datePublished\": \"2022-01-11\",\n      }",{"title":3105,"description":3106,"authors":3110,"heroImage":2204,"date":3111,"body":3112,"category":14,"tags":3113},[1364],"2022-01-11","\n\nIt’s not a glamorous part of a DevOps platform, but software documentation is easy, sometimes hands-free, and, if done correctly, can help speed up development and deployment. Here are some tips to refresh your software documentation practice.\n\n## Defining documentation\n\nSoftware documentation – which includes everything from manuals to system and design requirements, change lists, code comments, and alert records – is a way to unify efforts between projects and DevOps teams, and to share specialized knowledge and guidance. It’s also a way to standardize practices and benchmark metrics. There’s a direct correlation between creating clear, comprehensive, searchable, up-to-date, and well-organized documents and a DevOps team’s success.\n\nNeed proof? 
According to the [Accelerate State of DevOps 2021 report](https://gitlab.com/gitlab-com/www-gitlab-com/uploads/069ee8e2ee6af463cf0aafcd89eda33e/state-of-devops-2021.pdf) from DORA, the DevOps Research and Assessment team at Google, DevOps teams with solid documentation practices are 2.4 times more likely to meet or exceed their reliability targets, 3.8 times more likely to implement security practices, and 2.5 times more likely to fully leverage the cloud.\n\nMaking sure you have strong documentation actually is one of the six suggestions the DORA report gave DevOps professionals who [want to become elite team performers](/blog/how-to-make-your-devops-team-elite-performers/).\n\nAs you work on a [DevOps platform](/solutions/devops-platform/) and create new efficiencies and processes, you will want to document them so you can carry them forward. No continually reinventing the wheel for you.\n\n### Tips for creating solid software documentation\n\nSo how do you go about building good documentation? Here are some basic steps to follow:\n\n- You need to decide who is responsible for the documentation. What works best for your team and your organization? Does the project need a [technical writer](/handbook/product/ux/technical-writing/) or can one of your developers handle it? Give one person or just a few people ownership of documentation. You’re more likely to have quality software documentation when someone has clear responsibility and no one can pass the buck. \n\n- Don’t forget about incorporating user experience into your documentation. It will give you a different view on use cases and experiences and enable readers to have their success moment [more quickly](https://docs.gitlab.com/ee/ci/quick_start/). \n\n- Think about the security requirements for your software. For instance, when a project uses network communication over public transport, does it provide secure communication with TLS and/or https? 
Inform users about [support policies for security releases](https://docs.gitlab.com/ee/policy/maintenance.html), allowing to plan accordingly for upgrades and maintenance windows. Additionally, what measurements do you need to take to make sure it complies with company security policies? Note that information in your documentation.\n\n- Use your documentation to explain technical decisions and share insights into [reference architectures](https://docs.gitlab.com/ee/administration/reference_architectures/). When debugging a problem, it is helpful to learn about the decisions, and also have ‘get help’ and [‘troubleshooting’ sections](https://docs.gitlab.com/ee/ci/troubleshooting.html) in your documentation.\n\n- Provide details about issues you faced with the project and how you worked them out. Make sure the details are explained so that others can easily understand them. Add URLs to issues or epics into your documentation to allow readers to follow, for example the [version history for product features](https://docs.gitlab.com/ee/development/documentation/styleguide/#version-text-in-the-version-history) in the GitLab documentation.\n\n- There should be specific rules about how to change, expand and update documentation. Create [documentation style guides](https://docs.gitlab.com/ee/development/documentation/styleguide/), including requirements, examples, use cases and specifications for writing for a global audience. If changes are made creating inconsistent data formats, it can be more difficult to organize and search documents.\n\n- Don’t just document at the end of a project. It should be done continuously throughout the development and deployment lifecycle – from planning through monitoring and feedback. 
(We’ll give you more tips about this below.)\n\n- Give people who are responsible for documentation the [training](/handbook/product/ux/technical-writing/fundamentals/) they need in how to collect data, write, organize, and maintain it.\n\n- Make sure the [people responsible for documentation](/handbook/product/ux/technical-writing/#designated-technical-writers) are included in all aspects of the DevOps lifecycle. Bring them into planning, design, and testing meetings. They can’t write about or collect information about what they don’t know is happening.\n\n- Make use of data created by automated processes. (Again, there’s more information on this below.)\n\n- Make sure your documentation isn’t just paraphrasing what the source code flow does. Explain the “why” as well as the use case for the project. Dependending on the size and users, your audiences may differ, and the introduction needs an [overview with different navigation routes](https://docs.gitlab.com/ee/index.html).\n\n- There’s no one right way to handle documentation. What you need for documentation may vary depending on things like the size and nature of your organization, the scope of your software projects, and compliance issues. A hospital or financial institution’s documentation needs might differ from those of a small, private company.\n\n## Continuous software documentation\n\nMuch like there are continuous integration and deployment, there also can be continuous documentation. You can make the automated processes on a DevOps platform do a good chunk of your documentation work by having them capture key information throughout the DevOps lifecycle and funnel it into your documentation stores. Make it part of your development workflow by approaching documentation with a DevOps mindset. Software documentation is easier and more helpful when it’s done continuously.\n\nYou can leverage existing tools to generate, convert and present documentation. 
GitLab provides an extensive REST API, which allows to [update the wiki](https://docs.gitlab.com/ee/api/wikis.html) programmantically, or modify a Markdown file in the Git repository from your CI/CD pipelines. If you want to present the documentation on a website, you can use [MkDocs](https://www.mkdocs.org/) to generate a static documentation website [served with GitLab Pages](https://gitlab.com/pages/mkdocs) for example. Code documentation with [Doxygen](https://www.doxygen.nl/manual/docblocks.html) can be generated in the same way as a [website reference documentation](https://gitlab.com/pages/doxygen). \n\n### Tips to make documentation easier and more continuous\n\n- The DevOps platform’s automated systems, which govern processes and monitor everything from system to software configurations, generate logs that can create a real-time, ongoing stream of documentation.\n\n- Scripts and configuration files that control automated processes, like testing, hold important configuration data that can be fed into documentation.\n\n- Issue and alert logs, which generally contain information about problems, can be automatically documented. \n\n- Integrated [Observability](/direction/monitor/) keeps track of performance and availability of the software and also can add to documentation by providing access to metrics, traces and log dashboards and panels.  \n\nThese are just a few ways to automatically feed your continuous documentation operation. Sure, there are forms of documentation that will need some hands-on, but there are a lot that can be generated as part of the ongoing process. The data is there, so make good use of it.\n\n“Good documentation is foundational for successfully implementing DevOps capabilities,” the DORA report noted. 
“Teams with high quality documentation are better able to implement technical practices and perform better as a whole… From security to testing, documentation is a key way to share specialized knowledge and guidance both between these specialized sub-teams and with the wider team.” \n\n_[Michael Friedrich](/company/team/#dnsmichi), Senior Developer Evangelist, contributed to this blog post._\n",[1231,727,1428],{"slug":3115,"featured":6,"template":683},"16-ways-to-get-the-most-out-of-software-documentation","content:en-us:blog:16-ways-to-get-the-most-out-of-software-documentation.yml","16 Ways To Get The Most Out Of Software Documentation","en-us/blog/16-ways-to-get-the-most-out-of-software-documentation.yml","en-us/blog/16-ways-to-get-the-most-out-of-software-documentation",{"_path":3121,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":3122,"content":3128,"config":3133,"_id":3135,"_type":16,"title":3136,"_source":18,"_file":3137,"_stem":3138,"_extension":21},"/en-us/blog/5-ways-to-bring-devops-to-your-campus",{"title":3123,"description":3124,"ogTitle":3123,"ogDescription":3124,"noIndex":6,"ogImage":3125,"ogUrl":3126,"ogSiteName":697,"ogType":698,"canonicalUrls":3126,"schema":3127},"5 ways to bring DevOps to your campus","Educators can give students a career advantage by collaborating with GitLab to bring DevOps lectures, tools, and community straight to the classroom.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749668161/Blog/Hero%20Images/armycyberschool.jpg","https://about.gitlab.com/blog/5-ways-to-bring-devops-to-your-campus","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"5 ways to bring DevOps to your campus\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"PJ Metz\"}],\n        \"datePublished\": \"2022-01-11\",\n      }",{"title":3123,"description":3124,"authors":3129,"heroImage":3125,"date":3111,"body":3131,"category":14,"tags":3132},[3130],"PJ 
Metz","\nOrganizations around the world and across industries are adopting the DevOps methodology where development and operations are blended to securely accelerate software delivery. As this approach becomes a mainstay of software development, companies will need skilled professionals to fill key DevOps roles. Yet, as with most technological change, educational opportunities often lag behind real-world applications.\n\nGitLab aims to change this and has developed five ways educators can bring DevOps instruction and our [DevOps Platform](/solutions/devops-platform/) to your campus, affording students, professors, researchers, and IT teams the unique opportunity to learn DevOps skills firsthand, including DevSecOps, and offering your graduates and organization a competitive advantage.\n\n**1. GitLab for Education program**\n\n[GitLab for Education](/solutions/education/) provides free licenses of Ultimate to educational and research institutions as long as it is used for teaching or nonprofit research purposes. If you’re going to use GitLab in a classroom and want your students to use it for their schoolwork, then this is the option for you. This is set up by a full-time employee of the university and is a full license with as many seats as you need. Our Ultimate license is everything that our major enterprise customers use to create their apps and now it’s available to university students across a variety of disciplines. Signing up is simple via our [join page](/solutions/education/join). This kicks off a process that takes a few weeks to complete and ends with a license that brings your classroom or research into the world of the DevOps Platform. Learn how GitLab for Education has benefited other institutions, including the [University of Washington](/customers/uw/), [Dublin City University](/customers/dublin-city-university/), and Heriot Watt University.\n\n**2. 
GitLab for Campuses**\n\nGitLab for Campuses lets your developers, IT professionals, and other employees working with the technical administration of your university have access to world-class DevOps tools. Rather than cobbling together multiple applications for a Do-It-Yourself DevOps solution, we can provide you access to our single DevOps platform at a discounted rate. [GitLab for Campuses](/solutions/education/) is an offering that covers a large swath of your user base. You would still be able to grant access to students on your campus to use GitLab just like you can with GitLab for Education, but you have the added benefit of The DevOps Platform being used for running your entire institution’s IT.\n\n**3. GitLab Guest Lecture**\n\nDevOps might be a brand-new consideration for your classroom; perhaps this is your first time hearing about it. GitLab’s education team is here to help you by providing a DevOps 101 guest lecture, which you can schedule for your class. We can have a lecture during one of your sections or set up a time for multiple sections of your class to come together and learn about what DevOps is and how to learn more. This type of industry information is invaluable for students looking to join a company right out of college. We’re not just talking about The DevOps Platform, but DevOps as an operational and cultural change in software development, as well as how DevOps implementations can be present in non-CS careers and companies. Let our team of former educators help guide your class into the exciting world of DevOps with a guest lecture. [Fill out this form](https://forms.gle/y2r5o83i8z6rfJPh8) to find out more about our Guest Lecture opportunities.\n\n**4. GitLab Student Contribution Workshop**\n\nContributing to open source is one of the best ways students can build skills, make connections, and add to their portfolio to showcase their abilities and work. Open source is everywhere in DevOps, especially at GitLab. 
Not only are several open source projects hosted on GitLab, GitLab itself is open core and [open for contributions](/community/contribute/).\n\nWe believe [everyone can Contribute](/company/mission/#mission), but we know that the first contribution can be daunting; students might not know where to start, how to create a merge request, or what the maintainers are looking for. Even basics like working locally and git commands might be a little confusing if students haven’t encountered them before. One way for students to  participate is through our hackathon. GitLab hosts a [hackathon](/community/hackathon/) once every three months with helpful issue tags and other ways to easily find places where we are looking for contributors to help build the future. Past hackathons have included swag prizes for every merge request that gets merged as well as a top-tier prize for the most contributions. Top contributors to GitLab are also eligible for our [Heroes program](/community/heroes/).\n\nBecause we believe so strongly in the power and importance of open source, we are offering a Contribution Workshop where a GitLab team member will walk students through some of the ways they can contribute to open source on GitLab. If your class, student organization, or large group of students wants to learn more about open source contributions, [contact us](https://docs.google.com/forms/d/e/1FAIpQLSe8yQkCMjylb-9w3WZoz3tmN7hmhnrb2LRoXWJd6D5ncP_o6Q/viewform?usp=sf_link). We’ll take it from there and bring open source to your campus.\n\n**5. GitLab Student Organization Workshop**\n\nStudent organizations still remain one of the best ways to build community among future professionals and make connections that can be vital to a student’s career. Meeting others working or studying in the same field also promotes sharing of information and resources to create an environment that maximizes potential success for everyone. 
These organizations, like on-campus clubs, professional fraternities and sororities, and even professional organizations with student chapters, are a great way to start building your future with your peers. GitLab is looking to bring a small workshop to student organizations at your university where you’ll not only learn how to use GitLab and start using DevOps best practices, but you’ll also boost some coding skills by actually building with GitLab. We’ll be offering a workshop in Python or Node.js where we will learn to build either a Twitter bot or a Discord bot.\n\nBecause of the ever-evolving presence of Covid and travel complications, we can’t promise this workshop will be in person. As of right now, the safest way is to get together virtually. If you are a member of a student organization looking to give your members an opportunity to learn more about DevOps, GitLab, or a fun project to make a bot and level up some skills, then this is the workshop for you. [Sign up here](https://docs.google.com/forms/d/e/1FAIpQLSecpQ1tmFpAPeeT9rasWcAtaEF8nv62LEDsKyJEdJJbe5Z8RQ/viewform?usp=sf_link)\n\n\n",[1128,1408,269],{"slug":3134,"featured":6,"template":683},"5-ways-to-bring-devops-to-your-campus","content:en-us:blog:5-ways-to-bring-devops-to-your-campus.yml","5 Ways To Bring Devops To Your Campus","en-us/blog/5-ways-to-bring-devops-to-your-campus.yml","en-us/blog/5-ways-to-bring-devops-to-your-campus",{"_path":3140,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":3141,"content":3147,"config":3152,"_id":3154,"_type":16,"title":3155,"_source":18,"_file":3156,"_stem":3157,"_extension":21},"/en-us/blog/the-road-to-smarter-code-reviewer-recommendations",{"title":3142,"description":3143,"ogTitle":3142,"ogDescription":3143,"noIndex":6,"ogImage":3144,"ogUrl":3145,"ogSiteName":697,"ogType":698,"canonicalUrls":3145,"schema":3146},"The road to smarter code reviewer recommendations","Machine learning is coming to GitLab's code review process. 
Here's what you need to know, and how you can help!","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749668426/Blog/Hero%20Images/retrospectivesgitlabpost.jpg","https://about.gitlab.com/blog/the-road-to-smarter-code-reviewer-recommendations","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"The road to smarter code reviewer recommendations\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Taylor McCaslin\"}],\n        \"datePublished\": \"2022-01-04\",\n      }",{"title":3142,"description":3143,"authors":3148,"heroImage":3144,"date":3149,"body":3150,"category":14,"tags":3151},[2548],"2022-01-04","\nYou may recall back in June 2021, we [announced the acquisition of UnReview](/press/releases/2021-06-02-gitlab-acquires-unreview-machine-learning-capabilities/), a machine learning (ML) based solution for automatically identifying appropriate expert [code reviewers](/stages-devops-lifecycle/create/) and controlling review workloads and distribution of knowledge.\n\nAt the start of the new year we wanted to provide an update on our integration progress and our wider vision of leveraging machine learning to make GitLab's [DevOps Platform](/solutions/devops-platform/) smarter. You see, the acquisition of UnReview also was the initial staffing of [our new ModelOps stage](/direction/modelops/).\n\n### Our Newest DevOps Stage\n\nThis new stage, which we’ve named ModelOps, is focused on enabling and empowering data science workloads on GitLab. GitLab ModelOps aims to bring data science into GitLab both within existing features to make them smarter and more intelligent, but also empowering GitLab customers to build and integrate data science workloads within GitLab.\n\nSo what is ModelOps you may wonder? We view ModelOps as an all encompassing term to cover the entire end to end lifecycle of artificial intelligence models. 
We wanted to set our vision wide to fully cover everything needed to power data science workloads. DataOps is the processing of data workloads (think traditional ELT: extract, load, transform) and MLOps is the building, training, and deployment of machine learning models. If you’re confused don’t worry, it’s a lot to wrap your head around.\n\n![a look at the stages of MLOps](https://about.gitlab.com/images/blogimages/MLops.png){: .shadow.small.center}\n\nToday our DevOps Platform helps plan, build, test, secure, deploy, and monitor traditional software. Now we want to extend our DevOps Platform to include AI and ML workloads. If this is interesting to you, be sure to check out our recent Contribute talk where we dive deeper into plans for our ModelOps stage.\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/C08QVI99JLo\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\n### UnReview as our first feature\n\nSo what does this have to do with UnReview? Our acquisition of UnReview is going to be our first [AI Assisted](/direction/ai-powered/) group’s feature: suggested reviewers within [GitLab’s existing reviewers experience](/blog/merge-request-reviewers/). Today, a developer in a merge request has to manually choose a reviewer to look at their code. With UnReview we can leverage the contribution history for a project and recommend someone well-suited for code review of your specific changes.\n\nHere’s an early mockup (and it may differ from our final UI) of how we’re thinking about this integration:\n\n![an early mockup of our UI](https://about.gitlab.com/images/blogimages/codereviewmockup.png){: .shadow.small.left}\n\nThe UnReview algorithm looks at a variety of data points from your project’s contribution history to suggest an appropriate reviewer. 
We’re still in the early days of this integration but our initial internal testing shows great suggestions.\n\n### Customer beta coming soon!\n\nThis leads me to a final question, might you want to be one of our first customers to try this new code review experience? In early 2022, we’ll begin a private customer beta of this new functionality. If interested, [fill out this form to express interest](https://docs.google.com/forms/d/e/1FAIpQLScpmCwpwyBr0GrXxBQ6vE02eokclFAs9lFk_g5dcyuGaHqFuQ/viewform). Do note that we can’t accept everyone and we’ll focus initially on customer profiles that are well suited for the initial version of the suggestion algorithm. Our only ask is we’d like to find customers with active projects that have a healthy number of contributors. The model currently works best on larger repositories with lots of contributors where it may not immediately be clear who is an ideal code reviewer.\n\nWe can’t wait for customers to begin using this new reviewer suggestion experience and will be providing more updates in early 2022.\n",[1128,233,727,771],{"slug":3153,"featured":6,"template":683},"the-road-to-smarter-code-reviewer-recommendations","content:en-us:blog:the-road-to-smarter-code-reviewer-recommendations.yml","The Road To Smarter Code Reviewer Recommendations","en-us/blog/the-road-to-smarter-code-reviewer-recommendations.yml","en-us/blog/the-road-to-smarter-code-reviewer-recommendations",{"_path":3159,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":3160,"content":3166,"config":3172,"_id":3174,"_type":16,"title":3175,"_source":18,"_file":3176,"_stem":3177,"_extension":21},"/en-us/blog/devops-adoption",{"title":3161,"description":3162,"ogTitle":3161,"ogDescription":3162,"noIndex":6,"ogImage":3163,"ogUrl":3164,"ogSiteName":697,"ogType":698,"canonicalUrls":3164,"schema":3165},"Understand how your teams adopt DevOps with DevOps reports","Learn about analytics, DevOps reports, DevOps scores, and 
more.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749668473/Blog/Hero%20Images/john-schnobrich-FlPc9_VocJ4-unsplash.jpg","https://about.gitlab.com/blog/devops-adoption","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Understand how your teams adopt DevOps with DevOps reports\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Orit Golowinski\"}],\n        \"datePublished\": \"2021-12-15\",\n      }",{"title":3161,"description":3162,"authors":3167,"heroImage":3163,"date":3169,"body":3170,"category":14,"tags":3171},[3168],"Orit Golowinski","2021-12-15","\n\nGitLab has an extraordinary range of features for a single application, providing an [entire DevOps platform](/stages-devops-lifecycle/) from [portfolio planning](/stages-devops-lifecycle/plan/) all the way through to [monitoring](/stages-devops-lifecycle/monitor/) and [service desk](https://docs.gitlab.com/ee/user/project/service_desk/). 
As such, GitLab is uniquely positioned to deliver a complete picture of your organization's DevOps journey and your return on investment in automation and DevOps practices.\n\nSome of the most interesting and difficult questions that organizations ask themselves are:\n\n* What do we gain from different development practices used by our teams?\n* What makes one team more efficient than another?\n* What practices have been successful in one team that we can introduce to others?\n\n## Analytics\n\nGitLab has several metrics to give you insight into the development lifecycle:\n\n* [Application Security](https://docs.gitlab.com/ee/user/application_security/security_dashboard/#project-security-dashboard) -  provides a comprehensive set of features for viewing and managing vulnerabilities.\n* [CI/CD](https://docs.gitlab.com/ee/user/analytics/ci_cd_analytics.html) - tracks the history of your pipeline successes and failures, as well as how long each pipeline ran.\n* [Code Review](https://docs.gitlab.com/ee/user/analytics/code_review_analytics.html) - displays open merge requests and their review time.\n* [Insights](https://docs.gitlab.com/ee/user/project/insights/index.html)- allows you to configure custom analytics that will be displayed.\n* [Issue](https://docs.gitlab.com/ee/user/group/issues_analytics/index.html) - illustrates the number of issues created each month.\n* [Merge Request](https://docs.gitlab.com/ee/user/analytics/merge_request_analytics.html) - displays information that will help you evaluate the efficiency and productivity of your merge request process.\n* [Repository](https://docs.gitlab.com/ee/user/analytics/repository_analytics.html) - displays information such as commit statistics, code coverage, and programming languages used in the repository.\n* [Value Stream Analytics](https://docs.gitlab.com/ee/user/analytics/value_stream_analytics.html) - measures the time spent to go from an idea to production.\n\nSome analytics are only available for 
instance-level (self-managed), group level, or project level. Read [more](https://docs.gitlab.com/ee/user/analytics/) about analytics.\n\nThese analytics are a great way to see contributions from different projects and groups. On their own, however, they don't give insights into which processes your teams are using. For that, we offer DevOps Reports.\n\n## DevOps adoption reports\n\nDevOps Adoption is a DevOps Report located in group-level analytics. It shows you data for how teams in your organization use the most essential GitLab features.\n\nYou can use DevOps Adoption to:\n\n- Identify specific subgroups that are lagging in their adoption of GitLab features, so you can guide them on their DevOps journey.\n- Find subgroups that have successfully adopted certain features, and could provide guidance to other subgroups on how to use those features.\n- Verify if you are getting the return on investment that you expected from GitLab.\n\n![DevOps Adoption](https://about.gitlab.com/images/blogimages/devops_reports.png){: .shadow}\n\nIn this example, we can see some interesting data on how a team uses features in development, security, and operations categories:\n\n* **Development**\n  * Approvals: At least one merge request approval on a merge request.\n  * Code owners: At least 1 defined code owner that owns a specific file or repository in the group.\n  * Issues: At least 1 issue opened in this group.\n  * Merge requests: At least 1 merge request opened in this group.\n* **Security**\n  * DAST:  At least 1 DAST scan run in a pipeline in the group.\n  * Dependency Scanning: At least 1 dependency scan ran in a pipeline in the group.\n  * Fuzz Testing: At least 1 fuzz testing scan ran in a pipeline in the group.\n  * SAST: At least 1 SAST scan ran in a pipeline in the group.\n* **Operations**\n  * Deployments: At least 1 deployment.\n  * Pipelines: At least 1 pipeline ran successfully.\n  * Runners: At least 1 runner configured for the project or group.\n\nIn the future 
we plan to add even more feature categories to DevOps Reports, such as:\n* [Environments](https://docs.gitlab.com/ee/ci/environments/#environments-and-deployments)\n* [Pages](https://docs.gitlab.com/ee/user/project/pages/)\n* [Compliance Pipelines](https://docs.gitlab.com/ee/user/project/settings/index.html#compliance-pipeline-configuration)\n* [Incidents](https://docs.gitlab.com/ee/operations/incident_management/incidents.html)\n* [Review Apps](https://docs.gitlab.com/ee/ci/review_apps/#review-apps)\n\n...and much more. You can follow our future plans in the following [epic](https://gitlab.com/groups/gitlab-org/-/epics/5019).\n\n_DevOps Reports are available for the Ultimate tier for self-managed and SaaS users. To find DevOps Reports, go to your group and in the left sidebar, select Analytics > DevOps adoption_\n\n## DevOps Score\n\nYou can use the DevOps score to compare your DevOps status to other organizations.\n\nThe DevOps Score tab shows usage of major GitLab features on your instance over the last 30 days. GitLab calculates the averages feature usage based on the number of billable users in that time period. You can also see the Leader usage score, calculated from top-performing instances based on Service Ping data that GitLab collects. GitLab compares your score to the lead score of each feature and shows it as a percentage underneath the feature. Your overall DevOps Score is an average of your feature scores.\n\nTo analyze your DevOps Score, GitLab aggregates Service Ping (sometimes referred to as Usage Ping) data on GitLab servers for analysis. Your usage information is not sent to any other GitLab instances. 
If you have just started using GitLab, it may take a few weeks for GitLab to collect enough data to calculate your DevOps Score.\n\n![DevOps Score](https://about.gitlab.com/images/blogimages/dev_ops_score_v12_6.png){: .shadow}\n\n_DevOps score is available at the admin panel for all tiers under Analytics > DevOps Reports._\n\nTo see the DevOps score, you must activate your GitLab instance’s [Service Ping](https://docs.gitlab.com/ee/administration/settings/usage_statistics.html#service-ping). This is because DevOps Score is a comparative tool, so your score data must first be centrally processed by GitLab, Inc.\n\nThere are several benefits of enabling Service Ping, such as DevOps Score and cohorts:\n\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/ZhLrhZlb_zI\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\n## Cohorts\n\nCohorts shows your teams' GitLab activities over time, and is a useful tool for administrators to view user retention and manage seats in their GitLab instance.\n\n![Cohorts](https://about.gitlab.com/images/blogimages/cohorts_v13_9_a.png){: .shadow}\n\nUsers are considered active if they have performed at least one of the following activities:\n\n* Sign in to GitLab.\n* Perform a Git activity such as push or pull.\n* Visit pages related to dashboards, projects, issues, or merge requests.\n* Use the API.\n* Use the GraphQL API.\n\nCover image credit:\n\nCover image by [John Schnobrich](https://unsplash.com/photos/FlPc9_VocJ4) on [Unsplash](https://unsplash.com)\n{: .note}\n",[1128,836,749],{"slug":3173,"featured":6,"template":683},"devops-adoption","content:en-us:blog:devops-adoption.yml","Devops 
Adoption","en-us/blog/devops-adoption.yml","en-us/blog/devops-adoption",{"_path":3179,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":3180,"content":3186,"config":3191,"_id":3193,"_type":16,"title":3194,"_source":18,"_file":3195,"_stem":3196,"_extension":21},"/en-us/blog/dont-confuse-these-twelve-shortcuts-with-iteration",{"title":3181,"description":3182,"ogTitle":3181,"ogDescription":3182,"noIndex":6,"ogImage":3183,"ogUrl":3184,"ogSiteName":697,"ogType":698,"canonicalUrls":3184,"schema":3185},"Don’t confuse these 12 shortcuts with iteration","Iteration is a GitLab value. Sid Sijbrandij, GitLab’s co-founder and CEO, discusses 12 shortcuts that are not iterations to help refine what is considered a good iteration.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749663383/Blog/Hero%20Images/tanuki-bg-full.png","https://about.gitlab.com/blog/dont-confuse-these-twelve-shortcuts-with-iteration","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Don’t confuse these 12 shortcuts with iteration\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Sid Sijbrandij\"}],\n        \"datePublished\": \"2021-12-01\",\n      }",{"title":3181,"description":3182,"authors":3187,"heroImage":3183,"date":3188,"body":3189,"category":14,"tags":3190},[2379],"2021-12-01","\n\n[Iteration](https://handbook.gitlab.com/handbook/values/#iteration) is one of GitLab’s [top 3 values](https://handbook.gitlab.com/handbook/values/#hierarchy) because it enables everyone to be efficient in how they deliver value to customers and the wider community. Iteration helps us [build our product and improve our work lives](/blog/power-of-iteration/). \n\nIn essence, iteration is reducing the scope of your next piece of work to the smallest valuable thing possible so that you can deliver it fast. 
By reducing the scope and delivering to the user, rather than holding it back while finishing a larger scope of work, you benefit in the following ways: \n\n- Reduce coordination efforts\n- Reduce cancellations\n- Easier and faster reviews\n- Get feedback faster\n\nIterating helps you ensure that your next step is in the right direction. \n\nAs we’ve practiced iteration at GitLab, I’ve found that when it’s not clearly understood, well-intentioned mistakes can happen. Because iteration is fundamental to everything that we do, it’s critical to regularly reinforce and refine what we define as an iteration. \n\nTo help clarify what is iteration, it helps to see examples of what iteration is not. Here are 12 shortcuts that I’ve seen be mistaken as iteration. \n\n## 1. Reducing quality\n\nSome people will take shortcuts, which leads to lower quality in the final product or deliverable. You can’t reduce quality to minimize the scope of an iteration. Your iteration needs to meet the same quality standards you would expect for any of your work. \n\nFor example, in the case of a user interface, every button needs to work and be properly styled and aligned. Nothing should feel out of place or unfinished. You can reduce the amount of functionality, but the functionality that you deliver needs to look and function as expected. \n\n## 2. Avoiding or reducing documentation\n\nWhen defining the scope of an iteration, make sure you include the right information so that the user can properly understand what happened and can derive value from your work. In the case of a new feature, without proper documentation, the recipient may not understand how to best use the feature, which defeats the purpose of delivering it quickly. \n\nIteration will make documentation faster given the reduced scope, so don’t avoid or delay the documentation. \n\n## 3. Compromising on security\n\nYou can’t compromise on security in the spirit of moving faster. 
An iteration must meet the same security standards and follow all the necessary security practices to ensure that your product and work doesn’t introduce any new vulnerabilities. \n\nAs an example, when building new features in GitLab, no matter how small an iteration is defined, we should always prioritize the protection of customers’ data. \n\n## 4. Delivering something that's not on the recommended path or on by default\n\nTo call an iteration complete, it needs to be on the recommended path or on by default. Otherwise, most users won’t see or benefit from the work.\n\nAt GitLab, in the past, we have made the mistake of considering an iteration complete before making a new feature the default or recommended path for all our users, which then results in fewer users for that functionality. To prevent having functionality in the product that users won’t find, we now require that the feature is on the recommended path and on by default before we call the iteration complete. \n\nFor big changes, such as when a feature may have a big impact on user experience or stability, we use feature flags when initially shipping a feature. This is a good strategy to start delivering something gradually into the product, but we don’t consider the iteration done until that feature flag has been removed and the feature is on by default. \n\n## 5. Shipping something of no value\n\nWhen considering the smallest scope possible for an iteration, the ultimate test needs to be whether it delivers something of value to the end user. Don’t confuse iteration with making progress on an initiative or checking off items on your to-do list. \n\nFor example, when building a new feature you may need to do a fair amount of set up. You may ship to production code that adds a configuration or capability that you’ll need to build the feature, but it’s completely transparent to the user. While that can be considered progress on your project timeline, it is not an iteration. 
The iteration completes when the user can start to derive some value from your efforts. \n\n## 6. An excuse to focus on unimportant items\n\nIteration will help you move faster and deliver more things, but you still have to prioritize and focus on what’s most important. When picking what to work on, you shouldn’t do first what’s smallest in scope. Instead, pick what will give you the highest value for the effort you’ll put in.\n\n## 7. Changing or lowering goalposts\n\nChanging a goal or lowering a goal is not iteration. Iteration is reducing the scope and keeping it small, but the reduced scope still needs to meet your goals. As you practice iteration, you may set goals for smaller time periods, which is a good practice I recommend. But changing the goal post is not a part of iteration. \n\n## 8. Revisions you don't ship or publish\n\nIt’s a common mistake to confuse revisions with iterations. To clearly understand the difference, see whether you’ve shipped or delivered something of value to the end user. If you haven’t, it is a revision, not an iteration. \n\nFor example, if when writing a blog post you get a draft reviewed and rewritten several times before publishing, those are considered revisions. Your first iteration is completed once you’ve published the first version of the blog post. \n\n## 9. An excuse to impose unrealistically tight timelines\n\nIf you set a timeline, it has to be realistic. I’ve seen instances in which people confuse iteration with just shrinking the timeline to something unrealistic. That is not iterating. Iteration is minimizing scope, but it requires a disciplined review of the scope to ensure that you’re allocating the right amount of time to complete it. \n\n## 10. An excuse to avoid planning\n\nSometimes teams confuse iteration with moving quickly on something without planning. This is not iterating. By reducing the scope, there will be less planning involved compared to the initial larger scope. 
But, no matter how small you make the scope of work, you need to plan, and that planning can be quite involved. You need to set an appropriate timeline to deliver the work and plan appropriately for reviews and dependencies. \n\nNot planning appropriately for an iteration negatively impacts efficiency, team morale, and can impact people’s lives outside of work. This needs to be avoided. \n\n## 11. Imposing long hours\n\nDon’t confuse iteration with imposing long working hours for a team. The goal of iterating is to scope work in a way that helps you deliver more tangible value within the same amount of time. Increasing the number of hours that the team works is not iteration. \n\n## 12. Expecting others to fix your work\n\nWhen you iterate, you need to take ownership and make sure that the end result is of value and meets all expectations of a finished product. You should not call an iteration complete if the work still requires fixing in order to be of value or meet quality expectations. \n\nAs GitLab grows as a company, reinforcing our Iteration value and staying clear about what is an iteration is fundamental to us [staying a startup](/company/still-a-startup/). I hope these 12 examples that are not iteration are helpful and empower everyone to help identify and correct situations when iteration is used incorrectly. Using iteration correctly will help us continue to move fast and deliver more value to our customers. It will also help keep the day-to-day momentum as we deliver valuable results. 
\n\nWatch this GitLab Unfiltered video where I discuss these 12 shortcuts that are not iteration.\n\n\u003C!-- blank line -->\n\n\u003Cfigure class=\"video_container\">\n\n  \u003Ciframe src=\"https://www.youtube.com/embed/BW6TWwNZzIQ\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\n\u003C/figure>\n\n\u003C!-- blank line -->\n",[1128,727,1069],{"slug":3192,"featured":6,"template":683},"dont-confuse-these-twelve-shortcuts-with-iteration","content:en-us:blog:dont-confuse-these-twelve-shortcuts-with-iteration.yml","Dont Confuse These Twelve Shortcuts With Iteration","en-us/blog/dont-confuse-these-twelve-shortcuts-with-iteration.yml","en-us/blog/dont-confuse-these-twelve-shortcuts-with-iteration",{"_path":3198,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":3199,"content":3204,"config":3209,"_id":3211,"_type":16,"title":3212,"_source":18,"_file":3213,"_stem":3214,"_extension":21},"/en-us/blog/gitlab-incident-management",{"title":3200,"description":3201,"ogTitle":3200,"ogDescription":3201,"noIndex":6,"ogImage":1874,"ogUrl":3202,"ogSiteName":697,"ogType":698,"canonicalUrls":3202,"schema":3203},"Downtime happens, but GitLab Incident Management can help","GitLab's DevOps Platform doesn't just make it easy to release safe software faster, it also streamlines the process for problem solving. Here's a deep dive into GitLab Incident Management.","https://about.gitlab.com/blog/gitlab-incident-management","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Downtime happens, but GitLab Incident Management can help\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Itzik Gan Baruch\"}],\n        \"datePublished\": \"2021-11-30\",\n      }",{"title":3200,"description":3201,"authors":3205,"heroImage":1874,"date":3206,"body":3207,"category":14,"tags":3208},[702],"2021-11-30","\n\nDowntime is expensive and the cost is growing. 
Software reliability is as important as the product itself – it doesn't matter what your product can do if your customers can't reliably access it. GitLab's Incident Management is built-in to our [DevOps Platform](/solutions/devops-platform/) and empowers teams with adaptable practices and a streamlined workflow for triage and resolving incidents. We offer tools that provide access to observability resources, such as metrics, logs, errors, runbooks, and traces, that foster easy collaboration across response teams, and that support continuous improvement via post-incident reviews and system recommendations. Here's a look at how it all works.\n\n## The costs of being down\n\nDowntime can cost companies hundreds of thousands of dollars in a single hour. Avoiding downtime is critical for organizations. Companies need to invest time, establish processes and culture around managing outages, and have processes to resolve them quickly. The larger an organization becomes, the more distributed their systems. This distribution leads to longer response times and more money lost. Investing in the right tools and fostering a culture of autonomy, feedback, quality, and automation leads to more time spent innovating and building software. If done well, teams will spend less time reacting to outages and racing to restore services. The tools your [DevOps](/topics/devops/) teams use to respond during incidents also have a huge effect on MTTR (Mean Time To Resolve, also known as Mean Time To Repair).  \n\n## What is an incident? \n\nIncidents are anomalous conditions that result in — or may lead to — service degradation or outages. Those outages can impact employee productivity, and decrease customer satisfaction and trust. These events require human intervention to avert disruptions or restore service to operational status. Incidents are always given attention and resolved.\n\n## What is Incident Management? 
\n\nIncident Management is a process which is focused on restoring services as quickly as possible and proactively addressing early vulnerabilities and warnings, all while keeping employees productive and customers happy. \n\n## Meet GitLab Incident Management \n\n[GitLab Incident Management](https://docs.gitlab.com/ee/operations/incident_management/) aims to decrease the overhead of managing incidents so response teams can spend more time actually resolving problems. We accelerate problem resolution through efficient knowledge sharing in the same tool they already use to collaborate on development. Enabling teams to quickly gather resources in one central, aggregated view gives the team a single source of truth and shortens the MTTR. \n\nGitLab’s built-in Incident management solution provides tools for the triage, response, and remediation of incidents. It enables developers to easily triage and view the alerts and incidents generated by their application. By surfacing alerts and incidents _where the code is being developed_, problems can be resolved more efficiently. \n\n## Why Incident Management within GitLab?\n\nGitLab is a [DevOps Platform](/solutions/devops-platform/), delivered as a single application. As such, we believe there are additional benefits for DevOps users to manage incidents within GitLab.\n\n1. Co-location of code, CI/CD, monitoring tools, and incidents reduces context switching and enables GitLab to correlate what would be disparate events or processes within one single control pane.\n\n2. The same interface for development collaboration and incident response streamlines the process. 
The developers who are on-call can use the same interface they already use every day; this prevents the incident responders from having to use a tool they are unfamiliar with and thus hampering their ability to respond to the incident.\n\n## How to manage incidents in the GitLab DevOps Platform\n\n### Create an incident manually or automatically \n\nYou can create incidents manually or enable GitLab to create incidents automatically whenever an alert is triggered. If you use PagerDuty for incidents, you can [set up a webhook with PagerDuty](https://docs.gitlab.com/ee/operations/incident_management/incidents.html#create-incidents-via-the-pagerduty-webhook) to automatically create a GitLab incident for each PagerDuty incident. \n\n![pd](https://about.gitlab.com/images/blogimages/incident-mgmt/pager.png)\n\n### Alert Management \n\n[Alerts](https://docs.gitlab.com/ee/operations/incident_management/alerts.html) are a critical entity in incident management workflow. They represent a notable event that might indicate a service outage or disruption. GitLab can accept alerts from any source via a webhook receiver. GitLab provides a list view for triage and detail view for deeper investigation of what happened.\n\n![alert](https://about.gitlab.com/images/blogimages/incident-mgmt/alert.png)\n\n### On-Call Schedules\n\nTo maintain the availability of your software services you need to schedule on-call teams. [On-call schedule management](https://docs.gitlab.com/ee/operations/incident_management/oncall_schedules.html) is being used to create schedules for responders to rotate on-call responsibilities. Within each schedule you can add team members to rotations that last hours, weeks or days depending on your team's needs. Some teams need to be on-call just during business hours, while others have someone on-call 24/7, 365; every team is different.  
\n\n![on-call](https://about.gitlab.com/images/blogimages/incident-mgmt/on-call.png)\n\n### Escalation Policies\n\n[Escalation Policies](https://docs.gitlab.com/ee/operations/incident_management/escalation_policies.html) determine when users on-call get notified and what happens if they don’t respond. They are the if/then logic that use on-call schedules to make sure teams never miss an incident. You can create an escalation policy in the GitLab project where you manage on-call schedules.\n\n![escalation](https://about.gitlab.com/images/blogimages/incident-mgmt/escalation.png) \n\n### Paging and Notifications \n\nWhen there is a new alert or incident, it is important for a responder to be notified immediately so they can triage and respond to the problem. GitLab Incident Management supports email notifications, with plans to add Slack notifications, SMS, and phone calls. \n\n\n\n\n\n\n",[1128,1428,728],{"slug":3210,"featured":6,"template":683},"gitlab-incident-management","content:en-us:blog:gitlab-incident-management.yml","Gitlab Incident Management","en-us/blog/gitlab-incident-management.yml","en-us/blog/gitlab-incident-management",{"_path":3216,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":3217,"content":3222,"config":3229,"_id":3231,"_type":16,"title":3232,"_source":18,"_file":3233,"_stem":3234,"_extension":21},"/en-us/blog/five-devops-platform-benefits-that-inspire-gitlab-users",{"title":3218,"description":3219,"ogTitle":3218,"ogDescription":3219,"noIndex":6,"ogImage":3183,"ogUrl":3220,"ogSiteName":697,"ogType":698,"canonicalUrls":3220,"schema":3221},"Five benefits that inspire users to become GitLab advocates","Learn how a single tweet led to insight from our advocates on how they use GitLab to improve their work and their own lives.","https://about.gitlab.com/blog/five-devops-platform-benefits-that-inspire-gitlab-users","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": 
\"5 DevOps platform benefits that inspire GitLab users to become GitLab advocates\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Omar Fernandez\"}],\n        \"datePublished\": \"2021-11-23\",\n      }",{"title":3223,"description":3219,"authors":3224,"heroImage":3183,"date":3226,"body":3227,"category":14,"tags":3228},"5 DevOps platform benefits that inspire GitLab users to become GitLab advocates",[3225],"Omar Fernandez","2021-11-23","\n\nAt GitLab, we believe that a [single DevOps platform helps teams](https://about.gitlab.com/handbook/product/single-application/) to collaborate better and deliver software faster and with better security. In September, GitLab’s CEO Sid Sijbrandij [asked on Twitter](https://twitter.com/sytses/status/1440799819119824898) for volunteers willing to share their stories of advocating for the adoption of GitLab. Over the following days, GitLab team members interviewed 25 GitLab advocates who offered to share their experiences. Among other things, we asked them: \n\n- How did you first encounter GitLab?\n- Why have you advocated for the adoption of GitLab?\n- How has advocating and using GitLab in your organization benefited you? \n\nOur advocate interviews validated that GitLab’s single-application [DevOps Platform](/solutions/devops-platform/) has unlocked value for GitLab users. Below are excerpts of some of these interviews to give you the opportunity to hear directly from GitLab users. In them, you'll learn about five GitLab benefits that converted these users into advocates. \n\n## 1. A single application helps focus on work that matters\n\nGitLab’s single application helps users to focus on work that matters. GitLab reduces the need to context-switch as users no longer need to jump across disparate point-solutions. 
Users are better able to focus, stay on task, and drive business results.\n\n\u003C!-- blank line -->\n\n\u003Cfigure class=\"video_container\">\n\n\u003Ciframe src=\"https://www.youtube.com/embed/pMWXn6NslEE\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\n\u003C/figure>\n\n\u003C!-- blank line -->\n\nSam Briesemeister highlights the benefits of working on one platform, being able to link the work done to a specific issue, and increasing developer productivity. By using GitLab, users save time in their life. \n\n_“What [investing in GitLab] ultimately does is, actually, we’re saving somebody’s time [in] their life. We’re not wasting their life.”_\n\n\u003C!-- blank line -->\n\n\u003Cfigure class=\"video_container\">\n\n  \u003Ciframe src=\"https://www.youtube.com/embed/YcsT53c_Nuo\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\n\u003C/figure>\n\n\u003C!-- blank line -->\n\nProfessor Neil Gershenfeld speaks about how GitLab allows his labs to do what used to require five separate solutions, one each for web serving, teaching classes, access control, documentation, and security. \n\n_When asked how GitLab has made your life better, Professor Gershenfeld said: “It’s almost hard to answer because it’s like ‘why do I like air?’ It’s just sort of, most of my work ends up in GitLab. It’s just a natural part of my working day.”_\n\n\u003C!-- blank line -->\n\n\u003Cfigure class=\"video_container\">\n\n\u003Ciframe src=\"https://www.youtube.com/embed/nUY8RrOyGPo\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\n\u003C/figure>\n\n\u003C!-- blank line -->\n\nPhilipp Westphalen, one of our [GitLab Heroes](https://about.gitlab.com/community/heroes/members/), speaks about GitLab’s ease of use and how having a single tool instead of multiple separate solutions allows him to focus on getting things done. \n\n_“For me, it feels like home... It’s really easy to use... 
and you can focus on getting things done.”_\n\n\u003C!-- blank line -->\n\n\u003Cfigure class=\"video_container\">\n\n\u003Ciframe src=\"https://www.youtube.com/embed/heFWR23Z5nw\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\n\u003C/figure>\n\n\u003C!-- blank line -->\n\nPavle Djuric also speaks to us about the ability to focus on work and GitLab’s ease of use.\n\n_“[Working in GitLab] makes you feel very professional. You feel like you’re doing your job. You’re way more efficient as a team.”_\n\n## 2. Reducing manual tasks through automation\n\nSeveral advocates spoke about the benefits of automating tasks within GitLab to free up time for more productive activities. \n\n\u003C!-- blank line -->\n\n\u003Cfigure class=\"video_container\">\n\n\u003Ciframe src=\"https://www.youtube.com/embed/HW0ByLmG8sY\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\n\u003C/figure>\n\n\u003C!-- blank line -->\n\nAndrew Jones speaks about using GitLab to reduce repetitive tasks. He can’t imagine going back to the old way of doing things with many manual tasks. \n\n_“It just takes care of the stuff that would normally be laborious, painful repetitive stuff and allows you to focus on your primary function. I couldn’t imagine working without GitLab. I can’t imagine going back to the old way.”_\n\n\u003C!-- blank line -->\n\n\u003Cfigure class=\"video_container\">\n\n\u003Ciframe src=\"https://www.youtube.com/embed/oN1cieaeLBk\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\n\u003C/figure>\n\n\u003C!-- blank line -->\n\nJan Mikes tells us about automation and the ability to get things done without context switching or moving across apps. This helps his productivity and efficiency. \n\n_“There’s high demand for CI engineers and since I work as an architect, this is a high-demand skill, to write pipelines, optimize performance, shorten the time from writing the code to deploy to production. 
And all of this I can deliver and that’s another reason why I love GitLab, because anything that comes to my mind, I figure some way how to do it with GitLab CI.”_\n\n\u003C!-- blank line -->\n\n\u003Cfigure class=\"video_container\">\n\n\u003Ciframe src=\"https://www.youtube.com/embed/SvQUM6DL1B4\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\n\u003C/figure>\n\n\u003C!-- blank line -->\n\nMarc-Aurele Brothier speaks to us about how the adoption of GitLab helped his team be more efficient and streamline collaboration. \n\n_“[A customer was] very happy because we could demonstrate [to] them that, with [GitLab], that they could create the release, open a PR, say I want to deploy in our environment, and just accept it, and it was done two minutes later and it was automated. So it’s not anymore like asking someone, sending emails, or sending a request to another team. Just you do it and you get it.”_\n\n## 3. Improved transparency and collaboration\n\nTransparency and Collaboration are two of [GitLab’s values](https://handbook.gitlab.com/handbook/values/), so it was great to hear how adopting GitLab helps teams operate in a more transparent and collaborative manner. \n\n\u003C!-- blank line -->\n\n\u003Cfigure class=\"video_container\">\n\n\u003Ciframe src=\"https://www.youtube.com/embed/1rdtQ3tvDtg\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\n\u003C/figure>\n\n\u003C!-- blank line -->\n\nGerben Geijteman tells us that collaboration and communication are enhanced by the transparency you get in GitLab by having the solution, or code, linked to directly from the issue tracker. This benefits collaboration with other team members and with clients. \n\n_“GitLab for me unifies it all in the same place so everyone is looking at the same code in the same direction with the same quality level.”_\n\n_“In projects where we have direct customer communication, we like to also use GitLab because GitLab gives you a more direct mode of communication. 
You can say we fixed this issue with this particular line of code at that particular moment in time. And it takes away intermediate layers of communication... and it keeps everything in context.”_\n\n\u003C!-- blank line -->\n\n\u003Cfigure class=\"video_container\">\n\n\u003Ciframe src=\"https://www.youtube.com/embed/pWVEnIQjGbE\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\n\u003C/figure>\n\n\u003C!-- blank line -->\n\nSebastian Schmid talks about how, since the adoption of GitLab, different teams in his organization are able to share and reuse source code more easily.\n\n_“Before [GitLab], only the team working on the code was able to see the code... [After starting to use GitLab] they started to use source code from other teams and collaborate in code with other teams.”_ \n\n_“People could start to contribute to our product and they don’t need to have some special skills. They just could write [an] issue, could paste some screenshots and stuff like that, and some engineers could take this and improve the product.”_\n\n## 4. A welcoming community\n\nAnyone can directly contribute to our open source GitLab core and help enhance the GitLab platform. We appreciated hearing advocates talk about their experiences contributing to GitLab and how welcoming the GitLab community has been. \n\n\u003C!-- blank line -->\n\n\u003Cfigure class=\"video_container\">\n\n  \u003Ciframe src=\"https://www.youtube.com/embed/qy9f-7DI_5k\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\n\u003C/figure>\n\n\u003C!-- blank line -->\n\nDave Barr speaks about the value of working on a platform with an open source core and how great it is to see GitLab employees interact with contributors in the same way that they would with other GitLab employees. \n\n_“How you interact with that community is really telling. The way GitLab staff does that is really embracing, welcoming, open to feedback. They provide feedback; it's just like you’re a staff member. 
The approach they take to community contributions is the same exact approach they take for a staff merge request and that’s a fantastic approach.”_ \n\n\u003C!-- blank line -->\n\n\u003Cfigure class=\"video_container\">\n\n\u003Ciframe src=\"https://www.youtube.com/embed/LNp3ioZr5mg\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\n\u003C/figure>\n\n\u003C!-- blank line -->\n\nGary Bell talks to us about how welcoming, responsive, and understanding the GitLab community is with new people who want to contribute. \n\n_“Personally, I’ve just found the overall GitLab community to be very welcoming and very understanding. Just the patience people have... they’re just welcoming and willing to give the time to help. That’s been absolutely fantastic to feel that, which is something that I’ve not felt when I’ve tried to contribute to other open source projects before.”_\n\n\u003C!-- blank line -->\n\n\u003Cfigure class=\"video_container\">\n\n\u003Ciframe src=\"https://www.youtube.com/embed/4-z3QjPzFPk\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\n\u003C/figure>\n\n\u003C!-- blank line -->\n\nSimilarly, Sven Patrick Meier shares with us his journey from identifying a potential feature, proposing it, and working through the process to get the contribution accepted. \n\n_“[I submitted a contribution] and the maintainers of the project commented on my feature request and said ‘great idea,’ and I provided the template as a basic example. They helped me with so many things, and, right now, I’m right before the first contribution to that awesome product that I use every day.”_ \n\n## 5. Exposure to end-to-end modern software development\n\nUsers talk about how GitLab helps introduce them to modern software development practices. They appreciated GitLab’s monthly releases packed with new features. 
\n\u003C!-- blank line -->\n\n\u003Cfigure class=\"video_container\">\n\n\u003Ciframe src=\"https://www.youtube.com/embed/YMydvPCIg44\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\n\u003C/figure>\n\n\u003C!-- blank line -->\n\nMarcos Ortiz praises GitLab’s ease of use and how it made it easier to onboard team members. Marcos also speaks about how, when you get used to the way of doing things on GitLab, you can internalize beneficial development practices.\n\n_“When you get used to all these practices, load code frequently, to get everything in branch inside your repo story, I believe you can be faster in development.”_ \n\n\u003C!-- blank line -->\n\n\u003Cfigure class=\"video_container\">\n\n\u003Ciframe src=\"https://www.youtube.com/embed/fP50GWZxz48\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\n\u003C/figure>\n\n\u003C!-- blank line -->\n\nIn our discussion with Andy Malkin and Michael Kalange, we hear about how they feel that GitLab is not only on the cutting edge, but also a very reliable part of their work. \n\n_“When I use [GitLab], I feel like I’m on the cutting edge. A lot of time in tech you can feel like you’re using something and you know it’s outdated, but I don’t have that feeling with GitLab. When I’m using GitLab, I feel that you really are pushing the boundaries in terms of what’s the next thing that we need.”_ \n\n\u003C!-- blank line -->\n\n\u003Cfigure class=\"video_container\">\n\n\u003Ciframe src=\"https://www.youtube.com/embed/7gU12X10718\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\n\u003C/figure>\n\n\u003C!-- blank line -->\n\nChris Evans speaks to us about how GitLab's neatly organized user interface helped him learn more about the overall DevOps processes and tools.\n\n_“[I] started off as a network engineer [and] I ended up [in] some sysadmin-related roles but I was never really exposed to the software development lifecycle... 
but just through choosing GitLab as a project management platform, I was exposed to so many of the tools of the trade for this other industry, software engineering, and I was able to almost learn those in a way without having to use them directly.”_  \n\n\u003C!-- blank line -->\n\n\u003Cfigure class=\"video_container\">\n\n\u003Ciframe src=\"https://www.youtube.com/embed/JhfFlSBQ7tY\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\n\u003C/figure>\n\n\u003C!-- blank line -->\n\nSimilarly, Ion Nistor tells us about how he gets exposed to new areas and tools in DevOps by using GitLab. \n\n_“I like to learn. Many of the things that GitLab brings are related to new technologies and new ways of doing things. GitLab in this sense acted [as a] gateway to new technologies. I have to learn about DevOps more, about containers, and these are benefits for my personal development.”_ \n\n\u003C!-- blank line -->\n\n\u003Cfigure class=\"video_container\">\n\n\u003Ciframe src=\"https://www.youtube.com/embed/yuBeOxqnou4\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\n\u003C/figure>\n\n\u003C!-- blank line -->\n\nDibyadarshi Dash, a past intern at GitLab, highlights how having a single, integrated product to develop software made it easier for him to learn about software development end-to-end. \n\n_“I got a good exposure to what the software development lifecycle looks like because it was all in one platform, all in GitLab. The writing, coding, merging, deploying, testing, everything was in one platform. And I feel that because it was all in one integrated platform, I got a good holistic exposure to the whole cycle and I understood the cycle even better.”_ \n\n## Bringing it all together\n\nThe GitLab advocates reinforced our belief in GitLab’s value as a single DevOps platform. The advocates talked about being able to focus on getting work done, using automation to reduce manual steps, and transparently collaborating with their colleagues. 
They also highlighted how GitLab helps them get exposed to and learn about the modern software development process end-to-end thanks to our fast innovation and how our welcoming open source community has made it possible for them to contribute features to GitLab.\n",[1128,1943,269],{"slug":3230,"featured":6,"template":683},"five-devops-platform-benefits-that-inspire-gitlab-users","content:en-us:blog:five-devops-platform-benefits-that-inspire-gitlab-users.yml","Five Devops Platform Benefits That Inspire Gitlab Users","en-us/blog/five-devops-platform-benefits-that-inspire-gitlab-users.yml","en-us/blog/five-devops-platform-benefits-that-inspire-gitlab-users",{"_path":3236,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":3237,"content":3242,"config":3248,"_id":3250,"_type":16,"title":3251,"_source":18,"_file":3252,"_stem":3253,"_extension":21},"/en-us/blog/you-are-hired-two-gitlab-contributors-turn-their-success-into-full-time-engineering-roles",{"title":3238,"description":3239,"ogTitle":3238,"ogDescription":3239,"noIndex":6,"ogImage":3183,"ogUrl":3240,"ogSiteName":697,"ogType":698,"canonicalUrls":3240,"schema":3241},"How contributors earned full-time engineering roles at GitLab","As we continue to celebrate the 10th anniversary of the first commit to GitLab, here's a look at how two highly active community members became enthusiastic team members.","https://about.gitlab.com/blog/you-are-hired-two-gitlab-contributors-turn-their-success-into-full-time-engineering-roles","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"You're hired! Two GitLab contributors turn their success into full-time engineering roles\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"GitLab\"}],\n        \"datePublished\": \"2021-11-12\",\n      }",{"title":3243,"description":3239,"authors":3244,"heroImage":3183,"date":3245,"body":3246,"category":14,"tags":3247},"You're hired! 
Two GitLab contributors turn their success into full-time engineering roles",[1524],"2021-11-12","[Greg Myers](https://gitlab.com/greg) and [Rajendra Kadam](https://gitlab.com/rkadam3) have something beyond their engineering roles at GitLab in common – both started out as GitLab contributors. We wanted to share their stories as part of our celebration around the 10th anniversary of the first commit to GitLab.\n\nMyers, a GitLab Senior Support Engineer, says his contributions started in 2018, when he first found his passion for helping other community forum members. \n\n“Most of my early contributions involved helping people set up, configure, and troubleshoot self-hosted GitLab installations,” Myers says.\n\nHe enjoyed this helper role so much he applied for an engineering position, but failed the technical interview and didn’t receive an offer. “I kept contributing to GitLab and helping others in the forum while I leveled up in my weak areas,” he says.\n\nKadam, a GitLab Back-end Engineer and [GitLab hero](/community/heroes/members/), started contributing to GitLab in Jan 2020 to learn more about Ruby on Rails and apply it to his then-workplace. \n\n“I did not stop after that since it is more than the code. I loved working with people at GitLab and the culture, even though I was not a full-time team member,” Kadam says.\n\nLike Kadam, Myers enjoyed being a part of the GitLab community. “The majority of my ‘code’ contributions back then were quite simple – fixing typos and markdown formatting issues in documentation,” he says. “I'd never contributed to an open source project of this size and caliber, and I was impressed by how easy and smooth it was to get involved and contribute.”\n\nHe remembers feeling “star-struck” when GitLab co-founder Dmitriy Zaporozhets personally responded in the comments to one of his first MRs.\n\nUsing what he learned as a contributor, Kadam earned a promotion from his employer. 
He went on to participate in [GitLab hackathons](/community/hackathon/), winning three in a series. His prominence in the GitLab community led him to be offered and to accept an internal engineering role in February 2021. Kadam blogged about the journey from being a contributor to a team member [on Medium](https://rajendraak.medium.com/how-i-got-a-job-at-gitlab-a3515214b74b).\n\nMyers, meanwhile, feeling more confident about his skills, took another shot at a team member role. “After four months, I reapplied for the support engineer position, and this time I got the job. Now it is my job to help others with GitLab and contribute to GitLab, and I love what I do,” Myers says.\n\nAs a Developer Relations Support counterpart, he helps others in the GitLab community forum and advocates for the GitLab wider community. And, as a GitLab Open Source Support Liaison, “I give back to open source communities I know and love,” he says.\n\nHe encourages others to not only contribute to the GitLab community but to help other forum members as he did. After all, you never know where those contributions can lead. 
“Being a GitLab community member and contributor led me to my dream job,” he says.",[1128,1943,1210],{"slug":3249,"featured":6,"template":683},"you-are-hired-two-gitlab-contributors-turn-their-success-into-full-time-engineering-roles","content:en-us:blog:you-are-hired-two-gitlab-contributors-turn-their-success-into-full-time-engineering-roles.yml","You Are Hired Two Gitlab Contributors Turn Their Success Into Full Time Engineering Roles","en-us/blog/you-are-hired-two-gitlab-contributors-turn-their-success-into-full-time-engineering-roles.yml","en-us/blog/you-are-hired-two-gitlab-contributors-turn-their-success-into-full-time-engineering-roles",{"_path":3255,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":3256,"content":3262,"config":3267,"_id":3269,"_type":16,"title":3270,"_source":18,"_file":3271,"_stem":3272,"_extension":21},"/en-us/blog/6-tips-to-make-software-developer-hiring-easier",{"title":3257,"description":3258,"ogTitle":3257,"ogDescription":3258,"noIndex":6,"ogImage":3259,"ogUrl":3260,"ogSiteName":697,"ogType":698,"canonicalUrls":3260,"schema":3261},"6 tips to make software developer hiring easier","If your developers are leaving and it's tough to hire, here's our best advice to stem the tide. 
One hint: A DevOps Platform can help!","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749668486/Blog/Hero%20Images/why-you-should-join-the-gitlab-security-team.jpg","https://about.gitlab.com/blog/6-tips-to-make-software-developer-hiring-easier","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"6 tips to make software developer hiring easier\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Valerie Silverthorne\"}],\n        \"datePublished\": \"2021-11-09\",\n      }",{"title":3257,"description":3258,"authors":3263,"heroImage":3259,"date":3264,"body":3265,"category":14,"tags":3266},[1859],"2021-11-09","\nMastering software developer hiring has never been more critical – or more difficult.\n\nIn fact, it’s almost the perfect storm: There’s a global and growing shortage of developers; voluntary job turnover rates in the US are 25% (almost double what they were three years ago); and demand for skilled engineers is expected to outstrip supply by 1.2 million in three years, according to the Bureau of Labor Statistics. \n\nAt the same time, what developers want, or will settle for, has changed, perhaps at least partially driven by the pandemic. Developers want meaningful challenges, a flexible work-life balance, tools and processes that don’t slow them down, and, increasingly, the option to work completely remotely.\n\nSo what can you do to keep your developers from leaving and make software developer hiring easier? Here’s our best advice:\n\n* Understand the developer mentality. “Of all the tech roles, developers are the most fickle,” says GitLab’s staff developer evangelist [Brendan O’Leary](/company/team/#brendan), who, with nearly 20 years experience as a developer, is in a very good position to know. “They don’t want to put up with a lot and tend to have strong opinions.” \n\n* Stop measuring. 
It is possible to tie developer productivity to results, and not a mandatory 40-hour work week. How do we know this? Because that’s how GitLab operates, by [measuring results and not hours spent](https://handbook.gitlab.com/handbook/values/#results). “Companies need to stop measuring knowledge workers, like developers, by the hours they spend,” O'Leary says. “That’s the worst thing you could do.” Instead, build a culture that values paid time-off, family leave, and other work-life balance efforts because those will resonate with developers, he stresses. \n\n* Up your tool game. The [science has spoken](/blog/why-software-developer-job-satisfaction-matters-and-how-to-make-it-happen/) and developers drowning in information overload aren’t as productive, which ties directly into job satisfaction and happiness. In our [2021 Global DevSecOps Survey](/developer-survey/), we heard a lot about tool chains with between five and 15 tools on them, and often there wasn’t just one tool chain in play, but several. That’s a lot of noise. A [DevOps platform](/solutions/devops-platform/) streamlines code development, testing, deployment, and monitoring and definitely improves a company’s ability to successfully do DevOps. When we asked respondents to tell us in their own words about the benefits of a DevOps platform, this comment summed it up:  “Reduced mean time to recovery (MTTR), quicker time to market, reduced lead time for fixes, and fewer change failures.” \n\n* Embrace flexible work. Nearly two years into a global pandemic, the time is right for companies to be deliberate about their choices. Developers are going to choose employers who have thought through all the options, whether it’s fully remote, remote with flexibility, or other combinations. But they’re not going to settle for companies trying to patchwork it without a solid plan. “Not every company is losing developers,” O'Leary says. 
“Developers are going to the places that understand the flexibility in life that can come from remote, while also not sacrificing any productivity.”\n\n* Don’t forget “concrete” perks. Free soda and “bring your dog to work” days probably aren’t enough to make developers consider your team, or decide to stay long term. Focus on what matters: time for volunteer or side projects, a collaborative culture, and demonstrable recognition of success. We offer [discretionary bonuses](/handbook/incentives/#discretionary-bonuses). Some companies send handwritten notes from senior leadership, while others meet monthly for group celebrations. Whatever you do, just make it sure it’s authentic.\n\n* Assess the skills gap. You’ve got unfilled roles and DevOps team members itching for a change. Why not marry the two?  West Monroe, a Chicago-based technology consulting firm, found 56% of managers surveyed rated their organization’s skills gap [as moderate to severe](https://www.westmonroe.com/perspectives/signature-research/the-upskilling-crisis-effectively-enabling-and-retraining-employees-for-the-future?utm_source=google&utm_medium=cpc&utm_term=upskilling&utm_content=!acq!v3!118035700243_kwd-333379491008__501805835687_g_c__&utm_campaign=Search%3A+Prospecting%3A+BA%3A+Priority+Content%3A+Gated%3A+Tier+3_BBM&atrkid=V3ADWED098667_118035700243_kwd-333379491008__501805835687_g_c___&gclid=CjwKCAjw7--KBhAMEiwAxfpkWF1Xg74_9zydAzfcJLt0t90OMh7MYsyV3yOfwK4bJWt-OBX1BzW2mRoClv4QAvD_BwE). And a survey from the McKinsey Quarterly discovered 53% of executive respondents felt [reskilling](https://www.mckinsey.com/business-functions/mckinsey-accelerate/our-insights/five-fifty-the-skillful-corporation?cid=fivefifty-eml-alt-mkq-mck&hlkid=a7a8ae1b68574d02b81db1f1eeb8fd8d&hctky=12428831&hdpid=8233aa33-5ff4-4450-a4c7-2f47dfeaf9d0) was the best solution to the skills gap. So stand out from the crowd and offer solid learning paths to employees, as well as tuition reimbursement. 
At the very least, offer your DevOps team time for [DIY learning](/blog/best-advice-for-your-devops-career-keep-on-learning/), as needed. Also consider [job swapping](https://www.managersorbit.com/job-swapping-benefits/), which can be a great way to expose employees to new career opportunities.\n \n_Sharon Gaudin contributed to this blog post._\n\n\n## Read more on DevOps careers: \t\t\n\n- [Best advice for your DevOps career? Keep on learning](/blog/best-advice-for-your-devops-career-keep-on-learning/)\n\n- [Four tips to increase your DevOps salary](/blog/four-tips-to-increase-your-devops-salary/)\n\n- [DevOps salaries in 2021: Where do you rank?](/blog/a-look-at-devops-salaries/)\n\n- [Have DevOps jobs to fill? Try these 3 strategies to hire and retain](/blog/have-devops-jobs-to-fill-try-these-3-strategies-to-hire-and-retain/)\n\n\n\n",[1408,1128,1466],{"slug":3268,"featured":6,"template":683},"6-tips-to-make-software-developer-hiring-easier","content:en-us:blog:6-tips-to-make-software-developer-hiring-easier.yml","6 Tips To Make Software Developer Hiring Easier","en-us/blog/6-tips-to-make-software-developer-hiring-easier.yml","en-us/blog/6-tips-to-make-software-developer-hiring-easier",{"_path":3274,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":3275,"content":3281,"config":3287,"_id":3289,"_type":16,"title":3290,"_source":18,"_file":3291,"_stem":3292,"_extension":21},"/en-us/blog/the-devops-platform-for-agile-business",{"title":3276,"description":3277,"ogTitle":3276,"ogDescription":3277,"noIndex":6,"ogImage":3278,"ogUrl":3279,"ogSiteName":697,"ogType":698,"canonicalUrls":3279,"schema":3280},"The DevOps Platform for agile business","For a truly agile business, in every sense of the word, we think you need GitLab's DevOps Platform. 
Here's why.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749668155/Blog/Hero%20Images/devops-strategy-devops-toolchain.png","https://about.gitlab.com/blog/the-devops-platform-for-agile-business","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"The DevOps Platform for agile business\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Cormac Foster\"}],\n        \"datePublished\": \"2021-11-03\",\n      }",{"title":3276,"description":3277,"authors":3282,"heroImage":3278,"date":3284,"body":3285,"category":14,"tags":3286},[3283],"Cormac Foster","2021-11-03","\n\nIf you’ve spent time on the site, you already know that GitLab is The DevOps Platform, a single-application solution that is radically different than DIY DevOps toolchains. Have questions? Great — because we have answers, and [we’d love to chat](/demo/). But let’s address the most important question of all right here: “What’s in it for me?” Our approach is objectively different from other solution providers, but why should you care?\n\nIt’s a valid question, and one you should feel comfortable asking any solution provider. You don’t trade your station wagon for a Ferrari for looks, you get the Ferrari because it wins races. In our case, [The DevOps Platform](/solutions/devops-platform/) will absolutely make you better at “doing DevOps.” You can simplify your infrastructure like [Glympse](/customers/glympse/), which consolidated 20 tools into one. You can speed your delivery like [Goldman Sachs](/customers/goldman-sachs/), which increased deployments from two per month to 1,000 per day. You can run security scans concurrently with development like [Chorus](/customers/chorus/). These are all substantial improvements that can [generate real ROI](/resources/report-forrester-tei/) right away while improving quality of life for your employees. 
On its own, that’s fantastic — but incremental gains are just the start of the journey. \n\nUltimately, the point of The DevOps Platform isn’t doing DevOps better; it’s transforming your business and improving agility.\n\n_That’s a pretty bold statement._\n\nQuite, but it’s also true. We don’t want to improve DevOps outputs. We want to help you realize business outcomes. \n\nDevOps broke down silos between Development and Operations, ramping efficiency and velocity, but did that ultimately make businesses more profitable or innovate? Frequently, no, because the blocker to value creation just moved somewhere else — like Security, Product or Compliance. You can deploy 100x per day, but if a weeks-long audit process stands between any build and your customer, those efficiency gains might not matter. That’s where The DevOps Platform shines. As a single source of truth with a single data store, it provides visibility and context to every stakeholder in the company, whether they’re a Developer, Designer, Auditor, Security Professional or anyone else with a part to play.\n\nGot it. It’s about collaboration.\n\nAbsolutely. Our [mission statement](/company/mission/#mission) is “Everyone can contribute” because collaboration equals innovation. But you can’t just throw a wiki or a ticketing system at a problem and call it fixed. Contextual collaboration matters. If I’m a product owner and development is blocked or a security pro who sees that a known vulnerability wasn’t remediated, I need to know why, when and by whom. If you aren’t collaborating inside the system of work with real-time data, you’re probably missing critical context, which leads to poor decisions and, ultimately, less value in the hands of customers. When every company is a software company, every member of the business needs to be involved with and understand some aspect of software delivery.\n\n_So, now we’re talking about DevSecProductDesignFinanceAuditOps?_\n\nSure. But that’s a mouthful. 
Let’s just call it “agile business” because that’s really what The DevOps Platform gives you in the end. We’re extending the benefits of DevOps to a broader range of roles to remove those blockers at the edges of the process. This allows your whole organization to engage in and reap the rewards of agile thinking. And that brings us back to transformation. A platform is what’s been missing from so many Digital Transformation initiatives over the years, and a big reason why, even though businesses have gained efficiencies here and there, the primary goal of doing big things differently has been so hard to reach.\n\nNo software solution — not even GitlLab — can do the work for you, but that’s OK. You already know how to run your business. You have ideas and talent. You just need the software behind your software to get out of your way and support your business as you innovate. If you’ve already invested in transformation initiatives, The DevOps Platform can unlock the value of those investments. If you’re just getting started, it can help you scale your innovation immediately. 
To see how The DevOps Platform can work for you, [try GitLab Ultimate for free](/free-trial/).\n",[1128,1466,1528],{"slug":3288,"featured":6,"template":683},"the-devops-platform-for-agile-business","content:en-us:blog:the-devops-platform-for-agile-business.yml","The Devops Platform For Agile Business","en-us/blog/the-devops-platform-for-agile-business.yml","en-us/blog/the-devops-platform-for-agile-business",{"_path":3294,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":3295,"content":3300,"config":3305,"_id":3307,"_type":16,"title":3308,"_source":18,"_file":3309,"_stem":3310,"_extension":21},"/en-us/blog/how-to-make-your-devops-team-elite-performers",{"title":3296,"description":3297,"ogTitle":3296,"ogDescription":3297,"noIndex":6,"ogImage":2088,"ogUrl":3298,"ogSiteName":697,"ogType":698,"canonicalUrls":3298,"schema":3299},"How to make your DevOps team elite performers","Every company wants DevOps done better. The DORA Report spotlights what it takes to be a DevOps elite, and what teams need to do to get there.","https://about.gitlab.com/blog/how-to-make-your-devops-team-elite-performers","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How to make your DevOps team elite performers\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Sharon Gaudin\"}],\n        \"datePublished\": \"2021-10-26\",\n      }",{"title":3296,"description":3297,"authors":3301,"heroImage":2088,"date":3302,"body":3303,"category":14,"tags":3304},[1364],"2021-10-26","\n\nSo your company has a DevOps team –  great! – but are they elite performers or low performers?\n\nThere’s a chasm of difference between the two, according to the [State of DevOps 2021 report](https://gitlab.com/gitlab-com/www-gitlab-com/uploads/069ee8e2ee6af463cf0aafcd89eda33e/state-of-devops-2021.pdf) from DORA, the DevOps Research and Assessment team at Google. 
It’s the tipping point in how resilient, efficient and reliable your team is, and that’s directly tied to your ability to help your business be more competitive. (To be transparent, GitLab was one of the many sponsors of the report, and we’ve incorporated some of the DORA metrics [within our DevOps Platform](https://gitlab-com.gitlab.io/cs-tools/gitlab-cs-tools/what-is-new-since/?tab=features&s[…]tegories=DevOps+Reports&textSearch=DevOps&minVersion=13_08) so you can compare your highest and lowest-performing teams and see how much of the DevOps lifecycle each one is embracing.)\n\nBragging rights aside, a personal -- and not insignificant -- benefit of being on an elite DevOps team is that your [company value](/blog/a-look-at-devops-salaries/), as well as your [salary](/blog/four-tips-to-increase-your-devops-salary/), would likely rise, as would your ability to be hired at a top-tier company. \n\nSo what does it mean to be an elite DevOps team and what does it take to get there? Let’s dive in:\n\n## The benefits of being an elite team\n\nAccording to the DORA report there are specific things elite teams are able to consistently do. Here’s a look at some big goals:\n\n### Deploy more frequently\n\nElite performers deploy code 973 times more frequently than low performers, the survey notes. That’s right -- 973 times more. Low performers say they require a change lead time greater than six months. In sharp contrast, elite teams only need an hour. We’ll do the math for you: Elite teams have a 6,570 times faster lead time from commit to deploy than low performers.\n\n### Recover quicker\n\nThere’s a similar broad gap between low performers and elite teams when it comes to stability. DORA notes the time it takes the elite group to restore service is less than one hour, compared to more than six months for the low performers. \n\n### Lower change failure rates\n\nWhen it comes to change failure rates, there’s a 3 times difference between top and bottom performers. 
That means the elite group’s changes are a third less likely to fail. \n\n## DORA’s tips on how to become an elite team\n\nThose are great goals but how do you make them a reality? These six tips will take you in the right direction\n\n### 1. Make smart use of hybrid and multi-cloud environments\n\nDORA survey respondents who use either hybrid cloud or [multi-cloud](/topics/multicloud/) environments were 1.6 times more likely to beat their company’s performance targets than those who did not use these cloud setups. Multi-cloud users, for instance, say they are able to leverage each cloud provider’s unique benefits and achieve greater availability.\n\n### 2. How you implement the cloud matters\n\nWhen it comes to being able to support business needs, how the cloud is adopted and implemented makes a big difference. There’s a lot of benefit to adhering to the National Institute of Standards and Technology’s (NIST) [five essentials of cloud computing](https://nvlpubs.nist.gov/nistpubs/SpecialPublications/NIST.SP.500-291r2.pdf): on-demand self-service, broad network access, resource pooling, rapid elasticity or expansion, and measured service. DORA noted elite performers were 3.5 times more likely to have met all essential NIST cloud characteristics.\n\n### 3. Let DevOps and SRE complement each other\n\nTop DevOps professionals understand they don’t have to choose between DevOps and [site reliability engineering (SRE)](https://handbook.gitlab.com/job-families/engineering/infrastructure/site-reliability-engineer/). They work well together. “Elite performers are 2.1x as likely to report the use of SRE practices as their low-performing counterparts,” the DORA report notes. “But even teams operating at the highest levels have room for growth: Only 10% of elite respondents indicated that their teams have fully implemented every SRE practice we investigated.”\n\n### 4. 
Make sure you’re documenting\n\nThere’s a direct correlation between creating documents, which include everything from manuals to code comments, to a DevOps team’s success. Solid documentation is accurate, up-to-date, comprehensive, searchable, well organized and clear. The report points out that teams with good documentation are 2.4 times more likely to meet or exceed their reliability targets, and 2.5 times more likely to fully leverage the cloud.\n\n### 5. Build in security throughout development\n\nSecurity can get [a lot of lip service in DevOps](/blog/developer-security-divide/), but the best teams know that high delivery and operational performance are directly linked to integrating security practices throughout their development process. Security reviews must be integrated into every phase and applied to all major features, security professionals must be included in planning and development, and security testing must be automated.\n\n### 6. Pay attention to your team culture\n\nIn short, culture matters -- a lot. Industry surveys consistently show that culture is one of the top drivers of IT performance. Professionals who [have a sense of belonging and inclusion](/blog/why-software-developer-job-satisfaction-matters-and-how-to-make-it-happen/), and who work collaboratively and cross-functionally, produce higher software delivery and operational performance. \n\nAccording to the DORA report, it’s clear that becoming an elite team of DevOps professionals is an attainable goal. The report notes a dramatic increase in the percentage of elite professionals this year: 26% (of 1,200 surveyed), up from just 7% in 2018. 
\n\nIt’s time to up your game or risk being left behind.\n\n_For a slightly different look at aspirational DevOps results, read our [2021 Global DevSecOps Survey](/developer-survey/)._\n\n",[1128,1408,2096],{"slug":3306,"featured":6,"template":683},"how-to-make-your-devops-team-elite-performers","content:en-us:blog:how-to-make-your-devops-team-elite-performers.yml","How To Make Your Devops Team Elite Performers","en-us/blog/how-to-make-your-devops-team-elite-performers.yml","en-us/blog/how-to-make-your-devops-team-elite-performers",{"_path":3312,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":3313,"content":3318,"config":3323,"_id":3325,"_type":16,"title":3326,"_source":18,"_file":3327,"_stem":3328,"_extension":21},"/en-us/blog/migrating-repositories-to-gitlab-just-became-easier",{"title":3314,"description":3315,"ogTitle":3314,"ogDescription":3315,"noIndex":6,"ogImage":3183,"ogUrl":3316,"ogSiteName":697,"ogType":698,"canonicalUrls":3316,"schema":3317},"Migrating repositories to GitLab just became easier","Automate data and user migration into GitLab using open core software Congregate.","https://about.gitlab.com/blog/migrating-repositories-to-gitlab-just-became-easier","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Migrating repositories to GitLab just became easier\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Bryan May\"}],\n        \"datePublished\": \"2021-10-26\",\n      }",{"title":3314,"description":3315,"authors":3319,"heroImage":3183,"date":3302,"body":3321,"category":14,"tags":3322},[3320],"Bryan May","\n\nAs customers begin their journey with GitLab, they often start by moving their source code repositories to GitLab. 
The GitLab Professional Services team has been helping customers with large scale [migrations](/services/migration/) for years and during this time have built a utility to automate the migration process - [Congregate](https://gitlab.com/gitlab-org/professional-services-automation/tools/migration/congregate#congregate). To ensure we’re aligned with the [GitLab values](https://handbook.gitlab.com/handbook/values/) of transparency and collaboration, we’re making it available to customers and partners. As of today, Congregate has been moved to a [source available](https://en.wikipedia.org/wiki/Source-available_software) disposition. \n\nFor smaller customers this might not be too important because they can use [GitLab import functionality](https://docs.gitlab.com/ee/user/project/settings/import_export.html) to migrate themselves. But for customers moving hundreds or thousands of source code repositories and associated users to GitLab, this is a game changer. And perhaps most importantly, our growing team of [channel services partners](https://partners.gitlab.com/) can now leverage Congregate functionality as they help customers move data. And for each contribution that partners or customers make back to Congregate, the larger [community](/community/) benefits. \n\n## Why are you doing this? Customers pay you for these services!\n\nGitLab Professional Services falls under the umbrella of Customer Success and its [mission](https://about.gitlab.com/handbook/customer-success/#mission-statement) is to _deliver value to all customers by engaging in a consistent, repeatable, scalable way across defined segments so that customers see the value in their investment with GitLab_. While Professional Services needs to maintain a balanced business (we are not a cost center), we believe that our paramount goal is to help our customers. As GitLab grows and the number of customers also increases, we will rely more heavily on our channel partners. 
We see making Congregate source available as a means to reach the largest quantity of customers with the highest quality migration service offering. Similar to how GitLab has [over 3,000 contributions from the wider community](/blog/3000-contributors-post/), we think welcoming contributions for this migration tool will help ensure GitLab and its partners converge on a single solution rather than diverging to many. \n\n## What do you mean by _Source Available_?\n\nCongregate will be licensed under the GitLab EE license. For services partners and customers, this means:\n\n![Legal Guidelines](https://about.gitlab.com/images/blogimages/2021-10-20-migration-automation/legal-guidelines2.png)\n\n## I’m a partner, will GitLab PS support my migration?\n\n- No, but support is available on a fee-based engagement. As an example if you have a customer migration that you need support on, you can engage GitLab PS as the Prime and GitLab will work with you to subcontract the engagement to you and provide the  necessary support.  \n- If a Partner is using Congregate on its own contract directly with the customer, GitLab PS will not provide support for Congregate. As such, Congregate is a USE AT YOUR OWN RISK tool. \n- Customer engagements on a partner contract intending to migrate to gitlab.com can be subcontracted to GitLab PS to help with these migration activities. \n\n_Note: GitLab PS will always need to be involved for migrations to gitlab.com as certain elevated privileges are required to maintain data integrity._\n\n## It's just a bunch of scripted API calls, what's so special?\n\nCongregate is using all of the published APIs so there isn’t a ton of “secret sauce” in the project. However, we have spent time optimizing for performance using multiprocessing techniques to reduce the time it takes to gather and push data. We’ve also created a standard logging format to provide auditability of what happened during a migration. 
Congregate can migrate data from many popular source systems to help the majority of our prospects and customers move to GitLab.  \n\n## How can I use it?\n\nWe are releasing a learning path for partners (or customers) to earn a [certified GitLab migration engineer badge](https://gitlab.badgr.com/public/badges/zzzdONLxRaCW5cDQSlHsgw). This learning journey will initially be released to GitLab team members and partners. It will include general information about importing data into GitLab, quizzes and exams to validate your knowledge, and a hands-on workshop where you will use Congregate to move data to a test GitLab instance. Once you pass, you will receive a badge that you can post in a **#humblebrag** to your social media network - that's what social media is for, right? We recommend going through this training to understand how to use Congregate. As a partner, you can access this certification learning journey [here](https://partners.gitlab.com/prm/English/c/Training). \n\n![Certified Migration Services Engineer](https://about.gitlab.com/images/blogimages/2021-10-20-migration-automation/migration-badge.png){: .shadow.center}\n\n\n\n\n",[1128,233,1488],{"slug":3324,"featured":6,"template":683},"migrating-repositories-to-gitlab-just-became-easier","content:en-us:blog:migrating-repositories-to-gitlab-just-became-easier.yml","Migrating Repositories To Gitlab Just Became Easier","en-us/blog/migrating-repositories-to-gitlab-just-became-easier.yml","en-us/blog/migrating-repositories-to-gitlab-just-became-easier",{"_path":3330,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":3331,"content":3337,"config":3342,"_id":3344,"_type":16,"title":3345,"_source":18,"_file":3346,"_stem":3347,"_extension":21},"/en-us/blog/beginner-guide-python-programming",{"title":3332,"description":3333,"ogTitle":3332,"ogDescription":3333,"noIndex":6,"ogImage":3334,"ogUrl":3335,"ogSiteName":697,"ogType":698,"canonicalUrls":3335,"schema":3336},"How to get started with Python 
programming","Python is increasingly popular, and for good reason. Here's our beginner's guide.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749664962/Blog/Hero%20Images/python.jpg","https://about.gitlab.com/blog/beginner-guide-python-programming","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How to get started with Python programming\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"GitLab\"}],\n        \"datePublished\": \"2021-10-21\",\n      }",{"title":3332,"description":3333,"authors":3338,"heroImage":3334,"date":3339,"body":3340,"category":14,"tags":3341},[1524],"2021-10-21","Are you a programming enthusiast who wants to learn Python? Are you new to coding? Do you need help deciding where to begin with Python? If you are looking for answers to these questions, then you are in the right place.\n\n## How to start writing code with Python\n\nPython is an easy-to-learn, easy-to-use and easy-to-deploy programming language, with rampant usage in building web and desktop applications, analyzing data and performing [DevOps](https://about.gitlab.com/topics/devops/) tasks. It is a free, open-source, object-oriented coding language used to write simple scripts and complex programs. Of the almost 700 programming languages, Python is considered one of the best to learn first.\n\n## Installing Python\n\nBefore discussing the basics of Python, it is essential to download and install Python on your desktop/laptop. Python works on multiple platforms, including Linux, Windows and Mac. It comes preinstalled on most Mac and Linux systems; however, you should download the latest version from the official Python website.\n\nTo check the current Python version on your system, open the command line and type “python -V”. 
\n\n![command prompt](https://about.gitlab.com/images/blogimages/python1.png){: .shadow}\n\nIf you have an outdated version, download either the 32- or 64-bit setup from the website based on your system requirements.\n\nThere are other alternatives for downloading the setup: for Windows, you can install it directly from Microsoft. For Linux, install it using the package manager. For macOS, you can download it from Homebrew.\n\nOnce the setup is downloaded, run the file installer, and click on “Install Now”. Once the installation is complete, you are ready to go. Below is an example of a Python installation for Windows.\n\n![install Python](https://about.gitlab.com/images/blogimages/python2.png){: .shadow}\n\n## Running Python in command prompt\n\nTo verify Python is installed and working correctly in Windows, open the command prompt and enter “python”, which will invoke the interpreter. You can directly execute Python codes in it.  For example, type “2*5+1” and press “enter”. You will see “11” as the output. Entering “quit ()” will exit the interpreter.\n\n![Python interpreter](https://about.gitlab.com/images/blogimages/python3.png){: .shadow}\n\n## Running Python in IDE\n\nWith the latest Python installed, you are now ready to start programming in Python. When writing long scripts or programs in Python, use Python’s built-in Integrated Development and Learning Environment (IDLE).\n\nStart the IDLE and then, from the File dropdown, select “New File”, which opens a new editing window. So now, on your screen, you have two windows: a Python shell and an untitled file.\n\n![Python shell and untitled file](https://about.gitlab.com/images/blogimages/python4.png){: .shadow}\n\nThe Python shell is a REPL environment, which is shorthand for \"read-eval-print loop\". It runs snippets of the code, usually one statement at a time. 
For example, by repeating the same calculation “2*5+1” that we did in the command prompt, you can see how a Python shell can function as a calculator.\n\n![Python as a calculator](https://about.gitlab.com/images/blogimages/python5.png){: .shadow}\n\nThe untitled window is a text editing window for writing complete programs. The shell displays its output. For example, the conventional first program of Python for beginners is printing “Hello World!”. Make sure you save the text editor before running it by pressing “F5”.\n\n![Hello World](https://about.gitlab.com/images/blogimages/python61.png){: .shadow}\n\n## The basics of Python\n\nWe know you can’t wait to start writing long scripts for games and websites, but you still have a long way to get there. Just like with learning any other language, you must first understand the basics of Python. \n\nThe **print()** function, as seen in the Hello World! example, prints a value on the output window. A value is the most basic thing a program uses. It can be a string, a numeric value or any other Python object. Any object within single/double quotations is called a string. For instance, the “Hello World!” that is printed in the above program is also of the type string. Numeric values like 4 and 4.5 are the types of integers and floats, respectively. You can change an integer or float into a string and vice versa using the built-in functions **int()**, **float()** and **str()**.\n\n![value in an output window](https://about.gitlab.com/images/blogimages/python7.png){: .shadow}\n\n## Python’s vocabulary\n\nPython is the simplest coding language. It is easy to read and understand. Unlike human languages, Python has a small vocabulary or reserved words holding special meaning. Terms other than this reserved vocabulary hold meaning only to you and are called variables. 
These 35 reserved words are:\n\n![Python terms](https://about.gitlab.com/images/blogimages/python8.png){: .shadow}\n\nMake sure you use these words for their specified purpose to avoid confusing the Python interpreter and causing a syntax error.\n\n### Naming variables\n\nSometimes you want to store values in your code for retrieving them later, which you can do by giving them symbolic names called variables. As seen below, we ask Python to store 5 and 6 with labels x and y, respectively, and then retrieve them later to find their sum.\n\n![storing variables](https://about.gitlab.com/images/blogimages/python9.png){: .shadow}\n\nThere are rules for choosing a name for a variable; failing to follow these gives a syntax error. A few mandatory rules are narrated below:\n\n1. The name can contain both letters and numbers, but it can’t start with a number.\n1. An underscore can appear in the name to separate multiple words.\n1. Special symbols like @#$ are illegal and should not appear in the name.\n1. Python keywords should not be used as names for variables.\n\n### Understanding operators and operands\n\nPython uses special symbols called “operators” for representing basic mathematical computation. The values to which these operators are applied are called operands. The symbols used as operators for subtraction, addition, division, multiplication and exponentiation are  -,+, /, * and **, respectively. \n\n![symbols for operators](https://about.gitlab.com/images/blogimages/python10.png){: .shadow}\n\nThe modulus operator (%) outputs the remainder of the first operand divided by the second operand. It is useful in checking whether a number is divisible by another and extracting the rightmost digit/digits of a number.\n\n![modulus operator](https://about.gitlab.com/images/blogimages/python11.png){: .shadow}\n\n### Using expressions\n\nA combination of values, variables and operators is called an expression. 
An expression typed in the shell gets evaluated, and the answer is displayed. However, in a script, an expression doesn't do anything on its own.\n\nPython uses the mathematical convention PEMDAS for the operators, which means that P for Parentheses has the highest precedence, then Exponentiation, Multiplication and Division, which have the same priority. Addition and Subtraction come next and also have the same precedence. Operators that have the same preference are also evaluated from left to right.\n\n![PEMDAS](https://about.gitlab.com/images/blogimages/python12.png){: .shadow}\n\nThe Addition and Multiplication operators also work with strings for concatenation and repeating a string, respectively.\n\n![addition and multiplication operators](https://about.gitlab.com/images/blogimages/python13.png){: .shadow}\n\nPython also allows you to take the value for a variable from the user via their keyboard. This can be done using a built-in function called **input**.\n\n![input](https://about.gitlab.com/images/blogimages/python14.png){: .shadow}\n\n## Write your first program\n\nNow it's time to write a short program using everything you've learned here. Write a script that takes two numbers as input and adds them. Do this on your own and see the code below to tally your work.\n\n![write a short program](https://about.gitlab.com/images/blogimages/python15.png){: .shadow}\n\n**Congratulations!** You just wrote your first program.\n\nLearning Python is easy and fun. We just helped you make it through the basics. To become a professional Python Programmer, you still have a lot to learn and practice. 
Good luck on your journey to becoming an expert coder.\n\nPhoto by \u003Ca href=\"https://unsplash.com/@davidclode?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText\">David Clode\u003C/a> on \u003Ca href=\"https://unsplash.com/s/photos/python?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText\">Unsplash\u003C/a>",[1128,1408,749],{"slug":3343,"featured":6,"template":683},"beginner-guide-python-programming","content:en-us:blog:beginner-guide-python-programming.yml","Beginner Guide Python Programming","en-us/blog/beginner-guide-python-programming.yml","en-us/blog/beginner-guide-python-programming",{"_path":3349,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":3350,"content":3356,"config":3361,"_id":3363,"_type":16,"title":3364,"_source":18,"_file":3365,"_stem":3366,"_extension":21},"/en-us/blog/four-tips-to-increase-your-devops-salary",{"title":3351,"description":3352,"ogTitle":3351,"ogDescription":3352,"noIndex":6,"ogImage":3353,"ogUrl":3354,"ogSiteName":697,"ogType":698,"canonicalUrls":3354,"schema":3355},"Four tips to increase your DevOps salary","You have a great career with a solid salary, but can you do better? (Hint: of course.) Here's how.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749668124/Blog/Hero%20Images/moneyfarm_background.jpg","https://about.gitlab.com/blog/four-tips-to-increase-your-devops-salary","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Four tips to increase your DevOps salary\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Sharon Gaudin\"}],\n        \"datePublished\": \"2021-10-20\",\n      }",{"title":3351,"description":3352,"authors":3357,"heroImage":3353,"date":3358,"body":3359,"category":14,"tags":3360},[1364],"2021-10-20","\n\n_This is the second in an occasional series looking at DevOps salaries and careers. 
Find out [how your DevOps salary stacks up](/blog/a-look-at-devops-salaries/)._\n\nSalaries for DevOps professionals are strong, despite a pandemic and a global economic crisis. However, you can still command an even higher salary with four straightforward strategies.\n\nVarious surveys have shown the IT industry is thriving right now and DevOps professionals, in particular, are [increasing in demand and value](https://about.gitlab.com/blog/a-look-at-devops-salaries/). DevOps repeatedly ranks well on some reputable lists such as Robert Half’s [15 highest paying IT jobs](https://www.roberthalf.com/blog/salaries-and-skills/the-13-highest-paying-it-jobs-in-2019) and Glassdoor’s 2021 list of [Best Jobs in America](https://www.glassdoor.com/research/best-jobs-in-america-for-2021/). \n\nIn [an August jobs report](https://www.prnewswire.com/news-releases/nationwide-tech-hiring-surges-in-second-quarter-per-dice-q2-tech-job-report-301351520.html), Dice CEO Art Zeile called this “one of the hottest tech job markets since the dot-com era,” and pointed to the upward trend in tech job postings since November 2020.\n\n## How to increase your salary\n\nBy following these strategies, DevOps professionals can take advantage of this strong market to boost your paychecks.\n\n### 1. Gain more experience\n\nExperience level is a big driver when it comes to how much money DevOps professionals will be taking home. [The Randstad 2021 Salary Guide](https://rlc.randstadusa.com/for-business/learning-center/salary-insights/salary-guide/IT-technologies) shows a more than $27,000 difference between the annual salary of a DevOps developer with one year of experience ($112,785) and someone with five years of experience ($140,242). An additional 10 years of experience can garner another $25,000 bump, according to the Randstad Salary Guide.\n\nExperience doesn’t have to happen sequentially, however. 
In our [2021 Global DevSecOps Survey](/developer-survey/) we found more than  69% of respondents participate in “sideline” open source projects. Those extracurricular efforts can look great on a resume and also are a way to showcase niche skills.\n\n### 2. Expand your education\n\nEmployers also are looking for DevOps professionals to continue to increase their skill set, such as learning new coding languages and scripting skills, according to Glassdoor and Robert Half.\nDevOps professionals also should stay up-to-date on new frameworks, automation, data management and security systems. And don’t forget the importance of analytics skills, configuration management and DevOps platforms. As we all know, technology is a moving target and being able to not only use the latest technology but also explain its importance to executives and other business leaders will make you a more valuable employee.\n\n### 3. Pursue certifications\n\nWant to show your employer - or a future employer - that you have the skills to work on a business-critical DevOps platform? The proof is sometimes in the certification. Think about getting certified in [Kubernetes](https://training.linuxfoundation.org/certification/certified-kubernetes-application-developer-ckad/), [Docker](https://prod.examity.com/docker/), Puppet or [Ansible](https://www.redhat.com/en/services/training/ex407-red-hat-certified-specialist-in-ansible-automation-exam?section=Overview). And of course there’s an option to become a [GitLab Certified Associate](https://about.gitlab.com/services/education/gitlab-certified-associate/). Certifications help an employer understand your functional knowledge of their business systems.\n\n### 4. Improve your soft skills \n\nYes, it’s critical that you know how to make the technology work and how to keep projects running on time and on budget, but you also should concentrate on “soft skills,” like communication, collaboration and leadership, if you’re aiming to qualify for a better salary. 
In 2020 our survey takers all agreed that soft skills were the most important thing for their future careers, and they remained the second choice of most survey takers this year as well. \n\nCompanies need professionals who understand the business’ needs, can communicate how a DevOps platform can solve key challenges and can explain the competitive advantage gained from a strong DevOps strategy. Soft skills enable professionals to operate as a team, endure stressful moments and work through difficult problems.\n\nDevOps professionals are in demand, putting you in a strong earning position. So make sure you are doing all you can to show you deserve a higher salary.\n\n## Read more on DevOps careers: \t\t\n\n- [Best advice for your DevOps career? Keep on learning](/blog/best-advice-for-your-devops-career-keep-on-learning/)\n\n- [6 tips to make software developer hiring easier](/blog/6-tips-to-make-software-developer-hiring-easier/)\n\n- [DevOps salaries in 2021: Where do you rank?](/blog/a-look-at-devops-salaries/)\n\n- [Have DevOps jobs to fill? 
Try these 3 strategies to hire and retain](/blog/have-devops-jobs-to-fill-try-these-3-strategies-to-hire-and-retain/)\n\n",[1408,1128,1786],{"slug":3362,"featured":6,"template":683},"four-tips-to-increase-your-devops-salary","content:en-us:blog:four-tips-to-increase-your-devops-salary.yml","Four Tips To Increase Your Devops Salary","en-us/blog/four-tips-to-increase-your-devops-salary.yml","en-us/blog/four-tips-to-increase-your-devops-salary",{"_path":3368,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":3369,"content":3375,"config":3381,"_id":3383,"_type":16,"title":3384,"_source":18,"_file":3385,"_stem":3386,"_extension":21},"/en-us/blog/top-10-gitlab-hacks",{"title":3370,"description":3371,"ogTitle":3370,"ogDescription":3371,"noIndex":6,"ogImage":3372,"ogUrl":3373,"ogSiteName":697,"ogType":698,"canonicalUrls":3373,"schema":3374},"Top ten GitLab hacks for all stages of the DevOps Platform","Get the most out of the GitLab DevOps Platform with our ten best tips for enhanced productivity.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749667482/Blog/Hero%20Images/cover-image-unsplash.jpg","https://about.gitlab.com/blog/top-10-gitlab-hacks","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Top ten GitLab hacks for all stages of the DevOps Platform\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Michael Friedrich\"}],\n        \"datePublished\": \"2021-10-19\",\n      }",{"title":3370,"description":3371,"authors":3376,"heroImage":3372,"date":3378,"body":3379,"category":14,"tags":3380},[3377],"Michael Friedrich","2021-10-19","\nIt's been ten years since the first commit to GitLab, so we are sharing our ten favorite GitLab hacks to help you get the most out of our DevOps Platform. 
These are tips for all stages of the development lifecycle, so roll up your sleeves and let's get started.\n\n## Manage faster with quick actions\n\nYou might have adopted keyboard shortcuts for faster navigation and workflows already - if not, check out the GitLab documentation for [platform specific shortcuts](https://docs.gitlab.com/ee/user/shortcuts.html). The knowledge of pressing `r` to land in the reply to comment in text form can be combined with other quick actions, including:\n\n```\n/assign_reviewer @ \u003Csearch username>\n\n/label ~ \u003Csearch label>\n/label ~enhancement ~workflow::indev\n\n/due Oct 8\n\n/rebase\n\n/approve\n\n/merge \n```\n\nQuick actions are also helpful if you have to manage many issues, merge requests and epics at the same time. There are specific actions which allow you to duplicate existing issues, as one example. \n\nTake a deeper dive into [Quick Actions](/blog/improve-your-gitlab-productivity-with-these-10-tips/). \n\n## Plan instructions with templates\n\nDon’t fall into the trap of back-and-forth with empty issue descriptions that leave out details your development teams need to reproduce the error in the best way possible. \n\nGitLab provides the possibility to use so-called [description templates](https://docs.gitlab.com/ee/user/project/description_templates.html) in issues and merge requests. Next to providing a structured template with headings, you can also add [task lists](https://docs.gitlab.com/ee/user/markdown.html#task-lists) which can later be ticked off by the assignee. Basically everything is possible and is supported in GitLab-flavored markdown and HTML.\n\nIn addition to that, you can combine the static description templates with quick actions. This allows you to automatically set labels, assignees, define due dates, and more to level up your productivity with GitLab. \n\n```\n\u003C!-- \nThis is a comment, it will not be rendered by the Markdown engine. 
You can use it to provide instructions how to fill in the template.\n--> \n\n### Summary \n\n\u003C!-- Summarize the bug encountered concisely. -->\n\n### Steps to reproduce\n\n\u003C!-- Describe how one can reproduce the issue - this is very important. -->\n\n### Output of checks\n\n\u003C!-- If you are reporting a bug on GitLab.com, write: This bug happens on GitLab.com -->\n\n#### Results of GitLab environment info\n\n\u003C!--  Input any relevant GitLab environment information if needed. -->\n\n\u003Cdetails>\n\u003Csummary>Expand for output related to app info\u003C/summary>\n\n\u003Cpre>\n\n(Paste the version details of your app here)\n\n\u003C/pre>\n\u003C/details>\n\n### Possible fixes\n\n\u003C!-- If you can, link to the line of code and suggest actions. →\n\n## Maintainer tasks\n\n- [ ] Problem reproduced\n- [ ] Weight added\n- [ ] Fix in test\n- [ ] Docs update needed\n\n/label ~\"type::bug\"\n```\n\nWhen you manage different types of templates, you can pass along the name of the template in the `issuable_template` parameter, for example `https://gitlab.com/gitlab-org/gitlab/-/issues/new?issuable_template=Feature%20proposal%20%23%20lean`. \n\nAt GitLab, we use description and merge request templates in many ways: [GitLab the project](https://gitlab.com/gitlab-org/gitlab/-/tree/master/.gitlab/issue_templates), [GitLab Corporate Marketing team](https://gitlab.com/gitlab-com/marketing/corporate_marketing/corporate-marketing/-/tree/master/.gitlab/issue_templates), [GitLab team member onboarding](https://gitlab.com/gitlab-com/people-group/people-operations/employment-templates/-/tree/master/.gitlab/issue_templates) and [GitLab product team](https://gitlab.com/gitlab-com/Product/-/tree/main/.gitlab/issue_templates) are just a few examples.\n\n## Create with confidence \n\nWhen reading GitLab issues and merge requests, you may see the abbreviation `MWPS` which means `Merge When Pipeline Succeeds`. 
This is an efficient way to merge the MRs when the pipeline passes all jobs and stages - you can even combine this workflow with [automatically closing issues](https://docs.gitlab.com/ee/user/project/issues/managing_issues.html#closing-issues-automatically) with keywords from the MR.\n\n`Merge When Pipeline Succeeds` also works on the CLI with the `git` command and [push options](https://docs.gitlab.com/ee/user/project/push_options.html). That way you can create a merge request from a local Git branch, and set it to merge when the pipeline succeeds.\n\n```shell\n# mwps BRANCHNAME\nalias mwps='git push -u origin -o merge_request.create -o merge_request.target=main -o merge_request.merge_when_pipeline_succeeds'\n```\n\nCheckout [this ZSH alias example](https://gitlab.com/sytses/dotfiles/-/blob/745ef9725a859dd759059f6ce283e2a8132c9b00/git/aliases.zsh#L24) in our CEO [Sid Sijbrandij](/company/team/#sytses)’s dotfiles repository. There are more push options available, and even more Git CLI tips in [our tools & tips handbook](https://handbook.gitlab.com/handbook/tools-and-tips/#terminal). One last tip: Delete all local branches where the remote branch was deleted, for example after merging a MR.\n\n```shell\n# Delete all remote tracking Git branches where the upstream branch has been deleted\nalias git_prune=\"git fetch --prune && git branch -vv | grep 'origin/.*: gone]' | awk '{print \\$1}' | xargs git branch -d\"\n```\n\nYou are not bound to your local CLI environment; take it to the cloud with [Gitpod](/blog/teams-gitpod-integration-gitlab-speed-up-development/) and either work in VS Code or the pod terminal. \n\n## Verify your CI/CD pipeline\n\nRemember the old workflow of committing a change to `.gitlab-ci.yml` just to see if it was valid, or if the job template really inherits all the attributes? This has gotten a whole lot easier with our new [pipeline editor](https://docs.gitlab.com/ee/ci/pipeline_editor/). 
Navigate into the `CI/CD` menu and start building CI/CD pipelines right away.\n\nBut the editor is more than just another YAML editor. You’ll get live linting, allowing you to know if there is a missing dash for array lists or a wrong keyword in use before you commit. You can also preview jobs and stages or asynchronous dependencies with `needs` to make your pipelines more efficient.\n\nThe pipeline editor also uses uses the `/ci/lint` API endpoint, and fetches the merged YAML configuration I described earlier in [this blog post about jq and CI/CD linting](/blog/devops-workflows-json-format-jq-ci-cd-lint/). That way you can quickly verify that job templates with [extends](https://docs.gitlab.com/ee/ci/yaml/#extends) and [!reference tags](https://docs.gitlab.com/ee/ci/yaml/yaml_optimization.html#reference-tags) work in the way you designed them. It also allows you to unfold included files, and possible job overrides (for example changing the stage of an [included SAST security template](https://docs.gitlab.com/ee/user/application_security/sast/#overriding-sast-jobs)).\n\nLet’s try a quick example – create a new project and new file called `server.c` with the following content: \n\n```\n#include \u003Cstdio.h>\n#include \u003Cstring.h>\n#include \u003Csys/mman.h>\n#include \u003Csys/stat.h>\n#include \u003Cunistd.h>\n\nint main(void) {\n    size_t pagesize = getpagesize();\n    char * region = mmap(\n        (void*) (pagesize * (1 \u003C\u003C 20)),\n        pagesize,\n        PROT_READ|PROT_WRITE|PROT_EXEC,\n        MAP_ANON|MAP_PRIVATE, 0, 0);\n\n    strcpy(region, \"Hello GitLab SAST!\");\n    printf(\"Contents of region: %s\\n\", region);\n\n    FILE *fp;\n    fp = fopen(\"devops.platform\", \"r\");\n    fprintf(fp, \"10 years of GitLab 🦊 🥳\");\n    fclose(fp);\n    chmod(\"devops.platform\", S_IRWXU|S_IRWXG|S_IRWXO);\n\n    return 0;\n}\n```\n\nOpen the CI/CD pipeline editor and add the following configuration, with an extra `secure` stage assigned to the 
`semgrep-sast` job for SAST and the C code. \n\n```yaml\nstages:\n    - build\n    - secure\n    - test\n    - deploy\n\ninclude:\n    - template: Security/SAST.gitlab-ci.yml\n\nsemgrep-sast:\n    stage: secure\n```\n\nInspect the `Merged YAML tab` to see the fully compiled CI/CD configuration. You can commit the changes and check the found vulnerabilities too as an async practice :). The examples are available in [this project](https://gitlab.com/gitlab-de/playground/sast-10y-example).\n\n![CI/CD Pipeline editor - Merged YAML](https://about.gitlab.com/images/blogimages/top-10-gitlab-hacks/gitlab_10y_pipeline_editor_view_merged_yaml.png)\nVerify the stage attribute for the job by opening the `view merged YAML` tab in the CI/CD pipeline editor.\n{: .note.text-center}\n\n## Package your applications\n\nThe [package registry](https://docs.gitlab.com/ee/user/packages/) possibilities are huge and there are more languages and package managers to come. Describing why Terraform, Helm, and containers (for infrastructure) and Maven, npm, NuGet, PyPI, Composer, Conan, Debian, Go and Ruby Gems (for applications) are so awesome would take too long, but it's clear there are plenty of choices. \n\nOne of my favourite workflows is to use existing CI/CD templates to publish container images in the GitLab container registry. This makes continuous delivery much more efficient, such as when deploying the application into your Kubernetes cluster or AWS instances. 
\n\n```yaml\ninclude:\n  - template: 'Docker.gitlab-ci.yml'\n```\n\nIn addition to including the CI/CD template, you can also override the job attributes and define a specific stage and manual non-blocking rules.\n\n```yaml\nstages:\n  - build\n  - docker-build\n  - test\n\ninclude:\n  - template: 'Docker.gitlab-ci.yml'\n\n# Change Docker build to manual non-blocking\ndocker-build:\n  stage: docker-build\n  rules:\n    - if: '$CI_COMMIT_REF_NAME == $CI_DEFAULT_BRANCH'\n      when: manual \n      allow_failure: true\n```\n\nFor celebrating #10YearsOfGitLab, we have created a [C++ example](https://gitlab.com/gitlab-de/cicd-tanuki-cpp) with an Easter egg on time calculations. This project also uses a Docker builder image to showcase a more efficient pipeline. Our recommendation is to learn using the templates in a test repository, and then create a dedicated group/project for managing all required container images. You can think of builder images which include the compiler tool chain, or specific scripts to run end-to-end tests, etc. \n\n## Secure your secrets\n\nIt is easy to leak a secret by making choices that uncomplicate a unit test by running it directly with the production database. The secret persists in git history, and someone with bad intentions gains access to private data, or finds ways to exploit your supply chain even further. \n\nTo help prevent that, include the CI/CD template for secret detection. \n\n```yaml\nstages:\n    - test\n\ninclude:\n  - template: Security/Secret-Detection.gitlab-ci.yml  \n```\n\nA known way to leak secrets is committing the `.env` file which stores settings and secrets in the repository. Try the following snippet by adding a new file `.env` and create a merge request.\n\n```\nexport AWS_KEY=\"AKIA1318109798ABCDEF\"\n```\n\nInspect the reports JSON to see the raw reports structure. GitLab Ultimate provides an MR integration, a security dashboard overview, and more features to take immediate action. 
The example can be found in [this project](https://gitlab.com/gitlab-de/playground/secret-scanning-10y-example).\n\n![Secrets Scanning in MR](https://about.gitlab.com/images/blogimages/top-10-gitlab-hacks/gitlab_10y_secrets_scanning.png)\nMR detail view with detected AWS secret from security scanning\n{: .note.text-center}\n\n## Release and continuously deliver (CD)\n\nGitLab’s release stage provides many [features](https://about.gitlab.com/handbook/product/categories/features/#release), including [canary deployments](https://docs.gitlab.com/ee/user/project/canary_deployments.html) and [GitLab pages](https://docs.gitlab.com/ee/user/project/pages/). There are also infrastructure deployments with Terraform and cloud native (protected) [environments](https://docs.gitlab.com/ee/ci/environments/). \n\nWhile working on a CI/CD pipeline efficiency workshop, I got enthusiastic about [parent-child pipelines](https://docs.gitlab.com/ee/ci/pipelines/downstream_pipelines.html#parent-child-pipelines) allowing non-blocking child pipelines into production, with micro services in Kubernetes as one example. \n\nLet’s try it! Create a new project, and add 2 child pipeline configuration files: `child-deploy-staging.yml` and `child-deploy-prod.yml`. The naming is important as the files will be referenced in the main `.gitlab-ci.yml` configuration file later. The jobs in the child pipelines will sleep for 60 seconds to simulate a deployment. 
\n\nchild-deploy-staging.yml:\n\n```yaml\ndeploy-staging:\n    stage: deploy\n    script:\n        - echo \"Deploying microservices to staging\" && sleep 60\n```\n\nchild-deploy-prod.yml\n\n```yaml\ndeploy-prod:\n    stage: deploy\n    script:\n        - echo \"Deploying microservices to prod\" && sleep 60\n\nmonitor-prod:\n    stage: deploy\n    script:\n        - echo \"Monitoring production SLOs\" && sleep 60\n```\n\nNow edit the `.gitlab-ci.yml` configuration file and create a build-test-deploy stage workflow.\n\n```yaml\nstages:\n  - build\n  - test\n  - deploy\n\nbuild:\n  stage: build\n  script: echo \"Build\"\n\ntest:\n  stage: test \n  script: echo \"Test\"\n\ndeploy-staging-trigger:\n  stage: deploy\n  trigger:\n    include: child-deploy-staging.yml\n  #rules:\n  #  - if: $CI_MERGE_REQUEST_ID\n\ndeploy-prod-trigger:\n  stage: deploy\n  trigger:\n    include: child-deploy-prod.yml\n    #strategy: depend\n  #rules:\n  #  - if: $CI_COMMIT_REF_NAME == $CI_DEFAULT_BRANCH   \n```\n\nCommit the changes and inspect the CI/CD pipelines. \n\n![Parent-child Pipelines](https://about.gitlab.com/images/blogimages/top-10-gitlab-hacks/gitlab_10y_parent_child_pipelines.png)\nView parent-child pipelines in GitLab\n{: .note.text-center}\n\n`strategy: depends` allows you to make the child pipelines blocking again, and the parent child pipeline waits again. Try uncommenting this for the prod job, and verify that by inspecting the pipeline view. [Rules](https://docs.gitlab.com/ee/ci/yaml/#rules) allow refining the scope when jobs are being run, such as when staging child pipelines that should only be run in merge requests and the prod child pipeline only gets triggered when on the default main branch. The full example can be found in [this project](https://gitlab.com/gitlab-de/playground/parent-child-pipeline-10y-example).\n\nTip: You can use [resource_groups](/blog/introducing-resource-groups/) to limit production deployments from running concurrent child pipelines. 
\n\n## Configure your infrastructure\n\nTerraform allows you to describe, plan and apply the provisioning of infrastructure resources. The workflow requires a state file to be stored over steps, where the [managed state in GitLab](https://docs.gitlab.com/ee/user/infrastructure/iac/terraform_state.html) as an HTTP backend is a great help, together with predefined container images and CI/CD templates to make [Infrastructure as code](https://docs.gitlab.com/ee/user/infrastructure/iac/) as smooth as possible.\n\nYou can customize the template, or copy the CI/CD configuration into .gitlab-ci.yml and modify the steps by yourself. Let’s try a quick example with only an AWS account and an IAM user key pair. Configure them as CI/CD variables in `Settings > CI/CD > Variables`: `AWS_ACCESS_KEY_ID` and `AWS_SECRET_ACCESS_KEY`.\n\nNext, create the `backend.tf` file and specify the http backend and AWS module dependency.\n\n```terraform\nterraform {\n  backend \"http\" {\n  }\n\n  required_providers {\n    aws = {\n      source = \"hashicorp/aws\"\n      version = \"~> 3.0\"\n    }\n  }\n}\n```\n\nCreate `provider.tf` to specify the AWS region.\n\n```terraform\nprovider \"aws\" {\n  region = \"us-east-1\"\n}\n```\n\nThe `main.tf` describes the S3 bucket resources.\n\n```terraform\nresource \"aws_s3_bucket_public_access_block\" \"publicaccess\" {\n  bucket = aws_s3_bucket.demobucket.id\n  block_public_acls = false\n  block_public_policy = false\n}\n\nresource \"aws_s3_bucket\" \"demobucket\" {\n  bucket = \"terraformdemobucket\"\n  acl = \"private\"\n}\n```\n\nTip: You can verify the configuration locally on your CLI by commenting out the HTTP backend above.\n\nFor GitLab CI/CD, open the pipeline editor and use the following configuration: (Note that it is important to specify the `TF_ROOT` and `TF_ADDRESS` variables since you can [manage multiple Terraform state files](https://docs.gitlab.com/ee/user/infrastructure/iac/terraform_state.html#configure-the-backend)). 
\n\n```yaml\nvariables:\n  TF_ROOT: ${CI_PROJECT_DIR}\n  TF_ADDRESS: ${CI_API_V4_URL}/projects/${CI_PROJECT_ID}/terraform/state/${CI_PROJECT_NAME}\n\ninclude:\n    - template: Terraform.latest.gitlab-ci.yml\n\nstages:\n  - init\n  - validate\n  - build\n  - deploy\n  - cleanup\n\ndestroy:\n    stage: cleanup\n    extends: .terraform:destroy \n    when: manual\n    allow_failure: true\n```\n\nCommit the configuration and inspect the pipeline jobs. \n\n![Terraform pipeline AWS S3 bucket](https://about.gitlab.com/images/blogimages/top-10-gitlab-hacks/gitlab_10y_terraform_state_cicd_pipeline_aws_s3_bucket.png)\nAWS S3 bucket provisioned with Terraform in GitLab CI/CD \n{: .note.text-center}\n\nThe `destroy` job is not created in the template and therefore explicitly added as a manual job. It is recommended to review the opinionated Terraform CI/CD template and copy the jobs into your own configuration to allow for further modifications or style adjustments.  The full example is located in [this project](https://gitlab.com/gitlab-de/playground/terraform-aws-state-10y-example).\n\n![GitLab managed Terraform states](https://about.gitlab.com/images/blogimages/top-10-gitlab-hacks/gitlab_10y_terraform_state_cicd_overview.png)\nView the Terraform states in GitLab\n{: .note.text-center}\n\nHat tipping to our Package stage - you can manage and publish [Terraform modules in the registry](https://docs.gitlab.com/ee/user/packages/terraform_module_registry/) too, using all of the DevOps Platform advantages. And hot off the press, the [GitLab Kubernetes Operator is generally available](/blog/open-shift-ga/). \n\n## Monitor GitLab and dive into Prometheus\n\nPrometheus is a monitoring solution which collects metrics from `/metrics` HTTP endpoints made available by applications, as well as so-called exporters to serve services and host information in the specified metrics format. 
One example is CI/CD pipeline insights to analyse bottlenecks and [make your pipelines more efficient](https://docs.gitlab.com/ee/ci/pipelines/pipeline_efficiency.html). The [GitLab CI Pipeline Exporter project](https://github.com/mvisonneau/gitlab-ci-pipelines-exporter/tree/main/examples/quickstart) has a great quick start in under 5 minutes, bringing up demo setup with Docker-compose, Prometheus and Grafana. From there, it is not far into your production monitoring environment, and monitoring more of GitLab. \n\n![GitLab CI Exporter](https://about.gitlab.com/images/blogimages/top-10-gitlab-hacks/gitlab_10y_ci_pipeline_exporter_prometheus.png)\nExample dashboard for the GitLab CI Pipeline Exporter\n{: .note.text-center}\n\nThe Prometheus Exporter uses the [Go client libraries](https://prometheus.io/docs/instrumenting/writing_exporters/). They can be used to write your own exporter, or instrument your application code to expose `/metrics`. When deployed, you can use Prometheus again to monitor the performance of your applications in Kubernetes, as one example. Find more monitoring ideas in my talk “[From Monitoring to Observability: Left Shift your SLOs](https://docs.google.com/presentation/d/1LPb-HPMgbc8_l98VjMEo5d0uYlnNnAtJSURngZPWDdE/edit)”. \n\n## Protect\n\nYou can enable security features in GitLab by including the CI/CD templates one by one. A more easy way is to enable [Auto DevOps](https://docs.gitlab.com/ee/topics/autodevops/) and use the default best practices for [security scans](https://docs.gitlab.com/ee/user/application_security/index.html#security-scanning-with-auto-devops). This includes [container scanning](https://docs.gitlab.com/ee/topics/autodevops/stages.html#auto-container-scanning) ensuring that application deployments are not vulnerable on the container OS level. \n\nLet’s try a quick example with a potentially vulnerable image, and the Docker template tip from the Package stage above. 
Create a new `Dockerfile` in a new project:\n\n```yaml\nFROM debian:10.0 \n```\n\nOpen the pipeline editor and add the following CI/CD configuration:\n\n```yaml\n# 1. Automatically build the Docker image\n# 2. Run container scanning. https://docs.gitlab.com/ee/user/application_security/container_scanning/index.html\n# 3. Inspect `Security & Compliance > Security Dashboard`\n\n# For demo purposes, scan the latest tagged image from 'main'\nvariables:\n    DOCKER_IMAGE: $CI_REGISTRY_IMAGE:latest    \n\ninclude:\n    - template: Docker.gitlab-ci.yml\n    - template: Security/Container-Scanning.gitlab-ci.yml\n```\n\nThe full example is located in [this project](https://gitlab.com/gitlab-de/playground/container-scanning-10y-example).\n\nTip: Learn more about [scanning container images in a deployed Kubernetes cluster](https://docs.gitlab.com/ee/user/application_security/container_scanning/) to stay even more safe. \n\n![Container Scanning Vulnerability Report](https://about.gitlab.com/images/blogimages/top-10-gitlab-hacks/gitlab_10y_container_scanning_vulnerability_report.png)\nView the container scanning vulnerability report\n{: .note.text-center}\n\n## What’s next?\n\nWe have tried to find a great “hack” for each stage of the DevOps lifecycle. 
There are more hacks and hidden gems inside GitLab - share yours and be ready to explore more stages of the DevOps Platform.\n\nCover image by [Alin Andersen](https://unsplash.com/photos/diUGN5N5Rrs) on [Unsplash](https://unsplash.com)\n",[749,1128,727],{"slug":3382,"featured":6,"template":683},"top-10-gitlab-hacks","content:en-us:blog:top-10-gitlab-hacks.yml","Top 10 Gitlab Hacks","en-us/blog/top-10-gitlab-hacks.yml","en-us/blog/top-10-gitlab-hacks",{"_path":3388,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":3389,"content":3394,"config":3400,"_id":3402,"_type":16,"title":3403,"_source":18,"_file":3404,"_stem":3405,"_extension":21},"/en-us/blog/improve-cd-workflows-helm-chart-registry",{"title":3390,"description":3391,"ogTitle":3390,"ogDescription":3391,"noIndex":6,"ogImage":1560,"ogUrl":3392,"ogSiteName":697,"ogType":698,"canonicalUrls":3392,"schema":3393},"Get started with GitLab's Helm Package Registry","Improve CD workflows and speed up application deployment using our new Helm Package Registry.","https://about.gitlab.com/blog/improve-cd-workflows-helm-chart-registry","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Get started with GitLab's Helm Package Registry\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Philip Welz\"}],\n        \"datePublished\": \"2021-10-18\",\n      }",{"title":3390,"description":3391,"authors":3395,"heroImage":1560,"date":3397,"body":3398,"category":14,"tags":3399},[3396],"Philip Welz","2021-10-18","\n\nIn our 14.1 release, we offered the ability to add Helm charts to the GitLab Package Registry. Here's everything you need to know to leverage application deployment with these new features.\n\n## The role of container images\n\nThe de-facto standard is to package applications into [OCI Images](https://github.com/opencontainers/image-spec) which are often just referred to as `container images` and more often as `Docker containers`. 
The [Open Container Initiative](https://opencontainers.org/) was launched in 2015 by Docker and other companies to define industry standards around container image formats and runtimes. GitLab introduced an OCI conform [Container Registry](/blog/gitlab-container-registry/) with the release of [GitLab 8.8](/releases/2016/05/22/gitlab-8-8-released/) in May 2016.\n\nToday, a common and widely adopted approach is to deploy applications with [Helm charts](https://helm.sh/) to [Kubernetes](https://kubernetes.io/). This will be covered in this blog together with the feature release in [GitLab 14.1](/releases/2021/07/22/gitlab-14-1-released/) of adding Helm Charts to the [GitLab Package Registry](https://docs.gitlab.com/ee/user/packages/package_registry/).\n\n### Install software to Kubernetes\n\nIn the DevOps era, [APIs](https://en.wikipedia.org/wiki/API) became incredibly popular, helping to drive demand for Kubernetes.\n\nThe core of Kubernetes' control plane is the API server. The API server exposes an HTTP REST API that lets end users, different parts of your cluster, and external components communicate with one another.\n\nTo interact with the API server we can use the command-line tool [kubectl](https://kubernetes.io/docs/reference/kubectl/overview/) - although it would be also possible to use software development kits (SDKs) or any client that understands REST like curl that was released 1997.\n\nBut which data format is best to use?\n\nModern APIs most likely use JSON. JSON is a human-readable format that provides provide access to machine-readable data. 
Here is an example for Kubernetes:\n\n```json\n{\n    \"kind\": \"Pod\",\n    \"apiVersion\": \"v1\",\n    \"metadata\": {\n        \"name\": \"nginx\",\n        \"creationTimestamp\": null,\n        \"labels\": {\n            \"run\": \"nginx\"\n        }\n    },\n    \"spec\": {\n        \"containers\": [\n            {\n                \"name\": \"nginx\",\n                \"image\": \"nginx\",\n                \"resources\": {}\n            }\n        ],\n        \"restartPolicy\": \"Always\",\n        \"dnsPolicy\": \"ClusterFirst\"\n    },\n    \"status\": {}\n}\n```\n\nOne downside of JSON is that comments are not supported. That is one several reasons why YAML stepped in and took the spot as the de-facto language to use for declarative configurations. The Kubernetes API transforms YAML to JSON behind the scenes. As you can easily convert back and forth between both, YAML tends to be more user-friendly. Nginx example Pod in YAML:\n\n```yaml\napiVersion: v1\nkind: Pod\nmetadata:\n  creationTimestamp: null\n  labels:\n    run: nginx\n  name: nginx\nspec:\n  Containers:\n  # NOTE: If no tag is specified latest will be used\n  - image: nginx\n    name: nginx\n    # TODO\n    resources: {}\n  dnsPolicy: ClusterFirst\n  restartPolicy: Always\nstatus: {}\n```\n\nNow you are ready to save our YAML code in a file called `nginx.yaml` and deploy it into Kubernetes:\n\n```shell\n$ kubectl apply --filename=nginx.yaml \n```\n\n### Create a Helm chart\n\nApplying YAML configuration files can get overwhelming, especially when needing to deploy into several environments or wanting to version the manifests. It is also cumbersome to maintain plain YAML files for more complex deployments which can easily extend to more than 1000 lines per file.\n\nInstead, how about using a format that packages our applications and makes them easily reproducible with templates? How about adding our own versioning scheme to this packaged application? 
How about deploying the same version with a few lines of code to multiple environments? This all comes with Helm.\n\nTo create a Helm package you have to ensure that the Helm CLI is [installed](https://helm.sh/docs/intro/install/) on your system (example with Homebrew on macOS: `brew install helm`).\n\n```shell\n$ helm create nginx \n```\n\nInspect the created Helm boilerplate files with `ls -lR` or `tree` on the CLI. This Helm chart can also be tested in a sandbox environment to verify it is operational.\n\n```shell\n.\n├── Chart.yaml\n├── charts\n├── templates\n│   ├── NOTES.txt\n│   ├── _helpers.tpl\n│   ├── deployment.yaml\n│   ├── hpa.yaml\n│   ├── ingress.yaml\n│   ├── service.yaml\n│   ├── serviceaccount.yaml\n│   └── tests\n│       └── test-connection.yaml\n└── values.yaml\n```\n\nNOTE: You can read more about the starter Chart [here](https://helm.sh/docs/chart_template_guide/getting_started/).\n\nKindly Helm creates a starter chart directory along with the common files and directories used in a chart with NGINX as an example. We again can install this into our Kubernetes cluster:\n\n```shell\n$ helm install nginx .\n```\n\n### Package Distribution\n\nThus far, we have learned that applications are packaged in containers and are installed using a Helm chart. Both methods require central distribution storage that is publicly accessible, or accessible in your local network environment where the Kubernetes clusters are running.\n\nThe Helm documentation provides insights on [running your own Helm registry](https://helm.sh/docs/topics/registries/), similar to hosting your own Docker container registry.\n\nWhat if we could avoid Do It Yourself DevOps and have both containers and Helm charts in one central DevOps platform? 
After maturing the [container registry in GitLab](https://docs.gitlab.com/ee/user/packages/container_registry/), community contributors helped add the [Helm chart registry](https://docs.gitlab.com/ee/user/packages/helm_repository/index.html) in 14.1.\n\nBuilding the container image and Helm chart is part of the CI/CD pipeline stages and jobs. The missing bit is the automated production deployment using Helm charts in your Kubernetes cluster.\n\nAn additional benefit in CI/CD is reusing the authentication mechanism, and working in the same trust environment with security jobs before actually uploading and publishing any containers and charts.\n\n### Build the Helm Chart\n\n```shell\n$ helm package nginx \n```\n\nThe command creates a new tar.gz archive ready to upload. Before doing so, you can inspect the archive with the `tar` command to verify its content.\n\n```shell\n$ tar ztf nginx-0.1.0.tgz\n\nnginx/Chart.yaml\nnginx/values.yaml\nnginx/templates/NOTES.txt\nnginx/templates/_helpers.tpl\nnginx/templates/deployment.yaml\nnginx/templates/hpa.yaml\nnginx/templates/ingress.yaml\nnginx/templates/service.yaml \nnginx/templates/serviceaccount.yaml\nnginx/templates/tests/test-connection.yaml\nnginx/.helmignore\n```\n\n### Push the Helm chart to the registry\n\nWith the [helm-push](https://github.com/chartmuseum/helm-push/#readme) plugin for Helm we can now upload the chart to the GitLab Helm Package Registry:\n\n```shell\n$ helm repo add --username \u003Cusername> --password \u003Cpersonal_access_token> \u003CREGISTRY_NAME> https://gitlab.com/api/v4/projects/\u003Cproject_id>/packages/helm/stable\n$ helm push nginx-0.1.0.tgz nginx\n```\n\nThis step should be automated for a production-ready deployment with a GitLab CI/CD job.\n\n```yaml\ndefault:\n  image: dtzar/helm-kubectl\n  before_script:\n    - 'helm repo add --username gitlab-ci-token --password ${CI_JOB_TOKEN} ${CI_PROJECT_NAME} ${CI_API_V4_URL}/projects/${CI_PROJECT_ID}/packages/helm/stable'\nstages:\n  - 
upload\nupload:\n  stage: upload\n  script:\n    - 'helm plugin install https://github.com/chartmuseum/helm-push.git'\n    - 'helm push ./charts/podtatoserver-0.1.0.tgz ${CI_PROJECT_NAME}'\n```\n\n### Install the Helm chart\n\nFirst, add the Helm chart registry to your local CLI configuration and test the manual installation.\n\n```shell\n$ helm repo add --username \u003Cusername> --password \u003Cpersonal_access_token> \u003CREGISTRY_NAME> https://gitlab.com/api/v4/projects/\u003Cproject_id>/packages/helm/stable\n$ helm install --name nginx \u003CREGISTRY_NAME>/nginx\n```\n\nOnce it works, you can continue with adding an automated installation job into the CI/CD pipeline.\n\n```yaml\ndefault:\n  image: alpine/helm\n  before_script:\n    - 'helm repo add --username gitlab-ci-token --password ${CI_JOB_TOKEN} ${CI_PROJECT_NAME} ${CI_API_V4_URL}/projects/${CI_PROJECT_ID}/packages/helm/stable'\nstages:\n  - install\nupload:\n  stage: install\n  script:\n    - 'helm repo update'\n    - 'helm install --name nginx ${CI_PROJECT_NAME}/nginx'\n```\n\n### Complete your DevOps lifecycle\n\nYou can learn more about the newest GitLab registries for Helm and Terraform in this [#EveryoneCanContribute cafe session](https://everyonecancontribute.com/post/2021-07-28-cafe-40-terraform-helm-gitlab-registry/) and inspect the [deployment repository](https://gitlab.com/everyonecancontribute/kubernetes/civo-k3s).\n\nTry the Helm chart registry and share your workflows. Are there any features missing to complete your DevOps lifecycle? 
Let us know [on Discord](https://discord.gg/qgQWhD6wWV).\n\nCover image by [Joseph Barrientos](https://unsplash.com/@jbcreate_) on [Unsplash](https://unsplash.com/photos/eUMEWE-7Ewg)\n{: .note}\n",[1128,1369,2572],{"slug":3401,"featured":6,"template":683},"improve-cd-workflows-helm-chart-registry","content:en-us:blog:improve-cd-workflows-helm-chart-registry.yml","Improve Cd Workflows Helm Chart Registry","en-us/blog/improve-cd-workflows-helm-chart-registry.yml","en-us/blog/improve-cd-workflows-helm-chart-registry",{"_path":3407,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":3408,"content":3413,"config":3418,"_id":3420,"_type":16,"title":3421,"_source":18,"_file":3422,"_stem":3423,"_extension":21},"/en-us/blog/gitlab-inc-takes-the-devops-platform-public",{"title":3409,"description":3410,"ogTitle":3409,"ogDescription":3410,"noIndex":6,"ogImage":1874,"ogUrl":3411,"ogSiteName":697,"ogType":698,"canonicalUrls":3411,"schema":3412},"GitLab Inc. takes The DevOps Platform public","Today is the day GitLab Inc. takes The DevOps Platform public.","https://about.gitlab.com/blog/gitlab-inc-takes-the-devops-platform-public","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"GitLab Inc. takes The DevOps Platform public\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Sid Sijbrandij\"}],\n        \"datePublished\": \"2021-10-14\",\n      }",{"title":3409,"description":3410,"authors":3414,"heroImage":1874,"date":3415,"body":3416,"category":14,"tags":3417},[2379],"2021-10-14","\nToday, GitLab Inc. announced the next milestone in our journey as we become a publicly traded company on the Nasdaq Global Market (NASDAQ: GTLB). GitLab was the first company to publicly live stream the entire end-to-end listing day at Nasdaq. \n\nIn a world where software defines the speed of innovation, every company must become a software company or they’ll be disrupted by a software company. 
We believe that GitLab, the DevOps Platform, helps companies to deliver software faster and more efficiently, while strengthening security and compliance. And it all happens inside our single platform where engineering, security, and operations teams can collaborate together. \n\nIn my [Founder’s Letter](#foundersletter), which you can read below, I told GitLab’s origin story. GitLab did not start in a tech incubator, garage, or Bay Area apartment. In 2011, my co-founder, Dmitriy Zaporozhets, created GitLab from his house in Ukraine. In 2012, I discovered GitLab from my home in the Netherlands on a tech news site. I thought that it was natural that a collaboration tool for developers was open source so people could contribute to it. As a Ruby developer, I was impressed by GitLab’s code quality, especially since it absorbed more than 300 contributions in the first year. In 2013, Dmitriy tweeted that he would like to work on GitLab full-time. After reading that tweet, I approached him, and we partnered so he could work on GitLab full-time. We incorporated GitLab Inc. in 2014 and applied to Y Combinator, a technology accelerator in Silicon Valley. In 2015, we participated in their program, and this greatly accelerated our business.\n\nTo ensure the quality of the GitLab application, Dmitriy built a second application, GitLab CI, to automatically test our code. In 2015, Kamil Trzciński, a member of the wider community, contributed a better version of the GitLab CI application so that it could run jobs in parallel. Dmitriy and I quickly made this new Runner the default version, and Kamil ended up joining the company. Kamil proposed integrating the two applications, which Dimitriy and I initially disagreed with. Thankfully, Kamil persisted in arguing for combining GitLab and GitLab CI into a single application. Dmitriy and I came around to Kamil’s point of view and the results were far better than anyone expected. 
The single application was easier to understand, faster to use, and enabled collaboration across functions. We had invented what we believed to be the first true DevOps platform and proceeded to build it out. \n\nToday, we believe that GitLab is the leading DevOps platform with an estimated 30 million registered users. GitLab's mission is to ensure that everyone can contribute. When everyone can contribute, users become contributors, and we greatly increase the rate of innovation. \n\n“GitLab also has more than 2,600 contributors in its open source community, which it lists as a competitive strength” - Stephanie Condon, ZDNet*\n\nWe are making progress toward our mission by elevating others through knowledge sharing, job access, and our software platform.\n\nGitLab’s values and underlying operational principles are core to our past, present, and future success. Most companies regress to the mean and slow down over time. We plan to maintain our startup ethos by continuing to do the following:\n\n- Reinforcing our values\n- Making quick, informed decisions\n- Designating a directly responsible individual (DRI) to own decision making for a workstream or initiative\n- Organizing informal communications\n- Challenging conventions and using boring solutions\n- Having a bias for action\n- Remembering we are an organization, not a family\n- Having time based releases\n- Supporting individual innovation through coaches and incubation\n- Dogfooding\n\nWe believe our approach has an impact on not only our business, but the industry as a whole. And we are not the only ones. \n\n“There are few companies that have had as positive an impact on the culture of an industry as @gitlab has.” - James Wise, Partner, Balderton on Twitter\n\nFrom day 1, we have co-created with the wider GitLab community, and together we have advanced the DevOps Platform. 
I am excited to keep building to make GitLab’s “everyone can contribute” mission a reality.\n\n## \u003Ca name=\"foundersletter\">\u003C/a> Founder’s Letter from the GitLab S-1\n\n## Letter From Our CEO\n\n**Origins**\n\nGitLab did not start in a tech incubator, garage, or Bay Area apartment. In 2011, my co-founder, Dmitriy Zaporozhets, created GitLab from his house in Ukraine. \n\nIn 2012, I discovered GitLab from my home in the Netherlands on a tech news site. I thought that it was natural that a collaboration tool for developers was open source so people could contribute to it. As a Ruby developer, I was impressed by GitLab’s code quality, especially since it absorbed more than 300 contributions in the first year. In 2013, Dmitriy tweeted that he would like to work on GitLab full-time. After reading that tweet, I approached him, and we partnered so he could work on GitLab full-time. We incorporated GitLab in 2014 and applied to Y Combinator, a technology accelerator in Silicon Valley. In 2015, we participated in their program, and this greatly accelerated our business.\n\n**DevOps Platform**\n\nTo ensure the quality of the GitLab application, Dmitriy built a second application, GitLab CI, to automatically test our code. In 2015, Kamil Trzciński, a member of the wider community, contributed a better version of the GitLab CI application so that it could run jobs in parallel. Dmitriy and I quickly made this new Runner the default version, and Kamil ended up joining the company.\n\nWhen Kamil proposed integrating the two applications, Dimitriy and I initially disagreed with him. Dmitriy felt that the applications were already integrated as well as two separate applications could be. And I believed that customers wanted to mix and match solutions. Thankfully, Kamil persisted in arguing for combining GitLab and GitLab CI into a single application. 
Dmitriy and I came around to Kamil’s point of view once we realized that combining the two applications would lead to greater efficiency for our team members and our users.\n\nThe results were far better than anyone expected. A single application was easier to understand, faster to use, and enabled collaboration across functions. We had invented what we believed to be the first true DevOps platform and proceeded to build it out. Kamil’s advocacy inspired GitLab’s “disagree, commit, and disagree” sub-value. We allow GitLab team members to question decisions even after they are made. However, team members are required to achieve results on every decision while it stands, even while they are trying to have it changed.\n\n**Mission**\n\nGitLab's mission is to ensure that everyone can contribute. When everyone can contribute, users become contributors, and we greatly increase the rate of innovation. We are making progress toward our mission by elevating others through knowledge sharing, job access, and our software platform. We promote knowledge sharing through publishing how we operate in our handbook, an online repository of how we run the company that now totals more than 2,000 webpages. The lessons we have learned and put in the handbook are available to anyone with an internet connection. We contribute to job access by helping people with their tech careers and educating the world on remote work best practices. We believe that remote work is spreading job access more evenly across regions and countries. Our software platform brings together development, operations, and security professionals and makes it faster and more secure for them to innovate together.\n\n**Stewardship**\n\nMost of the time, when a company starts commercializing an open source software project, the wider community around the project shrinks. This has not been the case with GitLab. The wider community around GitLab is still growing. 
We are proud that GitLab is a co-creation of GitLab team members and users. We have ten stewardship promises that commit us to balancing the need to generate revenue with the needs of the open source project and the wider community. In our first year, we received just over 300 code contributions. Now, we frequently exceed this number in a single month.\n\n**Values**\n\nFrom the beginning of GitLab, we have been all-remote as the initial team members lived in the Netherlands, Ukraine, and Serbia. GitLab was founded before remote work was a proven model, so investors were worried about our ability to effectively manage the business and scale. That early skepticism required us to establish explicit mechanisms for value reinforcement. We now have over 20 mechanisms listed in our handbook. Some reinforcements are small. For example, team members have access to a Zoom background that showcases each of our values as icons. Others are more substantial. For example, every team member’s promotion document is structured around our values and shared with the entire company.\n\nGitLab’s values and underlying operational principles are core to our past, present, and future success. These values are:\n\n1. Results - This is the most important value in our values hierarchy as strong results enable us to keep doing the right things. If we have strong business momentum, we can continue to invest toward our ambitious, long-term mission. We care about what is achieved, not the hours worked. Since you get what you measure and reward, we do not encourage long hours and instead focus on results. For example, to discourage team members from focusing on hours worked, team members are discouraged from publicly thanking others for working long hours or late nights. This is intended to prevent pressure to work longer hours or highlighting longer hours as something that is rewarded.\n2. Collaboration - Team members must work effectively with others to achieve results. 
To encourage collaboration, we have about four group conversations per week. These are meetings in which departments at GitLab share their results with team members throughout the company. Group conversations enable all team members to understand and question every part of the business. This access to information and context supports collaboration.\n3. Efficiency - Working efficiently enables us to make fast progress, which makes work more fulfilling. For example, we only hold meetings when topics need to be discussed synchronously. When we do have a meeting, we share the discussion topics, the slide deck, and sometimes a recording of someone presenting the slide deck beforehand. This way we can dedicate the synchronous time of the meeting to discussion, not team members presenting material. We also have speedy meetings that are short, start on time, and end at least five minutes before the next one begins. We encourage team members to work together in public chat channels as much as possible instead of through direct messages. This makes information readily available to anyone who is interested or may become interested at a future point.\n4. Diversity, Inclusion, and Belonging (DIB) - We believe that team member diversity leads to better decisions and a greater sense of team member belonging. We spend more money than the industry average per hire to ensure we approach a diverse set of candidates. We have a DIB Program which includes Team Member Resource Groups (TMRGs), voluntary, team member-led groups, focused on fostering DIB within GitLab. I'm proud of team member driven initiatives such as mentoring for an advanced software engineering course at Morehouse College, a historically Black liberal arts school. We also do Reverse Ask Me Anything, meetings in which I ask questions of Team Member Resource Groups and get to learn from their experiences. We try to work asynchronously as much as possible to not be dependent on time zone overlap. 
This enables us to hire and work with people around the world from different cultures and backgrounds.\n5. Iteration - By reducing the scope of deliverables, we are able to complete them earlier and get faster feedback. Faster feedback gives us valuable information that guides what we do next. We measure and set targets for how many changes are expected from each engineering team. This encourages teams to reduce the scope of what they build and ship changes in smaller increments. We know that smaller changes are easier to review and less risky. The end result is that we are able to get more done as the higher frequency of changes more than compensates for the smaller size of them. We release features and categories even when they are minimally viable. We do not wait for perfection when we can offer something of value, get feedback, and allow others to contribute to features by refining and expanding upon them.\n6. Transparency - By making information public, we can reduce the threshold to contribute and make collaboration easier. In addition to our publicly shared handbook, we also livestream and share recordings of some of our meetings. I have CEO Shadows who attend all my GitLab meetings during a two week rotation. We are public about our strategy, risks, and product direction.\n\nThese are living values that are updated over time. In 2020 alone, we made 329 improvements to the GitLab Values page of our handbook.\n\n**Still a Startup**\n\nMost companies regress to the mean and slow down over time. We plan to maintain our startup ethos by doing the following:\n\n1. **Reinforcing our values**: We have more than 20 documented ways to reinforce GitLab’s values. Since hiring, bonuses, and promotions provide strong signals of what is valued and rewarded, we make values the lens through which we evaluate team member fit and advancement.\n2. 
**Quick and informed decisions**: We are able to combine the advantages of consensus organizations and hierarchical organizations by splitting decisions into two phases. In the data gathering phase, we employ the best of consensus organizations as we encourage people to contribute their ideas and opinions. In the decision phase, we benefit from the best of hierarchical organizations with one person, the directly responsible individual, deciding what to do without having to convince the people who made suggestions.\n3. **A directly responsible individual (DRI)**: A DRI is a single person who owns decision making authority and responsibility for the success of a given workstream or initiative. We avoid confusion and empower team members by being clear about the DRI. With a few documented exceptions, the person who does the work resulting from the decision gets to make the decision. DRIs tend to have the context required for good decision making and are empowered by their ability to use their own judgement in doing what is best for the business.\n4. **Organize informal communications**: Informal team member communications, such as a chat about life outside of work, are necessary for building trust. Trust is essential for great business results. Many businesses invest heavily in offices and facilities, because they believe offices are necessary for informal communication.\n\nDuring the pandemic, many businesses that were forced to work remotely discovered that productivity increased. Many of these same businesses are now making plans to return to the office. One reason being given for the return to the office is that not everyone can work from home. We solve this by allowing people to rent work space. The other main reason given is that people miss working from a central office with co-workers. I don’t think that people miss the commute or the office furniture. They miss informal communication. 
Central offices are a really expensive, inconvenient, and indirect way to facilitate informal communication. It is more efficient to directly organize informal communication.\n\nFor example, every person who joins GitLab has to schedule at least five coffee chats during their onboarding. We also have social calls, Ask Me Anything meetings with senior leaders, and 15 other explicit ways to encourage employee connections and relationship building. Intentionally organizing informal communication enables the trust-building conversations that are essential for collaboration. This can be more effective than relying on chance encounters in an office building. You can connect with team members throughout the world and across departments through a coffee chat. You may not meet people outside of your own floor in an office setting.\n\n5. **Challenge conventions**: We do not do things differently for the sake of being different, and we use boring solutions whenever possible. That said, we're also willing to deviate from conventions when it can benefit GitLab and the wider community. Before the COVID-19 pandemic, we believe GitLab was the largest all-remote company in the world. We now teach others how to succeed as remote companies and employees. We aim to be the most transparent company of our size. This transparency has had demonstrable benefits ranging from increased team member productivity to enhanced brand awareness. What some saw as a liability, we have shown to be a strength.\n6. **Bias for action**: Decisions should be thoughtful, but delivering fast results requires the fearless acceptance of occasionally making mistakes. Our bias for action may result in the occasional mistake, but it also allows us to course correct quickly. We keep the stakes low for mistakes for the sake of transparency. When people are comfortable communicating missteps, risk aversion and secrecy don’t become the norm.\n7. **Not a family**: Some companies talk about being a 'Family.' 
We don't think that is the right perspective. At GitLab, the relationship is not the end goal. The goal is results. We are clear about accountability and hold people to a clearly articulated standard. When people do not perform, we try to help them improve. If they still can’t meet expectations, we let them go.\n8. **Time based release**: We have introduced a new, enhanced version of our software on the 22nd of every month for over nine years. A time based release ensures that when a feature is ready, its release will not be held up by another that is not. Aligned with our value of iteration, we try to reduce the scope of each feature so that it fits in a single release.\n9. **Individual innovation**: We empower individuals to innovate. For example, we have designated coaches who support contributors from the wider community in getting their contributions to the point where they can be merged by GitLab. We also have an incubation department dedicated to quickly turning ideas into viable features and products.\n10. **Dogfooding**: The best way to quickly improve GitLab is to use it ourselves, or dogfood it, so that we have a quick feedback loop. We use our own product even when a feature is in its early stages of development. This helps us to develop empathy with users and better understand what to build next.\n\n## Long-Term Focus\n\nMore than 40 million software professionals are driving change through software, and this number is growing. These software professionals are rapidly adopting DevOps to accelerate this change. Gartner predicts that by 2023, 40% of organizations will have switched from multiple point solutions to DevOps value stream delivery platforms to streamline application delivery, versus less than 10% in 2020. I believe that 40% is just the beginning, and almost all organizations will eventually use a DevOps Platform. 
GitLab has a unique opportunity to lead the DevOps Platform market and shape innovation.\nWith a large addressable market, GitLab plans to optimize for long term growth--even if it comes at the expense of short-term profitability. This means that we may not make a profit for a long time as we need to weigh profitability against the clear opportunity to pursue larger, future returns.\n\n## Closing\n\nWith the wider GitLab community, we have created and advanced the DevOps Platform. I am excited to keep building to make GitLab’s “everyone can contribute” mission a reality. I look forward to welcoming investors who share our enthusiasm for collaboration and innovation.\n\n* 2,600 contributors as of July 31, 2021\n",[837,1128],{"slug":3419,"featured":6,"template":683},"gitlab-inc-takes-the-devops-platform-public","content:en-us:blog:gitlab-inc-takes-the-devops-platform-public.yml","Gitlab Inc Takes The Devops Platform Public","en-us/blog/gitlab-inc-takes-the-devops-platform-public.yml","en-us/blog/gitlab-inc-takes-the-devops-platform-public",{"_path":3425,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":3426,"content":3432,"config":3438,"_id":3440,"_type":16,"title":3441,"_source":18,"_file":3442,"_stem":3443,"_extension":21},"/en-us/blog/open-shift-ga",{"title":3427,"description":3428,"ogTitle":3427,"ogDescription":3428,"noIndex":6,"ogImage":3429,"ogUrl":3430,"ogSiteName":697,"ogType":698,"canonicalUrls":3430,"schema":3431},"GitLab's Kubernetes Operator for OpenShift now available","GitLab Operator will allow teams to run production instances of GitLab on Kubernetes platforms.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749667835/Blog/Hero%20Images/generic-bot-bg.png","https://about.gitlab.com/blog/open-shift-ga","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"GitLab's Kubernetes Operator with support for Red Hat OpenShift is now generally available\",\n        
\"author\": [{\"@type\":\"Person\",\"name\":\"William Chia\"}],\n        \"datePublished\": \"2021-10-12\",\n      }",{"title":3433,"description":3428,"authors":3434,"heroImage":3429,"date":3436,"body":3437,"category":14},"GitLab's Kubernetes Operator with support for Red Hat OpenShift is now generally available",[3435],"William Chia","2021-10-12","Today, GitLab is pleased to announce the general availability (GA) of the GitLab-supported [GitLab Operator](https://docs.gitlab.com/operator/), with the ability to run production instances of GitLab on Kubernetes platforms, including Red Hat OpenShift. \n\nIT organizations that rely on Red Hat OpenShift can now deploy and run GitLab on the same infrastructure they trust for their other shared services. For many organizations in the public sector and regulated industries that only use Red Hat OpenShift, the Operator unlocks the ability to use GitLab, the DevOps Platform, allowing them to move out of [the DIY DevOps stage](/blog/welcome-to-the-devops-platform-era/) and embrace modern DevOps practices with greater speed, efficiency, and improved security. \n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/sEBnuhzYD2I\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\n## Why did we build the GitLab Operator? \n\nWe believe user expectations are at an all-time high. They expect always-on applications, available from any device, anywhere in the world. In order to meet these needs software teams employ cloud native methodologies and architectures such as containers and microservices to run reliably at scale. Kubernetes has emerged as the de facto standard for container orchestration and has seen broad adoption by companies of all sizes across all industries. 
\n\nRed Hat OpenShift is an enterprise-grade Kubernetes distribution that adds productivity and security features designed for the needs of production workloads and systems. Red Hat OpenShift has become a leading choice for security-and-compliance-conscious companies in markets such as the public sector as well as regulated industries.\n\nEnterprise shared services groups often favor running IT applications inside of the Kubernetes or Red Hat OpenShift cluster in order to take advantage of the resilience and scale it offers. Each app they can’t run inside their cluster creates operational overhead as it forces them to manage legacy infrastructure in order to support the app. \n\nGitLab was an early adopter of Kubernetes, adding capabilities such as a built-in container registry and native deployment from GitLab CI/CD to a Kubernetes Cluster as early as 2016 [[1]](/releases/2016/12/22/gitlab-8-15-released/) [[2]](/releases/2016/12/22/gitlab-8-15-released/), with official installation of GitLab instances via Helm chart in 2017 [[3]](/releases/2017/05/22/gitlab-9-2-released/#official-gitlab-installation-on-kubernetes). As Kubernetes quickly evolved, often with breaking changes, GitLab chose to focus on vanilla Kubernetes in order to establish a strong foundation to reach as broad a base as possible. However, this meant IT organizations running their applications in a Red Hat OpenShift cluster either couldn’t use GitLab or needed to deploy their instance into separate legacy infrastructure with all of the associated overhead. \n\n## Introducing the GitLab Operator!\n\nEarlier this year the [GitLab 13.11](https://about.gitlab.com/releases/2021/04/22/gitlab-13-11-released/#deploy-gitlab-on-openshift-and-kubernetes-with-the-gitlab-operator-beta) release went live accompanied by [the beta release of the GitLab Operator](https://cloud.redhat.com/blog/test-the-new-gitlab-operator-for-openshift). 
Over the past 6 months, GitLab has worked closely with Red Hat to discuss technical details and optimize compatibility. Using the [Operator pattern](https://kubernetes.io/docs/concepts/extend-kubernetes/operator/) developed by CoreOS, the GitLab Operator provides an enhanced way to deploy and operate GitLab. \n\n## What is an Operator? \n\nGenerally, the intention of an Operator is to take the operational knowledge of an administrator and automate it with software running inside the cluster. Day 1 tasks, such as installation and configurations, along with Day 2 tasks such as upgrades with minimized downtime, are now integrated into the cluster and actioned through an Operator. \t\n\n## Expanding Beyond the GitLab Helm Chart\n\nThe GitLab Operator and [Cloud Native Helm Chart](https://docs.gitlab.com/charts/) are offered in tandem as deployment solutions for cloud native environments. Behind the scenes, the Operator consumes the Helm chart to model operations. Both are officially supported patterns for deployment.\n\nThe Operator offers extended capabilities beyond the Cloud Native Helm Chart. The Operator functions to not only deploy GitLab initially, it also actively secures the deployment against unwarranted changes and keeps GitLab continually up-to-date as components are versioned. Most importantly, the GA release of the GitLab Operator provides the ability to run production instances of GitLab on Red Hat Openshift (with official Red Hat OpenShift certification coming soon!). While the Helm Chart only supports vanilla Kubernetes, the Operator runs on both Red Hat OpenShift and vanilla Kubernetes. \n\n## Get started \n\nVisit [the GitLab Operator documentation](https://docs.gitlab.com/operator/) for more information on known limitations and prerequisites along with a full installation guide. 
\n",{"slug":3439,"featured":6,"template":683},"open-shift-ga","content:en-us:blog:open-shift-ga.yml","Open Shift Ga","en-us/blog/open-shift-ga.yml","en-us/blog/open-shift-ga",{"_path":3445,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":3446,"content":3451,"config":3456,"_id":3458,"_type":16,"title":3459,"_source":18,"_file":3460,"_stem":3461,"_extension":21},"/en-us/blog/a-look-at-devops-salaries",{"title":3447,"description":3448,"ogTitle":3447,"ogDescription":3448,"noIndex":6,"ogImage":1874,"ogUrl":3449,"ogSiteName":697,"ogType":698,"canonicalUrls":3449,"schema":3450},"DevOps salaries in 2021: where do you rank?","Another surprise benefit of working on a DevOps platform? A higher salary! Here's why DevOps salaries are going up, and where to find the biggest paychecks.","https://about.gitlab.com/blog/a-look-at-devops-salaries","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"DevOps salaries in 2021: where do you rank?\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Sharon Gaudin\"}],\n        \"datePublished\": \"2021-10-07\",\n      }",{"title":3447,"description":3448,"authors":3452,"heroImage":1874,"date":3453,"body":3454,"category":14,"tags":3455},[1364],"2021-10-07","\n_This is the first in an occasional series of blog posts looking at DevOps salaries and careers._\n\nDespite the COVID-19 pandemic and the subsequent economic crisis that has disrupted lives and business across industries and around the world, demand for DevOps professionals remains strong and salaries continue to increase.\n\nThe IT industry, in general, fared better than many during the economic uncertainty of 2020 and 2021. 
With a strong IT infrastructure already in place and IT professionals accustomed to working remotely, increased demand and short supply for IT workers meant [IT salaries held steady or rose](https://rlc.randstadusa.com/for-business/learning-center/salary-insights/salary-guide/IT-technologies) in turbulent times, reported Randstad, a multinational human resources consulting firm. \n\nAnd DevOps professionals did even better than most in IT.\n\n## DevOps salaries are on the rise\n\n[DevOps, simply put,](/topics/devops/) is one of the hottest areas in the technology industry. Robert Half International Inc., a major human resources consulting firm, lists DevOps in the top 10 most in-demand jobs in 2021. Actually, in early September the firm [listed DevOps as the second hottest IT job](https://www.roberthalf.com/blog/salaries-and-skills/the-13-highest-paying-it-jobs-in-2019), just behind big data engineers, and surpassing cloud architects, security managers and database managers. And Randstad also ranked DevOps high in its [list of in-demand technology roles.](\u003Chttps://rlc.randstadusa.com/for-business/learning-center/salary-insights/salary-guide/IT-technologies>)\n\nAccording to salary watchers like Randstad, Glassdoor and ZipRecruiter, DevOps engineers, for instance, generally make approximately $100,000 to $150,000. Based on average U.S. salaries on Glassdoor, DevOps engineers are number 8 for 10 top-paying IT jobs in 2021. And DevOps developers, who Randstad calls one of the most in-demand technology roles, are doing well, too. They are in line to make $112,785 (for those with one year of experience) to $165,980 (for 10 years or more of experience). 
Those figures, of course, greatly depend on location and skill level.\n\nJust to drive the point home, Amanda Stansell, a data scientist at Glassdoor, said in a report earlier this year that [DevOps engineers](https://about.gitlab.com/topics/devops/what-is-a-devops-engineer/) made her list of [Top 10 Best Jobs in America for 2021.](\u003Chttps://www.glassdoor.com/research/best-jobs-in-america-for-2021/>) She bases her calculations on earning potential, overall job satisfaction, and number of job openings listed on Glassdoor. The role of DevOps engineer came in squarely in the middle at #5 - between Java developer at #1 and dentist at #10.\n\n## Demand for DevOps professionals is skyrocketing\n\nAccording to the Randstad 2021 Salary Guide, demand for DevOps developers is currently skyrocketing. “For employers… the average time-to-fill for these roles is north of 50 days,” the report noted. “That likely means many would-be employers today are instead suffering from key vacancies in their IT departments. Worse, with average annual salaries for DevOps developers at $137,830 — higher even than the average for cloud engineers — organizations should expect to spend considerable budget just to be in the running for skilled and experienced developers. It’s a pay-to-play hiring environment.”\n\n## Breaking down the DevOps salaries\n\nLet’s take a closer look at how pay for some DevOps positions breaks down:\n\n* ZipRecruiter reports that a DevOps engineer in San Francisco can take home $132,934, while the same position in Boston, Mass. would garner $113,552. In Austin, Texas, that engineer could earn $110,240 but in Boise, Idaho that drops to $102,093.\n\n* According to ZipRecruiter, the top five [highest paying cities for DevOps engineers](https://www.ziprecruiter.com/Salaries/Devops-Engineer-Salary) are Sunnyvale, Calif. (at $144,494); Santa Rosa, Calif. ($139,673); Cambridge, Mass. ($135,440); Vacaville, Calif. 
($132,838), and New York City ($131,356).\n\n* The [top five best states](https://www.ziprecruiter.com/Salaries/What-Is-the-Average-Devops-Engineer-Salary-by-State) for DevOps engineers to earn the most are Massachusetts, Hawaii, Connecticut, Tennessee and Minnesota. \n\n## Read more on DevOps careers: \t\t\n\n- [Best advice for your DevOps career? Keep on learning](/blog/best-advice-for-your-devops-career-keep-on-learning/)\n\n- [6 tips to make software developer hiring easier](/blog/6-tips-to-make-software-developer-hiring-easier/)\n\n- [Four tips to increase your DevOps salary](/blog/four-tips-to-increase-your-devops-salary/)\n\n- [Have DevOps jobs to fill? Try these 3 strategies to hire and retain](/blog/have-devops-jobs-to-fill-try-these-3-strategies-to-hire-and-retain/)\n",[1128,1408,2173],{"slug":3457,"featured":6,"template":683},"a-look-at-devops-salaries","content:en-us:blog:a-look-at-devops-salaries.yml","A Look At Devops Salaries","en-us/blog/a-look-at-devops-salaries.yml","en-us/blog/a-look-at-devops-salaries",{"_path":3463,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":3464,"content":3469,"config":3474,"_id":3476,"_type":16,"title":3477,"_source":18,"_file":3478,"_stem":3479,"_extension":21},"/en-us/blog/want-faster-releases-your-answer-lies-in-automated-software-testing",{"title":3465,"description":3466,"ogTitle":3465,"ogDescription":3466,"noIndex":6,"ogImage":1874,"ogUrl":3467,"ogSiteName":697,"ogType":698,"canonicalUrls":3467,"schema":3468},"Want faster releases? Your answer lies in automated software testing","The trouble with testing? Nearly everything! Here's why automated software testing is so hard to get right, and how a DevOps platform can help.","https://about.gitlab.com/blog/want-faster-releases-your-answer-lies-in-automated-software-testing","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Want faster releases? 
Your answer lies in automated software testing\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Valerie Silverthorne\"}],\n        \"datePublished\": \"2021-09-30\",\n      }",{"title":3465,"description":3466,"authors":3470,"heroImage":1874,"date":3471,"body":3472,"category":14,"tags":3473},[1859],"2021-09-30","\n\nFor three years in a row, our Global DevSecOps Survey found testing was the number one reason (by large margins) for release delays. A lack of automated software testing, combined with too many manual tests conducted too late in the process, was a story told time after time, and it certainly was one without any kind of happy ending.\n\nDespite the undeniable progress DevOps has brought to software development, integrating automated software testing into the lifecycle has remained an elusive goal for many teams. Here’s a look at why testing is such a difficult step to get right, and how an integrated DevOps Platform can bring much-needed structure to the process.\n\n## The state of automated software testing\n\nAccording to our [2021 Survey](/developer-survey/), it’s safe to say respondents are _frustrated_ with software testing.\n\n_“Testing can be slow in both writing and running.”_\n\n_“Testing delays everything.”_\n\nWhile there is forward momentum (almost 25% of teams say they’re fully automated - more than double the number from 2020), the same percentage reported zero automation or that they’re just beginning to think about it. \n\n_“Automated testing is ignored ‘due to time constraints.’”_\n\nBut even teams that haven’t ignored automated software testing are hamstrung because the vast majority don’t give developers scan results **within their IDEs.** Fewer than 25% of teams enable [SAST](/blog/developer-intro-sast-dast/) lite scanners in a web IDE and only 20% put results in a web pipeline report for developers. 
The situation is even worse when it comes to DAST, dependency and container scans: just 16% make DAST/dependency scan data available, and 14% do the same for container scans. While the percentage of teams reporting full automated software testing increased from 2020 to 2021, the percentage giving devs access to key test data barely changed in the same time frame.\n\n## Context switching makes everything hard\n\nThe fact that developers can’t easily get access to test results is a huge productivity blocker. “The best time to (fix bugs) is when I’m in \"flow\" - right when I’m writing the code and have a mental model of all of the things and how they are interconnected,” explained [Brendan O’Leary](/company/team/#brendan), senior developer evangelist at GitLab, in a blog post last year talking about [the developer-security divide](/blog/developer-security-divide/). “So that’s basically the same day or same week as when I wrote it.”\n\n**Elevating your DevOps skills? Join us at [Commit at KubeCon](/events/commit/) - Oct. 11!**\n\nSo while not getting results “in the flow” is a huge stumbling block, developers are adamant about the importance of testing. When we asked developer respondents in our 2021 Survey what they wished they could do more of, testing was, by far, the number one response. \n\nWhat’s the solution to this conundrum? More automation, [more AI/ML](/blog/ai-in-software-development/) and a [DevOps platform](/solutions/devops-platform/) to make everything seamlessly interconnected, visible and actionable.\n\nGeo-sharing company Glympse offers an object lesson on [the benefits of a DevOps platform](/customers/glympse/). The company was using approximately 20 tools to get its software out the door, but after moving to GitLab’s DevOps Platform, the process was dramatically streamlined. Deployments have dropped from four hours to  less than 30 minutes, and deployment fatigue, particularly around testing and code reviews, has vanished. 
\n\n## The struggle is real, but worth it\n\nFor teams who’ve tamed the automated software testing beast, and are humming along in their DevOps practices, the benefits are substantial. Here’s what they told us in our 2021 Survey:\n\n_“We are not relying on developers to have remembered to create and run tests for their code before deploying.”_\n\n_“We automate everything possible, to be able to test our product ‘like in real life’ without any downside. This increases confidence and simplifies tests for everything.”_\n\n_“Integration testing has been a big plus in how confident we are to release automatically and deliver a version. We are now able to deliver any day.”_\n\n_“It helps that devs don't need to keep track of test running; they just need to push and pipeline will check their code before merge to master.”_\n",[792,1128,1589],{"slug":3475,"featured":6,"template":683},"want-faster-releases-your-answer-lies-in-automated-software-testing","content:en-us:blog:want-faster-releases-your-answer-lies-in-automated-software-testing.yml","Want Faster Releases Your Answer Lies In Automated Software Testing","en-us/blog/want-faster-releases-your-answer-lies-in-automated-software-testing.yml","en-us/blog/want-faster-releases-your-answer-lies-in-automated-software-testing",{"_path":3481,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":3482,"content":3487,"config":3492,"_id":3494,"_type":16,"title":3495,"_source":18,"_file":3496,"_stem":3497,"_extension":21},"/en-us/blog/have-devops-jobs-to-fill-try-these-3-strategies-to-hire-and-retain",{"title":3483,"description":3484,"ogTitle":3483,"ogDescription":3484,"noIndex":6,"ogImage":1874,"ogUrl":3485,"ogSiteName":697,"ogType":698,"canonicalUrls":3485,"schema":3486},"Have DevOps jobs to fill? Try these 3 strategies to hire and retain","So many DevOps jobs posted, so few options to fill them. 
Here's why hiring and retaining developers is tricky, and how 3 thoughtful strategies, including a DevOps platform, can help.","https://about.gitlab.com/blog/have-devops-jobs-to-fill-try-these-3-strategies-to-hire-and-retain","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Have DevOps jobs to fill? Try these 3 strategies to hire and retain\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Valerie Silverthorne\"}],\n        \"datePublished\": \"2021-09-23\",\n      }",{"title":3483,"description":3484,"authors":3488,"heroImage":1874,"date":3489,"body":3490,"category":14,"tags":3491},[1859],"2021-09-23","\nIf every company is a software company, how do you stand out from the crowd when it comes to attracting developer talent and filling DevOps jobs?\n\nThere’s a well-known, and worldwide, shortage of software developers, especially those with expertise in DevOps. Worse still, demand for those roles is accelerating rapidly: The US Bureau of Labor Statistics predicts employment opportunities for devs and testers will [increase 22% between 2020 and 2030](https://www.bls.gov/ooh/computer-and-information-technology/software-developers.htm#tab-6). That growth rate means nearly 190,000 net new developer/QA/test jobs will be opening each year, according to the BLS. \n\nThat’s all a long way of saying things are tough out there. Organizations looking to expand, or even just maintain, their DevOps jobs momentum have to find unique ways to stand out from the crowd because, as [many surveys have shown](https://hired.com/state-of-software-engineers#report), salary alone is often insufficient to both attract and retain developer talent.\n\n**Elevating your DevOps skills? Join us at [Commit at KubeCon - Oct. 
11!](/events/commit/)**\n\nHere are 3 ways organizations can create an environment where DevOps can thrive, boosting developer retention, job satisfaction and even “cool place to work” street cred.\n\n## Make (a few) cool tools rule\n\nDevelopers are known for their big love of tools. In our [2021 Global DevSecOps Survey](/developer-survey/), more than one-quarter of respondents said they used between 5 and ten tool chains, and more than half said each tool chain had an average of 5 tools on it. Do the math and it’s clear that’s a lot of tools, and according to [research on software developer job satisfaction](https://link.springer.com/chapter/10.1007/978-1-4842-4221-6_10) too much information (i.e., from **too many tools**) can lead to less productivity and unhappy developers.\n\nThe solution to this very common problem can be found by adopting a DevOps platform, a single application where every stage of DevOps is interconnected, visible and seamless. And make sure that platform can integrate with all the key, cutting edge, “must have” kinds of tools that developers like to put on their resumes, and everyone will benefit from this streamlined approach.\n\n## Pay attention to career education\n\nDevelopers are always willing to DIY career education. The latest Stack Overflow Survey found about 60% of their survey takers [taught themselves coding via an online source](https://insights.stackoverflow.com/survey/2021#developer-profile-experience) – but that doesn’t mean they wouldn’t value (and take advantage of) training opportunities from employers. In our 2021 survey, a majority of developers said they’re most excited to learn about AI/ML, while ops pros were looking for education around advanced programming languages. 
\n\nBy asking DevOps team members about their interests and needs, organizations can keep a pulse on training opportunities they could offer that will actually matter to their teams and potentially make filling DevOps jobs easier.\n\n## Be flexible about everything\n\nFrom working remotely to working part-time, it’s clear that developers want the option to mix it up if possible. The more options - like having the time to pursue a degree or a passion - given to DevOps team members, the more likely they are to be satisfied with their jobs. \n\nAlso, time to pursue some “off the books” projects is another smart company perk. Don’t forget the role open source projects played in the pandemic (here are [a few examples](https://www.newamerica.org/digital-impact-governance-initiative/reports/building-and-reusing-open-source-tools-government/open-source-project-hubs-for-covid-19/)), making an already important part of a developer’s role even more compelling. In fact, more than 69% of our survey respondents told us they were involved with at least one open source project in 2021, and that number was up 6% from 2020.  \n\n## Don't forget DevOps\n\nIt’s a temperamental DevOps job market, certainly, but organizations with healthy DevOps practices do have one secret weapon: DevOps itself. When we asked our 4,300+ survey takers what the top benefits of DevOps was, “happier developers” was near the top of the list. \n\n## Read more on DevOps careers: \t\t\n\n- [Best advice for your DevOps career? 
Keep on learning](/blog/best-advice-for-your-devops-career-keep-on-learning/)\n\n- [6 tips to make software developer hiring easier](/blog/6-tips-to-make-software-developer-hiring-easier/)\n\n- [Four tips to increase your DevOps salary](/blog/four-tips-to-increase-your-devops-salary/)\n\n- [DevOps salaries in 2021: Where do you rank?](/blog/a-look-at-devops-salaries/)\n\n",[1128,1408,1786],{"slug":3493,"featured":6,"template":683},"have-devops-jobs-to-fill-try-these-3-strategies-to-hire-and-retain","content:en-us:blog:have-devops-jobs-to-fill-try-these-3-strategies-to-hire-and-retain.yml","Have Devops Jobs To Fill Try These 3 Strategies To Hire And Retain","en-us/blog/have-devops-jobs-to-fill-try-these-3-strategies-to-hire-and-retain.yml","en-us/blog/have-devops-jobs-to-fill-try-these-3-strategies-to-hire-and-retain",{"_path":3499,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":3500,"content":3506,"config":3512,"_id":3514,"_type":16,"title":3515,"_source":18,"_file":3516,"_stem":3517,"_extension":21},"/en-us/blog/how-the-devops-platform-makes-building-accessible-software-easier",{"title":3501,"description":3502,"ogTitle":3501,"ogDescription":3502,"noIndex":6,"ogImage":3503,"ogUrl":3504,"ogSiteName":697,"ogType":698,"canonicalUrls":3504,"schema":3505},"It's time to build more accessible software. A DevOps platform can help","Shifting accessibility left can make building accessible products simpler and more efficient. A DevOps platform makes it easier to customize and adjust priorities to suit your business needs.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749667281/Blog/Hero%20Images/accessibility.jpg","https://about.gitlab.com/blog/how-the-devops-platform-makes-building-accessible-software-easier","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"It's time to build more accessible software. 
A DevOps platform can help\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Sara Kassabian\"}],\n        \"datePublished\": \"2021-09-21\",\n      }",{"title":3501,"description":3502,"authors":3507,"heroImage":3503,"date":3509,"body":3510,"category":14,"tags":3511},[3508],"Sara Kassabian","2021-09-21","\n\nThe earlier a feature or process is introduced in the multi-step software development lifecycle (SDLC), the more likely it is to be fully integrated into the product. \n\nIt's well documented how security can [shift left using a DevOps platform](/blog/devops-platform-supply-chain-attacks/), so it's time to make the case that accessbility needs to be thought about earlier (and, clearly, a DevOps platform can facilitate that too). Although there are laws that require applications to meet certain accessibility requirements, which [opens an application up to a broader user base](/blog/how-the-open-source-community-can-build-more-accessible-products/), rarely is accessibility considered a core product requirement. Instead, it is just a test tacked on at the end instead of being built into the DevOps platform process.\n\n**[Learn more about [how the open source development community helps build accessible software](/blog/how-the-open-source-community-can-build-more-accessible-products/)]**\n\n\"The problem really is that accessibility is not usually explicitly defined as a problem,\" said Segun Ola, a frontend web developer at engineering talent finder Andela, during a presentation at accessibility conference [axe-con](https://www.deque.com/axe-con/). 
\"Most of the time, developers go through the product lifecycle and we identify all the other problems with a product or all of the things we want to solve and ignore accessibility for the greater part.\"\n\n## Accessibility in software development: It starts with education\n\nOftentimes, omissions are unintentional and have more to do with a lack of awareness around why accessibility in software development is so important for many people living with disabilities and a key driver of business value. After all, the more accessible your product, the more users can benefit from it.\n\n\"I have met software engineers and designers who did not even know that there's a thing called a screen reader,\" Ola said. \"Just last week, I was reviewing some code and explaining why the code needed to be refactored. A junior engineer asked me ‘what is a screen reader?’ So I had to get on a call with him and show him how screen readers work. And then he asks me, ‘What's the point of a screen reader?’ And I had to tell him: ‘Oh yeah there are people who can't see the way you and I see.’\"\n\nThis is just one example of why having empathy and education around accessibility so important, says [Taurie Davis](/company/team/#tauriedavis), product design manager on Ecosystems at GitLab. Earlier in 2021, the GitLab UX team set a [goal to become a department of accessibility experts and advocates at GitLab](https://gitlab.com/groups/gitlab-org/-/epics/5235) by completing a 26-hour training at [Deque University](https://dequeuniversity.com/) on accessibility in software development.\n\n## Ignoring accessibility? Expect more technical debt\n\nSometimes software companies will see investment in accessibility components for a product as expensive and/or as a trade-off for innovation. 
Development teams that wait until the end of the SDLC to think about accessibility are more likely to have coded components that are inaccessible, only to have to go back and rework them to suit legal accessibility standards. This process can lead to an immense amount of technical debt.\n\n**Take a deep dive into [all aspects of the DevOps platform](/solutions/devops-platform/)**\n\n\"Once a team does start to become educated about accessibility and they have the empathy and have the drive to make the change and start shifting accessibility left it's easy to see all of the debt that you've accrued around accessibility,\" says Taurie. \"It can be really expensive to get yourself out of that debt.\" Taurie points to examples such as having to go back to change variables for color contrast, and ensuring that filtering and tab reordering can be done in a way that screen readers understand it.\n\n\"There are just so many different aspects and elements that could cause you to go back and just rewrite how the entire feature was originally developed and that can affect every aspect of your product,\" she adds.\n\n## Other barriers to implementing accessibility earlier\n\nFor UX designers like [Jeremy Elder](/company/team/#jeldergl), staff product designer on Ecosystems at GitLab, and Taurie, the typical workflow is about testing artifacts and responding to customer feedback, as opposed to thinking proactively about how someone might use the product.\n\n\"It’s more of a softer skill to think through a lot of those abstract ideas and what-ifs upfront rather than just saying, ‘Hey, we need this widget to do XYZ,’\" says Jeremy. \"Instead of asking questions like ‘how might somebody want to use this? How does it fit in their workflow?’. 
That is more inclusive thinking that helps you to do that, but it's harder and not as common.\"\n\n## Building accessible software isn’t just ethical, it drives business value\n\nOftentimes accessibility in software development is framed around building products to better serve people living with disabilities. While this is essential and ethical, accessibility can also be about building software products that can easily adapt to a user’s workflow.\n\n\"It’s more rigor around understanding workflows and how somebody is wanting to use it and less about focusing necessarily on a disability per se, or an outcome,\" says Jeremy. \"You want to think about personas or jobs to be done, not just think about the ultimate task, but how somebody is achieving that task.\"\n\n**[Ten key features](/topics/devops-platform/) of a DevOps platform**\n\nProducts that are customizable and adaptable are more likely to pique the interest of clients who have specific needs (e.g., a screenreader) or workflow preferences (e.g., using a particular type of keyboard).\n\n## What are the solutions?\n\nThe simplest solution to building more accessible software solutions is to think about accessibility at the beginning of the SDLC, rather than waiting until the end. Companies that use a complete DevOps platform like GitLab will find it simpler to take iterative steps toward shifting accessibility left. Need an example? Make accessibility part of the requirements a dev team needs to complete before a particular feature can be considered \"done.\" One way to do this would be to update issue templates and MR templates to ensure an accessibility step is part of the checklist.\n\nWhether it’s security or accessibility, shifting something left is about bringing the conversation to the beginning of the SDLC, something made much more straightforward with a DevOps platform. 
When it comes to accessibility, the more accessible the product is, the broader the pool of users (and future customers) can benefit.\n\n## Read more about the DevOps Platform:\n\n- [The journey to a DevOps Platform](/blog/the-journey-to-a-devops-platform/)\n\n- [How ten steps over ten years led to the DevOps Platform](/blog/how-ten-steps-over-ten-years-led-to-the-devops-platform/)\n\n- [Making the case for a DevOps platform: What data and customers say](/blog/making-the-case-for-a-devops-platform-what-data-and-customers-say/)\n\n- [Agile planning with a DevOps platform](/blog/agile-planning-with-a-devops-platform/)\n\n- [Welcome to the DevOps Platform era](/blog/welcome-to-the-devops-platform-era/)\n\n",[1128,1210,1488],{"slug":3513,"featured":6,"template":683},"how-the-devops-platform-makes-building-accessible-software-easier","content:en-us:blog:how-the-devops-platform-makes-building-accessible-software-easier.yml","How The Devops Platform Makes Building Accessible Software Easier","en-us/blog/how-the-devops-platform-makes-building-accessible-software-easier.yml","en-us/blog/how-the-devops-platform-makes-building-accessible-software-easier",{"_path":3519,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":3520,"content":3525,"config":3531,"_id":3533,"_type":16,"title":3534,"_source":18,"_file":3535,"_stem":3536,"_extension":21},"/en-us/blog/qpage-on-the-devops-platform",{"title":3521,"description":3522,"ogTitle":3521,"ogDescription":3522,"noIndex":6,"ogImage":1243,"ogUrl":3523,"ogSiteName":697,"ogType":698,"canonicalUrls":3523,"schema":3524},"QPage improves deployment & efficiency using GitLab platform","QPage went from a homegrown CI/CD solution to the GitLab DevOps Platform and found more benefits than expected.","https://about.gitlab.com/blog/qpage-on-the-devops-platform","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How QPage achieved automatic deployment and efficiency 
using the GitLab DevOps Platform\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"GitLab\"}],\n        \"datePublished\": \"2021-09-15\",\n      }",{"title":3526,"description":3522,"authors":3527,"heroImage":1243,"date":3528,"body":3529,"category":14,"tags":3530},"How QPage achieved automatic deployment and efficiency using the GitLab DevOps Platform",[1524],"2021-09-15","Deployment automation is essential for any company involved in software development to stay competitive. [QPage](https://www.qpage.one/), a company that provides an end-to-end sourcing and recruitment solution for SMEs, realized it quickly and migrated to GitLab’s DevOps Platform to accelerate their deployment process.\n\nWe spoke with Pouya Lotfi, the co-founder of QPage, to see how they use GitLab at QPage and how it has helped the company.\n\n## Why GitLab?\n\nQPage was initially using a local bespoke CI/CD for about the first two months, but they soon realized they needed a more professional DevOps Platform system. Because Pouya and the team at QPage had already used GitLab at a previous employer, they knew it would be the right fit. So, they didn’t consider other options and opted for GitLab straight away.\n\n**Everything you need to know about [a DevOps platform](/solutions/devops-platform/)**\n\n \"We started from the local CI/CD, but soon we realized that would be something we can actually do with GitLab,” said Pouya Lotfi, co-founder QPage. 
“I had the experience with GitLab back in the other companies I was part of, so we soon actually migrated to GitLab, and we brought everything we could actually have in GitLab’s DevOps Platform to accelerate our deployment and the processes.”\n\nQPage chose GitLab’s paid subscription plan.\n\n## How GitLab’s DevOps Platform works\n\nQPage is using several CI/CD integrations that GitLab offers.\n\n\"We are using it end-to-end, but we did use the benefit of integrating it with other platforms as well,\" Pouya said.\n\nThey are using the GitLab-Kubernetes integration for CI/CD funnels, which allows building, testing, and deploying to cluster, as well as using Auto DevOps to automate the CI/CD process.\n\nAnother key integration for QPage is the JIRA integration - they get notifications and assign a ticket to one of the developers/engineers. However, a part of this process is still done manually as they are not yet using issues, boards, and milestones within GitLab. But, they are considering using GitLab altogether to automate the whole process.\n\n**Get the [most out of your DevOps platform](/topics/devops/seven-tips-to-get-the-most-out-of-your-devops-platform/)**\n\nQPage is also taking advantage of the Docker-GitLab integration. They use containers and images, push them through the GitLab CI and then finally deploy.\n\nThey start with the staging environment, then move to testing and QA, and finally, they push it to the production; their deployment and release part is divided into staging and production. For deployment, QPage is using cloud providers AWS and Digital Ocean.\n\n## The dev team and GitLab\n\nThe developers at QPage find GitLab an easy solution to work with because they already knew how it worked; one of QPage’s basic criteria to hire a developer or an engineer is to have experience with using GitLab or GitHub CI/CD.\n\nAdditionally, they find GiLab’s documentation very helpful. 
When they come across any problem with using GitLab, they quickly reach for the documentation to solve their problems. This eliminates the bottleneck of depending on one person on the team, who is an expert, to solve a problem.\n\n## Key DevOps Platform benefits\n\nOne of the major benefits QPage has seen from using GitLab is achieving automatic deployment. GitLab has made their CI/CD process more efficient as they have integrated it with tools like Kubernetes, Docker, and JIRA.\n\nThey believe the management within GitLab is also a huge plus where they can now test the codes and push them. Additionally, they like the visibility of work and collaboration among the developers. Their team can now know the status of the deployment in terms of whether it was successful or it failed and where it was deployed, such as the staging environment or the production.\n\n**How [DevOps gets easier](https://learn.gitlab.com/smb-devops-1/simplify-devops) with a DevOps platform**\n\nAnother big benefit of migrating to GitLab is the operational efficiency. Their deployment time has now reduced by 80% - with the local CI/CD, it took around 6-8 hours, but with GitLab, it’s between 15-20 minutes.\n\n \"In the beginning, when we had done it through the local server CI/CD, it would take around 6-8 or 10 hours, and that was a real hassle for us,\" Pouya said. 
“With our GitLab migration, and we push something to production, it takes like 15 to 20 to 30 minutes.”\n\nAlthough QPage has one main product, they have around 29 sub-products, like API algorithms, and they've seen great optimization in deployment with all of their products after using GitLab.\n\nLast but not least, QPage believes using GitLab is also cost-effective for them.",[1128,1528,793],{"slug":3532,"featured":6,"template":683},"qpage-on-the-devops-platform","content:en-us:blog:qpage-on-the-devops-platform.yml","Qpage On The Devops Platform","en-us/blog/qpage-on-the-devops-platform.yml","en-us/blog/qpage-on-the-devops-platform",{"_path":3538,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":3539,"content":3545,"config":3551,"_id":3553,"_type":16,"title":3554,"_source":18,"_file":3555,"_stem":3556,"_extension":21},"/en-us/blog/lessons-weet-learned-lokalise",{"title":3540,"description":3541,"ogTitle":3540,"ogDescription":3541,"noIndex":6,"ogImage":3542,"ogUrl":3543,"ogSiteName":697,"ogType":698,"canonicalUrls":3543,"schema":3544},"How Weet integrates localization into the GitLab pipeline with Lokalise","Localization is an increasingly important option for users. 
Here's how to integrate localization in your GitLab pipeline.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749668543/Blog/Hero%20Images/lokalise_cover.png","https://about.gitlab.com/blog/lessons-weet-learned-lokalise","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How Weet integrates localization into the GitLab pipeline with Lokalise\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Alexander Pereverzevs\"}],\n        \"datePublished\": \"2021-09-13\",\n      }",{"title":3540,"description":3541,"authors":3546,"heroImage":3542,"date":3548,"body":3549,"category":14,"tags":3550},[3547],"Alexander Pereverzevs","2021-09-13","\n\nAs a GitLab customer, Weet has fully invested in the premise of \"Iterate faster, innovate together.\" Weet has a low tolerance for processes that don't keep pace with the way they develop and launch. One important process that was slowing the business down – localizing their app.\n\nLocalization is a key way to drive growth and accelerate product adoption. When done poorly, localization or l10n, as it's commonly known, can slow down your development process, introduce bugs, and make it cumbersome to launch updates. When done right, teams can smooth out the process and [continuously localize](https://lokalise.com/features/localization-process-automation) their app. We unpack how Weet conquered its localization problems using GitLab and Lokalise.\n\n## What is Weet?\n\n[Weet](https://beeweet.com) is an asynchronous video communication tool designed to reduce the need for meetings. By combining video, audio, and screen sharing, it provides the nuance that written communication simply does not. For example, Weet's 10-person team, which is spread between France and the US, uses the product to speed through code reviews. The product has also been used for demos, design feedback, bug reports, QA reviews, and client presentations. 
At Lokalise, we use the tool to communicate with team members across time zones with ease and clarity.\n\nWeet started using GitLab five years ago and is using the latest version (13.11 as of this writing). For the runner they use 13.11 too, with an auto-scalable configuration (best feature ever!). The instance is self-managed on Google Cloud.\n\nWeet uses roughly 50 pipelines to manage processes such as: building the entire stack of the Weet application, checking the unit tests, deploying to a QA environment, deploying in production, launching the end-to-end tests, and more. The company currently has 17 projects set up, which are combined with GitLab CI/CD to deploy the Weet application.\n\nThey are, in summary... GitLab fans.\n\n## The first l10n solution\n\nWhen Weet first started localizing their app the engineering team considered two options:\n\n1. Download CSV files of strings, email them to the translators, and then reintegrate the data after the translation work was complete\n2. Translate directly in the IDE\n\nBoth options had their drawbacks. Downloading and uploading files takes developers out of the flow, but worse than that, the process can introduce l10n bugs that make the app look unreliable or amateurish. Also, these problems take time to resolve. It's not uncommon for version control to be an issue with this type of system.\n\nWeet chose the Web IDE option because it was easier to get started, but the process wasn't working at the pace they wanted.\n\n>> \"Before we used the Lokalise integration, we had to validate the new wording before each code push. The process was time-consuming as approvers were spread across different time zones,\" - Geraud Bonou-Selegbe, Full-stack engineer at Weet.\n\nHunting through the code to change all the instances of a word that needs to be replaced is not high on anyone's list of fun things to do.\n\nIt wasn't long before Jeremy Rouet, the CTO and co-founder of Weet, started looking for new options. 
If they wanted to fulfill the CI/CD promise of GitLab, they needed a tool that would integrate cleanly into the pipeline. Jeremy began testing translation management systems (TMS) and settled on integrating [Lokalise with GitLab](https://docs.lokalise.com/en/articles/1789855-gitlab).\n\n## How to continuously localize your product\n\nLokalise integrates into GitLab and allows a user (like Weet) to pull files into Lokalise, where translation tasks can be assigned and completed and then easily merged back.\n\n![Schema of how Lokalise works in GitLab](https://about.gitlab.com/images/blogimages/lokalize1.png){: .shadow.medium.center}\nA schema of how Lokalise works in GitLab.\n{: .note.text-center}\n\nDevelopers code as normal aiming to complete their work prior to each weekly release. Each push on master sends text strings automatically into Lokalise. Lokalise detects any changes to the text, so the developers don't have to remember what exactly they changed. Jeremy then uses the task features in Lokalise to assign the translation tasks to the Weet marketing team, who then go in and check all the new words.\n\nOnce the translation team is done, they create a merge request, and the product is ready to launch.\n\n>> \"Lokalise enabled us to bridge this gap by letting developers do what they do the best: coding. If my phrasing is not perfect, language experts can review it on Lokalise and then send a merge request with their updates. Now we've got the right expert in the right place for each milestone of our development process,\" says Geraud.\n\n![Lokalise Merge Request in GitLab](https://about.gitlab.com/images/blogimages/lokalize2.png){: .shadow.medium.center}\nWhat a merge request looks like using Lokalise and GitLab.\n{: .note.text-center}\n\nGone are the days of manually updating translations in the IDE in order to fix phrasing. 
Now app localization is a seamless and reliable part of the development workflow of the CI/CD process that is built around GitLab.\n\n## Steps to set up the integration\n\nFull instructions are available here. With over 500 keys in the app, the Weet team created several internal processes to keep their work tidy.\nOne move they made was to split their localization data into 5 projects/files. Each localization is a .json file. The separate files are:\n\n- emails\n- frontend\n- integration\n- server-side rendering\n- mobile – iOS/Android (WIP)\n\nThen to simplify key maintenance they used a naming pattern so that each component has its own keys. When they delete a component, they simply remove the main key from the localization file, which removes each label for this component. See below:\n\n![Deleting a component and removing main key from the localization file](https://about.gitlab.com/images/blogimages/lokalize3.png){: .shadow.medium.center}\nHow to delete a component and remove the main key from the localization file.\n{: .note.text-center}\n\nFinally, they tackled conflicts. The developers are able to edit the localization files both in Lokalise and in their environment. Changes in multiple systems could clash. To solve this problem, they decided that developers can only use Lokalise to update labels and they can only add or remove keys in their local environment.\n\n## What localization delivers\n\nIt took the Weet team some time and trial and error to smooth out the process.\n\nNow that the process is totally seamless, they can localize a new release in less than an hour with just a short quality check. That’s a big improvement from the days when they had to synchronize the dev, PO, and QA teams over a few days, to check and correct the new localization.\n\nWith their ability to continuously localize their app, they can focus on developing and delivering the best product possible. 
And it seems to be working as they were recently voted the #2 (closed) product of the week on Product Hunt. Coming up on the roadmap – mobile apps and more languages.\n",[1428,233,1488],{"slug":3552,"featured":6,"template":683},"lessons-weet-learned-lokalise","content:en-us:blog:lessons-weet-learned-lokalise.yml","Lessons Weet Learned Lokalise","en-us/blog/lessons-weet-learned-lokalise.yml","en-us/blog/lessons-weet-learned-lokalise",{"_path":3558,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":3559,"content":3565,"config":3570,"_id":3572,"_type":16,"title":3573,"_source":18,"_file":3574,"_stem":3575,"_extension":21},"/en-us/blog/setting-up-the-k-agent",{"title":3560,"description":3561,"ogTitle":3560,"ogDescription":3561,"noIndex":6,"ogImage":3562,"ogUrl":3563,"ogSiteName":697,"ogType":698,"canonicalUrls":3563,"schema":3564},"How to deploy the GitLab Agent for Kubernetes with limited permissions"," Learn how to deploy the GitLab Agent for Kubernetes with Limited Permissions.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749668655/Blog/Hero%20Images/seabass-creatives-U3m4_cKbUfc-unsplash.jpg","https://about.gitlab.com/blog/setting-up-the-k-agent","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How to deploy the GitLab Agent for Kubernetes with limited permissions\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Fernando Diaz\"}],\n        \"datePublished\": \"2021-09-10\",\n      }",{"title":3560,"description":3561,"authors":3566,"heroImage":3562,"date":3567,"body":3568,"category":14,"tags":3569},[1086],"2021-09-10","\n\nThe [GitLab Agent for Kubernetes (`agentk`)](https://gitlab.com/gitlab-org/cluster-integration/gitlab-agent) is an active in-cluster component for solving GitLab and Kubernetes integration tasks in a secure and cloud-native way. 
The `agentk` communicates to the GitLab Agent Server (KAS) to perform [GitOps](https://about.gitlab.com/topics/gitops/) operations.\n\nIn many examples, we see the agent being deployed with global-level permissions on your Kubernetes cluster. There are use cases where we want to reduce the scope of what agentk has access to. In this guide I will provide information on deploying agentk on your cluster, limiting what namespaces it can access, as well as using it to deploy your applications.\n\nPrefer a video? Watch the walkthrough below to learn how to deploy agentk to your cluster:\n\n\u003Ciframe width=\"560\" height=\"315\" src=\"https://www.youtube-nocookie.com/embed/Sr3X5-O9HWA\" title=\"YouTube video player\" frameborder=\"0\" allow=\"accelerometer; autoplay; clipboard-write; encrypted-media; gyroscope; picture-in-picture\" allowfullscreen>\u003C/iframe>\n\n## How it works\n\nAnytime a developer performs changes to a manifest file managed within GitLab, the agentk will apply these changes to the Kubernetes cluster.\n\n![Kagent flowchart](https://about.gitlab.com/images/blogimages/kagent-limited/1.png){: .shadow.medium}\nHow a change to a manifest file in GitLab is applied to the Kubernetes cluster.\n{: .note.text-center}\n\nThe `agentk` and the KAS use bidirectional streaming to allow the connection acceptor (the gRPC server, GitLab Agent Server) to act as a client. The connection acceptor sends requests as gRPC replies.\n\n![Bidirectional streaming flowchart](https://about.gitlab.com/images/blogimages/kagent-limited/2.png){: .shadow.medium}\nHow bidirectional streaming with agentk works.\n{: .note.text-center}\n\n- GitLab RoR is the main GitLab application. It uses gRPC to talk to kas.\n\n- `agentk` is the GitLab Agent for Kubernetes. It keeps a connection established to a\nkas instance, waiting for requests to process. 
It may also actively send information\nabout things happening in the cluster.\n\n- KAS is the GitLab Agent Server, and is responsible for:\n  - Accepting requests from agentk\n  - Authentication of requests from agentk by querying GitLab RoR\n  - Fetching the agent's configuration file from a corresponding Git repository by querying Gitaly\n  - Matching incoming requests from GitLab RoR with existing connections from the right agentk, forwarding requests to it, and forwarding responses back\n  - Polling manifest repositories for GitOps support by communicating with Gitaly\n\n## How to deploy the GitLab Agent\n\nIn order to deploy the agent, we require the following:\n\n- Kubernetes cluster (I am using Google Kubernetes Engine, or GKE)\n- The GitLab project which will hold the agentk configuration and deployment manifest, you can import [Simple Agent K](https://gitlab.com/tech-marketing/devsecops/kubernetes-agent/simple-agent-k) which includes an application and CICD configured\n\n**Note:** The agentk configuration file and deployment manifests can be located in different projects. It just depends how you want to organize the GitOps workflow.\n\n**1. Create `.gitlab/agent/agent-name/config.yaml` directory in your project** and replace `agent-name` with whatever you want to name your agent.\n\n  ```\n  gitops:\n    manifest_projects:\n    - id: \"Your Project ID\"\n      paths:\n      - glob: '/manifests/*.{yaml,yml,json}'\n  ```\n\n  Remember to replace `Your Project ID` with the projectID of your project, seen below:\n\n   ![Replace projectID for your project](https://about.gitlab.com/images/blogimages/kagent-limited/3.png){: .shadow.medium}\n   Fill in the projectID section with your information.\n   {: .note.text-center}\n\n  **Note:** You can also use the path to the project in GitLab, i.e., mygroup/mysub/myproject.\n\n**2. 
Create agent record in GitLab**\n\n  A GitLab Rails Agent record is used to associate the cluster with the configuration repository project.\n\n  - Go to **Infrastructure > Kubernetes** tab\n\n   ![Click Kubernetes cluster tab](https://about.gitlab.com/images/blogimages/kagent-limited/4.png){: .shadow.medium}\n   Click the Kubernetes cluster tab in GitLab.\n   {: .note.text-center}\n\n  - Click on the **GitLab Agent managed clusters** tab\n\n   ![Click GitLab Agent tab](https://about.gitlab.com/images/blogimages/kagent-limited/5.png){: .shadow.medium}\n   What the GitLab Agent tab looks like\n   {: .note.text-center}\n\n  - Click the **Install a new GitLab Agent** button\n\n   ![Click Install new GitLab Agent button](https://about.gitlab.com/images/blogimages/kagent-limited/5.png){: .shadow.medium}\n   What the \"Install new GitLab agent\" button looks like.\n   {: .note.text-center}\n\n  - Select your agent\n\n   ![How to select your agent in GitLab](https://about.gitlab.com/images/blogimages/kagent-limited/6.png){: .shadow.medium}\n   How to select your agent in GitLab\n   {: .note.text-center}\n\n  - Save the provided token\n\n   ![How to save your provided token](https://about.gitlab.com/images/blogimages/kagent-limited/7.png){: .shadow.medium}\n   Click here to save your provided token.\n   {: .note.text-center}\n\n**3. Open a Terminal window**\n\n**4. Scope kubectl to your cluster**\n\n  ```\n  $ gcloud container clusters get-credentials fern-gitops-2 --zone us-central1-c --project group-cs-9b54eb\n\n  Fetching cluster endpoint and auth data.\n  kubeconfig entry generated for fern-gitops-2.\n  ```\n\n**5. Create the namespace for the Kubernetes agent**\n\n  ```\n  $ kubectl create ns gitlab-kubernetes-agent\n\n  namespace/gitlab-kubernetes-agent created\n  ```\n\n**6. 
Create agent secret**\n\n  This secret is used to store the token needed to configure the agent.\n\n  ```\n  $ kubectl create secret generic -n gitlab-kubernetes-agent gitlab-kubernetes-agent-token --from-literal=token='YOUR_AGENT_TOKEN'\n\n  secret/gitlab-kubernetes-agent-token created\n  ```\n\n**7. Apply the agentk deployment with limited access**\n\n  In this deployment below, we will create the following:\n\n### Namespaces\n\n  - **gitlab-kubernetes-agent**: Where the agent will be deployed\n  - **dude**: A namespace where agentk has permission to deploy\n  - **naww**: A namespace where the agentk has no permissions\n\n### Service accounts\n\n  - **gitlab-kubernetes-agent**: Service account used for running agentk\n\n### Deployments\n\n  - **gitlab-kubernetes-agent**: The actual agentk client application\n\n### Cluster roles and bindings\n\n  - **gitlab-kubernetes-agent-write-cm:** Permission for agentk to write all configmaps on the cluster\n  - **gitlab-kubernetes-agent-read-cm:** Permission for agentk to read all configmaps on the cluster\n\n### Roles and bindings\n\n  - **gitlab-kubernetes-agent-write**: Permission for agentk to write all resources on gitlab-kubernetes-agent ns\n  - **gitlab-kubernetes-agent-read**: Permission for agentk to read all resources on gitlab-kubernetes-agent ns\n  - **gitlab-kubernetes-agent-write-dude**: Permission for agentk to write all resources on dude ns\n  - **gitlab-kubernetes-agent-read-dude**: Permission for agentk to read all resources on dude ns\n\nThe next step is to create the deployment file `agentk.yaml`:\n\n  ```\n  apiVersion: v1\n  kind: Namespace\n  metadata:\n    name: dude\n  ---\n  apiVersion: v1\n  kind: Namespace\n  metadata:\n    name: naww\n  ---\n  apiVersion: v1\n  kind: ServiceAccount\n  metadata:\n    name: gitlab-kubernetes-agent\n    namespace: gitlab-kubernetes-agent\n  ---\n  apiVersion: apps/v1\n  kind: Deployment\n  metadata:\n    name: gitlab-kubernetes-agent\n    namespace: 
gitlab-kubernetes-agent\n  spec:\n    replicas: 1\n    selector:\n      matchLabels:\n        app: gitlab-kubernetes-agent\n    template:\n      metadata:\n        labels:\n          app: gitlab-kubernetes-agent\n        namespace: gitlab-kubernetes-agent\n      spec:\n        serviceAccountName: gitlab-kubernetes-agent\n        containers:\n        - name: agent\n          image: \"registry.gitlab.com/gitlab-org/cluster-integration/gitlab-agent/agentk:stable\"\n          args:\n          - --token-file=/config/token\n          - --kas-address\n          - wss://kas.gitlab.com # for GitLab.com users, use this KAS.\n          volumeMounts:\n          - name: token-volume\n            mountPath: /config\n        volumes:\n        - name: token-volume\n          secret:\n            secretName: gitlab-kubernetes-agent-token\n    strategy:\n      type: RollingUpdate\n      rollingUpdate:\n        maxSurge: 0\n        maxUnavailable: 1\n  ---\n  apiVersion: rbac.authorization.k8s.io/v1\n  kind: ClusterRole\n  metadata:\n    name: gitlab-kubernetes-agent-write-cm\n  rules:\n  - resources:\n    - 'configmaps'\n    apiGroups:\n    - ''\n    verbs:\n    - create\n    - update\n    - delete\n    - patch\n  ---\n  apiVersion: rbac.authorization.k8s.io/v1\n  kind: ClusterRoleBinding\n  metadata:\n    name: gitlab-kubernetes-agent-write-binding-cm\n  roleRef:\n    name: gitlab-kubernetes-agent-write-cm\n    kind: ClusterRole\n    apiGroup: rbac.authorization.k8s.io\n  subjects:\n  - name: gitlab-kubernetes-agent\n    kind: ServiceAccount\n    namespace: gitlab-kubernetes-agent\n  ---\n  apiVersion: rbac.authorization.k8s.io/v1\n  kind: ClusterRole\n  metadata:\n    name: gitlab-kubernetes-agent-read-cm\n  rules:\n  - resources:\n    - 'configmaps'\n    apiGroups:\n    - ''\n    verbs:\n    - get\n    - list\n    - watch\n  ---\n  apiVersion: rbac.authorization.k8s.io/v1\n  kind: ClusterRoleBinding\n  metadata:\n    name: gitlab-kubernetes-agent-read-binding-cm\n  roleRef:\n    
name: gitlab-kubernetes-agent-read-cm\n    kind: ClusterRole\n    apiGroup: rbac.authorization.k8s.io\n  subjects:\n  - name: gitlab-kubernetes-agent\n    kind: ServiceAccount\n    namespace: gitlab-kubernetes-agent\n  ---\n  apiVersion: rbac.authorization.k8s.io/v1\n  kind: Role\n  metadata:\n    namespace: gitlab-kubernetes-agent\n    name: gitlab-kubernetes-agent-write\n  rules:\n  - resources:\n    - '*'\n    apiGroups:\n    - '*'\n    verbs:\n    - create\n    - update\n    - delete\n    - patch\n  ---\n  apiVersion: rbac.authorization.k8s.io/v1\n  kind: RoleBinding\n  metadata:\n    namespace: gitlab-kubernetes-agent\n    name: gitlab-kubernetes-agent-write-binding\n  roleRef:\n    name: gitlab-kubernetes-agent-write\n    kind: Role\n    apiGroup: rbac.authorization.k8s.io\n  subjects:\n  - name: gitlab-kubernetes-agent\n    kind: ServiceAccount\n    namespace: gitlab-kubernetes-agent\n  ---\n  apiVersion: rbac.authorization.k8s.io/v1\n  kind: Role\n  metadata:\n    namespace: gitlab-kubernetes-agent\n    name: gitlab-kubernetes-agent-read\n  rules:\n  - resources:\n    - '*'\n    apiGroups:\n    - '*'\n    verbs:\n    - get\n    - list\n    - watch\n  ---\n  apiVersion: rbac.authorization.k8s.io/v1\n  kind: RoleBinding\n  metadata:\n    namespace: gitlab-kubernetes-agent\n    name: gitlab-kubernetes-agent-read-binding\n  roleRef:\n    name: gitlab-kubernetes-agent-read\n    kind: Role\n    apiGroup: rbac.authorization.k8s.io\n  subjects:\n  - name: gitlab-kubernetes-agent\n    kind: ServiceAccount\n    namespace: gitlab-kubernetes-agent\n  ---\n  apiVersion: rbac.authorization.k8s.io/v1\n  kind: Role\n  metadata:\n    namespace: dude\n    name: gitlab-kubernetes-agent-write-dude\n  rules:\n  - resources:\n    - '*'\n    apiGroups:\n    - '*'\n    verbs:\n    - create\n    - update\n    - delete\n    - patch\n  ---\n  apiVersion: rbac.authorization.k8s.io/v1\n  kind: RoleBinding\n  metadata:\n    namespace: dude\n    name: 
gitlab-kubernetes-agent-write-binding-dude\n  roleRef:\n    name: gitlab-kubernetes-agent-write-dude\n    kind: Role\n    apiGroup: rbac.authorization.k8s.io\n  subjects:\n  - name: gitlab-kubernetes-agent\n    kind: ServiceAccount\n    namespace: gitlab-kubernetes-agent\n  ---\n  apiVersion: rbac.authorization.k8s.io/v1\n  kind: Role\n  metadata:\n    namespace: dude\n    name: gitlab-kubernetes-agent-read-dude\n  rules:\n  - resources:\n    - '*'\n    apiGroups:\n    - '*'\n    verbs:\n    - get\n    - list\n    - watch\n  ---\n  apiVersion: rbac.authorization.k8s.io/v1\n  kind: RoleBinding\n  metadata:\n    namespace: dude\n    name: gitlab-kubernetes-agent-read-binding-dude\n  roleRef:\n    name: gitlab-kubernetes-agent-read-dude\n    kind: Role\n    apiGroup: rbac.authorization.k8s.io\n  subjects:\n  - name: gitlab-kubernetes-agent\n    kind: ServiceAccount\n    namespace: gitlab-kubernetes-agent\n  ```\n\nNow we can apply the deployment with the following command:\n\n  ```\n  $ kubectl apply -f k-agent.yaml\n\n  namespace/dude created\n  namespace/naww created\n  serviceaccount/gitlab-kubernetes-agent created\n  deployment.apps/gitlab-kubernetes-agent created\n  clusterrole.rbac.authorization.k8s.io/gitlab-kubernetes-agent-write-cm created\n  clusterrolebinding.rbac.authorization.k8s.io/gitlab-kubernetes-agent-write-binding-cm created\n  clusterrole.rbac.authorization.k8s.io/gitlab-kubernetes-agent-read-cm created\n  clusterrolebinding.rbac.authorization.k8s.io/gitlab-kubernetes-agent-read-binding-cm created\n  role.rbac.authorization.k8s.io/gitlab-kubernetes-agent-write created\n  rolebinding.rbac.authorization.k8s.io/gitlab-kubernetes-agent-write-binding created\n  role.rbac.authorization.k8s.io/gitlab-kubernetes-agent-read created\n  rolebinding.rbac.authorization.k8s.io/gitlab-kubernetes-agent-read-binding created\n  role.rbac.authorization.k8s.io/gitlab-kubernetes-agent-write-dude created\n  
rolebinding.rbac.authorization.k8s.io/gitlab-kubernetes-agent-write-binding-dude created\n  role.rbac.authorization.k8s.io/gitlab-kubernetes-agent-read-dude created\n  rolebinding.rbac.authorization.k8s.io/gitlab-kubernetes-agent-read-binding-dude created\n  ```\n\n  **Note:** You see we are giving permissions to the gitlab-kubernetes-agent on the `dude` namespace, but not on the `naww` namespace. Currently, permissions for ConfigMaps are necessary but the scope can be reduced.\n\n**8. Make sure agentk is running**\n\n  ```\n  $ kubectl get pods -n gitlab-kubernetes-agent\n\n  NAME                            READY   STATUS    RESTARTS   AGE\n  gitlab-agent-58869d96bd-nqqnf   1/1     Running   0          10s\n  ```\n\nNow that the agentk is deployed, it can start managing our Kubernetes deployments.\n\n## Managing deployments\n\nNow let's go back to the GitLab UI, and add some applications to deploy using GitOps.\n\n**1. Open the Web IDE and create a manifest folder in your project root**\n\n**2. Add a manifest file for what you want to deploy on the `dude` namespace, name it `dude.yaml`**\n\n  ```\n  apiVersion: apps/v1\n  kind: Deployment\n  metadata:\n    name: nginx-deployment-dude\n    namespace: dude  # Can be any namespace managed by you that the agent has access to.\n  spec:\n    selector:\n      matchLabels:\n        app: nginx\n    replicas: 1\n    template:\n      metadata:\n        labels:\n          app: nginx\n      spec:\n        containers:\n        - name: nginx\n          image: nginx:1.14.2\n          ports:\n          - containerPort: 80\n  ```\n\n**3. 
Add a manifest file for what you want to deploy on the `naww` namespace and name it `naww.yaml`**\n\n  ```\n  apiVersion: apps/v1\n  kind: Deployment\n  metadata:\n    name: nginx-deployment-naww\n    namespace: naww  # Can be any namespace managed by you that the agent has access to.\n  spec:\n    selector:\n      matchLabels:\n        app: nginx\n    replicas: 1\n    template:\n      metadata:\n        labels:\n          app: nginx\n      spec:\n        containers:\n        - name: nginx\n          image: nginx:1.14.2\n          ports:\n          - containerPort: 80\n  ```\n\n**4. Commit changes and wait for the pipeline to run**\n\n**5. Check dude namespace**\n\n  ```\n  $ kubectl get pods -n dude\n\n  NAME                                     READY   STATUS    RESTARTS   AGE\n  nginx-deployment-dude-66b6c48dd5-rpxx2   1/1     Running   0          6m22s\n  ```\n\n  Notice that the application has deployed.\n\n**6. Check naww namespace**\n\n  ```\n  $ kubectl get pods -n naww\n\n  No resources found in naww namespace.\n  ```\n\n  Notice there is nothing on there.\n\n**7. 
Look at the k-agent logs**\n\n  ```\n  $ kubectl get pods -n gitlab-kubernetes-agent\n\n  NAME                            READY   STATUS    RESTARTS   AGE\n  gitlab-agent-58869d96bd-nqqnf   1/1     Running   0          10s\n\n  $ kubectl logs gitlab-agent-58869d96bd-nqqnf -n gitlab-kubernetes-agent\n\n  {\"level\":\"info\",\"time\":\"2021-08-19T19:17:26.088Z\",\"msg\":\"Feature status change\",\"feature_name\":\"tunnel\",\"feature_status\":true}\n  {\"level\":\"info\",\"time\":\"2021-08-19T19:17:26.088Z\",\"msg\":\"Observability endpoint is up\",\"mod_name\":\"observability\",\"net_network\":\"tcp\",\"net_address\":\"[::]:8080\"}\n  {\"level\":\"info\",\"time\":\"2021-08-19T19:17:26.375Z\",\"msg\":\"Starting synchronization worker\",\"mod_name\":\"gitops\",\"project_id\":\"devsecops/gitops-project\"}\n  ...\n  ```\n\n  You should see logs as follows:\n\n  Application successfully deployed to `dude`\n\n  ```\n  {\"level\":\"info\",\"time\":\"2021-08-20T22:03:57.561Z\",\"msg\":\"Synchronizing objects\",\"mod_name\":\"gitops\",\"project_id\":\"29010173\",\"agent_id\":711,\"commit_id\":\"221499beaf2dcf267cd40324235570001e928817\"}\n  {\"eventType\":\"resourceStatus\",\"group\":\"apps\",\"kind\":\"Deployment\",\"message\":\"Deployment is available. 
Replicas: 1\",\"name\":\"nginx-deployment-dude\",\"namespace\":\"dude\",\"status\":\"Current\",\"timestamp\":\"2021-08-20T22:03:58Z\",\"type\":\"status\"}\n  ```\n\n  Application failed to deploy to `naww`\n\n  ```\n  {\"eventType\":\"resourceStatus\",\"group\":\"apps\",\"kind\":\"Deployment\",\"message\":\"\",\"name\":\"nginx-deployment-naww\",\"namespace\":\"naww\",\"status\":\"Unknown\",\"timestamp\":\"2021-08-20T22:03:29Z\",\"type\":\"status\"}\n  {\"level\":\"warn\",\"time\":\"2021-08-20T22:03:30.015Z\",\"msg\":\"Synchronization failed\",\"mod_name\":\"gitops\",\"project_id\":\"29010173\",\"agent_id\":711,\"commit_id\":\"221499beaf2dcf267cd40324235570001e928817\",\"error\":\"1 resources failed\"}\n  ```\n\nWe can see that deployments only happen on the `dude` namespace because that is all the k-agent has access to. You can add access to other namespaces by creating [Roles and RoleBindings](https://kubernetes.io/docs/reference/access-authn-authz/rbac/) for each namespace like we did for the `dude` namespace.\n\n## Securing GitOps workflow on Kubernetes\n\nNow you have seen how you can create a more restrictive GitOps workflow, allowing you to meet your security needs.\n\nThanks for reading! I hope this guide brings you one step forward into using and securing your GitOps workflow on Kubernetes. 
For more information see the [GitLab Agent documentation](https://docs.gitlab.com/ee/user/clusters/agent/).\n\nPhoto by \u003Ca href=\"https://unsplash.com/@sebbb?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText\">seabass creatives\u003C/a> on \u003Ca href=\"https://unsplash.com/s/photos/limited?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText\">Unsplash\u003C/a>\n{: .note}\n\n## Read more on Kubernetes:\n\n- [How to install and use the GitLab Kubernetes Operator](/blog/gko-on-ocp/)\n\n- [Threat modeling the Kubernetes Agent: from MVC to continuous improvement](/blog/threat-modeling-kubernetes-agent/)\n\n- [A new era of Kubernetes integrations on GitLab.com](/blog/gitlab-kubernetes-agent-on-gitlab-com/)\n\n- [Understand Kubernetes terminology from namespaces to pods](/blog/kubernetes-terminology/)\n\n- [What we learned after a year of GitLab.com on Kubernetes](/blog/year-of-kubernetes/)\n",[1487,539,750],{"slug":3571,"featured":6,"template":683},"setting-up-the-k-agent","content:en-us:blog:setting-up-the-k-agent.yml","Setting Up The K Agent","en-us/blog/setting-up-the-k-agent.yml","en-us/blog/setting-up-the-k-agent",{"_path":3577,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":3578,"content":3584,"config":3589,"_id":3591,"_type":16,"title":3592,"_source":18,"_file":3593,"_stem":3594,"_extension":21},"/en-us/blog/5-code-review-features",{"title":3579,"description":3580,"ogTitle":3579,"ogDescription":3580,"noIndex":6,"ogImage":3581,"ogUrl":3582,"ogSiteName":697,"ogType":698,"canonicalUrls":3582,"schema":3583},"How GitLab's 5 new code review features will make life easier","Code reviews are hard to get right. 
Here are five new features in our DevOps Platform designed to streamline code reviews and provide vital context.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749667400/Blog/Hero%20Images/lagos-techie-unsplash.jpg","https://about.gitlab.com/blog/5-code-review-features","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How GitLab's 5 new code review features will make life easier\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Brendan O'Leary\"}],\n        \"datePublished\": \"2021-09-09\",\n      }",{"title":3579,"description":3580,"authors":3585,"heroImage":3581,"date":3586,"body":3587,"category":14,"tags":3588},[2509],"2021-09-09","\n_This is the second in a series of blog posts looking at the challenges of code review and the ways a DevOps platform can help. Read the [first post](/blog/the-code-review-struggle-is-real-heres-what-you-need-to-know/)._\n\n## What is a code review, and why is it important?\n\nCode review can be one of the most deceivingly difficult things in delivering software faster. Given the high stakes involved, we've made some key additions to our DevOps Platform that focus on making the code review process as seamless and effective as possible. We believe the number one way to make code reviews effective is to provide context. \n\nToo often we think of [code review tool features](/topics/version-control/what-are-best-code-review-tools-features/) as only \"reading\" and commenting on others' code - but what a good code reviewer does is understand the entire context of the proposed change. 
Context-driven code reviews should include factors like the issue that spurred on the change, how the change impacts non-obvious things like code quality, security, and performance, and whether the code is maintainable after the change is in place.\n\n## Simplifying code reviews\n\nGiven all of that, we made the merge request the central point of change management and it's one of the key benefits of a DevOps Platform. Using a merge request allows code submitters and reviewers alike to have all of the information required to make the right decisions about a particular change. Making sure that everyone has the same information, and is as informed as possible about how a change will impact the project over all, leads to code reviews that are both quicker and more effective.  \n\nOver the last year we've added five features that help ease the code review pain. Here's a look at all of them:\n\n### 1) Meeting you where you are\n\nSome of the biggest code review changes involve meeting folks where they are - and allowing for a more natural feeling code review. As engineers, we spend most of our days glued to our IDE of choice. And we're used to code not just being static words on a screen, but also interacting and running that code to check its performance and outputs. That's why GitLab has brought a truly integrated experience to your development environment.\n\n**[Here's how to get started with a [DevOps platform](/topics/devops-platform/)]**\n\nIf you use Visual Studio Code as your main development environment like I do, you can now [view merge requests directly in VSCode](https://marketplace.visualstudio.com/items?itemName=GitLab.gitlab-workflow). In addition, you can [comment and see comments](https://gitlab.com/gitlab-org/gitlab-vscode-extension/-/issues/342) in that view as well as [checkout the branch directly from VSCode](https://gitlab.com/gitlab-org/gitlab-vscode-extension/-/issues/63). 
This familiar environment gives you all the benefits of GitLab MRs - CI/CD, security scanning, approval workflows - without having to leave your own development environment. \n\nBut what if you're not at your development box? Or you don't have this particular library or project installed and running locally?  Well there's a great solution for that - [Gitpod](https://gitpod.io) - and it also integrates directly with GitLab.  Gitpod allows you to have a working, containerized development environment in seconds. And now with GitLab 14.2, you can [launch a Gitpod workspace directly from the GitLab merge request](https://www.gitpod.io/blog/gitlab-mr-gitpod-integration).  That means with one button in GitLab you can go from a static code review into a running application with all of the proposed changes.\n\n### 2) Code quality notices built into the MR diff\n\nGitLab already brings [code quality](https://docs.gitlab.com/ee/ci/testing/code_quality.html), [security](https://docs.gitlab.com/ee/user/application_security/), [performance](https://docs.gitlab.com/ee/ci/testing/load_performance_testing.html), and [other metrics](https://docs.gitlab.com/ee/ci/testing/metrics_reports.html) directly into the merge request. But in GitLab 13.12, we also added the ability to see [code quality notices directly in the MR diff](https://docs.gitlab.com/ee/ci/testing/code_quality.html). This means that changes to code quality are presented right next to the offending code, making it quick and easy for reviewers to make suggestions about how to keep code quality top notch while shipping changes.\n\n![Code quality notice shown in-line with merge request diff](https://about.gitlab.com/images/blogimages/code_quality_mr_diff_report_v14_2.png)\n\n### 3) File-by-file reviews\n\nSometimes with changes it is nice to use the file explorer view and be able to see changes across multiple files. Other times you might want to do a thorough pass on *every* file to ensure you didn't miss anything. 
Toggling between seeing all of the changed files and one file at a time is a small but valuable feature that makes code reviews easier.\n\n![Animated image showing changing between show all and show one file at time view in a merge request](https://about.gitlab.com/images/blogimages/animated-single-file-review-example.gif)\n\n### 4) Check off a file as reviewed\n\nSpeaking of small but powerful features, one of my favorite features is something many would consider incredibly small.  But to that I would say - there are no small features, only small merge requests 😄!\n\n**[How to [get the most out of your DevOps platform](/topics/devops/seven-tips-to-get-the-most-out-of-your-devops-platform/)]**\n\nThe ability to check off files as reviewed has become a natural part of my code review workflow - even when the code I'm reviewing might be code I wrote myself! It allows me to focus more of my review time on the biggest impact changes, ignoring smaller changes or ones that don't directly impact the biggest concerns in a review. And in every review session I use it to make sure I've ACTUALLY reviewed every file...not that any reviewer would ever leave one out 😉.\n\n![Viewed check box checked and a file hidden as already reviewed](https://about.gitlab.com/images/blogimages/filed-viewed-merge-request.png)\n\n### 5) Reviewers vs. Assignee\n\nThe last improvement to code review in our DevOps Platform is the addition of \"reviewers\" as an option in a merge request, alongside the existing choice of \"assignee.\" This can help speed up code reviews by ensuring all team members who have to sign off on a merge request are informed and consulted while also making sure there is a clear responsibility on who will take the next action on a merge request, or be the one to actually click the \"merge when pipeline succeeds\" button.\n\nWe hope your teams will try these new and improved DevOps Platform code review features - and we're not done yet.  
We'll be shipping improvements and updates to the code review process all of the time. And because everyone can contribute you can add your own ideas and suggestions into our DevOps Platform to make code reviews less painful and more effective.\n",[2266,1128],{"slug":3590,"featured":6,"template":683},"5-code-review-features","content:en-us:blog:5-code-review-features.yml","5 Code Review Features","en-us/blog/5-code-review-features.yml","en-us/blog/5-code-review-features",{"_path":3596,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":3597,"content":3602,"config":3607,"_id":3609,"_type":16,"title":3610,"_source":18,"_file":3611,"_stem":3612,"_extension":21},"/en-us/blog/making-the-case-for-a-devops-platform-what-data-and-customers-say",{"title":3598,"description":3599,"ogTitle":3598,"ogDescription":3599,"noIndex":6,"ogImage":1874,"ogUrl":3600,"ogSiteName":697,"ogType":698,"canonicalUrls":3600,"schema":3601},"Making the case for a DevOps platform: What data and customers say","Don't just take our word for why a DevOps platform means better DevOps and faster, safer releases: here's what the latest data shows and how customers have benefitted.","https://about.gitlab.com/blog/making-the-case-for-a-devops-platform-what-data-and-customers-say","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Making the case for a DevOps platform: What data and customers say\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Valerie Silverthorne\"}],\n        \"datePublished\": \"2021-09-08\",\n      }",{"title":3598,"description":3599,"authors":3603,"heroImage":1874,"date":3604,"body":3605,"category":14,"tags":3606},[1859],"2021-09-08","\n_Our [2022 Global DevSecOps Survey](https://about.gitlab.com/developer-survey/previous/2022/) is out now! 
Learn the latest in DevOps insights from over 5,000 DevOps professionals._\n\nIn the struggle to release safer software faster, development teams are increasingly choosing a DevOps platform to help them get there. In our [2021 Global DevSecOps Survey](/developer-survey/) we asked respondents what their DevOps practices included and a \"DevOps platform\" was among the top four choices, right next to CI/CD, test automation, and DevSecOps.\n\nWe're of course bullish on the idea of a DevOps platform, but we're far from alone. Here's a fresh look at how the data – and the customers – support the optimistic trajectory of a DevOps platform.\n\n## DevOps is hot\n\nThe DevOps market was worth $6 billion in 2020, according to Global Industry Analysts, and five-year growth forecasts range from $17 billion to as much as $23 billion, depending on the firm. \n\n**[Watch a [deep dive into GitLab's DevOps Platform](https://www.youtube.com/watch?v=wChaqniv3HI)]**\n\nThis probably doesn't need saying, but one reason the market is so strong is that DevOps works. In late 2020, Forrester Research conducted \"The State of Modern Technology Operations Q4 2020,\" and concluded [\"the DevOps hypothesis is sound\"](https://go.forrester.com/blogs/the-devops-hypothesis-is-sound-introducing-the-2020-state-of-modern-technology-operations-survey/). The report went further to say that companies successfully working in a DevOps/Agile model were able to release faster and thus have higher revenue growth. \n\n## A DevOps platform is the logical next step\n\nBut in order to do DevOps a team needs tools, and too many tools results in a toolchain, which is where things can get very messy quickly. Time consuming handoffs, integrations and maintenance lead to what Forrester calls the \"DevOps tax\" of roughly 10%, meaning teams have to spend that much of their time each month just trying to keep the toolchains running. 
(In [our 2021 Survey](/developer-survey/), the tax was even higher: 20% of survey takers said they spend between 11% and 20% of their time just on toolchain maintenance and integration).\n\n**[Use a DevOps platform to [avoid the DevOps tax](/topics/devops/use-devops-platform-to-avoid-devops-tax/)]**\n\nA DevOps platform with end-to-end visibility and everything in one place eliminates the tax and boosts DevOps performance. Nearly 12% of survey respondents told us that adding a DevOps platform has allowed them to release software faster. Overall, our survey takers said the use of a DevOps platform resulted in better DevOps, improved collaboration, easier automation and more comprehensive visibility/traceability. \n\nOne developer put it succinctly: \"[Using a DevOps platform] means reduced mean time to recovery (MTTR), quicker time to market, reduced lead time for fixes, and fewer change failures.\"\n\nAnd if all of that wasn't enough, a single DevOps platform gives *everyone* in the company the ability to see and participate in the process. In fact, 23% of our survey takers said everyone in their company – not just Dev and Ops – actually uses the DevOps platform. \n\n## DevOps platforms in the real world\n\nHow do teams really take advantage of a DevOps platform?\n\n[BI Worldwide](/customers/bi-worldwide/), a global engagement agency, found the ability to tie all the processes together made a difference. \"One tool for SCM+CI/CD was a big initial win,\" says Adam Dehnel, product architect at BI. \"Now wrapping security scans into that tool as well has already increased our visibility into security vulnerabilities. The integrated Docker registry has also been very helpful for us. 
Issue/Product management features let everyone operate in the same space regardless of role.\"\n\n**[How to [get the most out of your DevOps platform](/topics/devops/seven-tips-to-get-the-most-out-of-your-devops-platform/)]**\n\nLess turned out to be more at [Glympse](/customers/glympse/), a geo-location sharing service provider that consolidated close to 20 different tools into GitLab. \"Development can move much faster when engineers can stay on one page and click buttons to release auditable changes to production and have easy rollbacks; everything is much more streamlined,\" explains Zaq Wiedmann, lead software engineer at Glympse. \"Within one sprint, just 2 weeks, Glympse was able to implement security jobs across all of their repositories using GitLab's CI templates and their pre-existing Docker-based deployment scripts.\"\n\nWant a more detailed look at the role a DevOps platform can play in your organization? Explore our [comprehensive guide to DevOps platforms](/topics/devops-platform/).\n\n_Our [2022 Global DevSecOps Survey](/developer-survey/) is out now! Learn the latest in DevOps insights from over 5,000 DevOps professionals._\n\n## Read more about the DevOps Platform:\n\n- [The journey to a DevOps Platform](/blog/the-journey-to-a-devops-platform/)\n\n- [How ten steps over ten years led to the DevOps Platform](/blog/how-ten-steps-over-ten-years-led-to-the-devops-platform/)\n\n- [Agile planning with a DevOps platform](/blog/agile-planning-with-a-devops-platform/)\n\n- [Welcome to the DevOps Platform era](/blog/welcome-to-the-devops-platform-era/)\n\n- [It's time to build more accessible software. 
A DevOps platform can help](/blog/how-the-devops-platform-makes-building-accessible-software-easier/)\n",[1128,1589,2651],{"slug":3608,"featured":6,"template":683},"making-the-case-for-a-devops-platform-what-data-and-customers-say","content:en-us:blog:making-the-case-for-a-devops-platform-what-data-and-customers-say.yml","Making The Case For A Devops Platform What Data And Customers Say","en-us/blog/making-the-case-for-a-devops-platform-what-data-and-customers-say.yml","en-us/blog/making-the-case-for-a-devops-platform-what-data-and-customers-say",{"_path":3614,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":3615,"content":3620,"config":3625,"_id":3627,"_type":16,"title":3628,"_source":18,"_file":3629,"_stem":3630,"_extension":21},"/en-us/blog/the-code-review-struggle-is-real-heres-what-you-need-to-know",{"title":3616,"description":3617,"ogTitle":3616,"ogDescription":3617,"noIndex":6,"ogImage":1400,"ogUrl":3618,"ogSiteName":697,"ogType":698,"canonicalUrls":3618,"schema":3619},"The code review struggle is real. Here's what you need to know","If it's time for a DevOps Platform, don't forget the role code review plays. Our 2021 Global DevSecOps Survey showed why it's both critical and tricky to get right.","https://about.gitlab.com/blog/the-code-review-struggle-is-real-heres-what-you-need-to-know","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"The code review struggle is real. Here's what you need to know\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Valerie Silverthorne\"}],\n        \"datePublished\": \"2021-09-03\",\n      }",{"title":3616,"description":3617,"authors":3621,"heroImage":1400,"date":3622,"body":3623,"category":14,"tags":3624},[1859],"2021-09-03","\n_Our [2022 Global DevSecOps Survey](https://about.gitlab.com/developer-survey/previous/2022/) is out now! 
Learn the latest in DevOps insights from over 5,000 DevOps professionals._\n\nWhen making a list of the reasons to consider moving to a DevOps Platform, don't forget about code review, a critical piece of the process that's also an incredible source of frustration and delays to developers and their teams.\n\nIn our [2021 Global DevSecOps Survey](/developer-survey/), respondents told us code quality was the number one reason they chose DevOps. But, when asked what was most likely to delay a product release, code review – vital to code quality – was one of the top four culprits (the others were testing, planning and code development). \n\nThe fact that code review is a pain point is hardly surprising, given that it can often require context-switching, communication, collaboration, and of course subject matter expertise. At a time when it's never been more urgent to release secure code as quickly as possible, it's not a stretch to think code reviews can feel like a hard stop to some teams, particularly if the process is not integrated into an existing workflow.\n\n**[Here's everything you need to know about a [DevOps Platform](/topics/devops-platform/)]**\n\n## Why code review is painful\n\nIn fact, when we asked our survey respondents to tell us in their own words what they struggle with when it comes to code review, they had \\*a lot\\* to say on the subject.\n\n*\"Code reviews can take a long time due to the lack of reviewers.\"*\n\n*\"Many people find it a chore to review code.\"*\n\n*\"We have a strict code review process and it often takes several days for the reviewer to respond to requests for review.\"*\n\n*\"Code review takes time and every developer has to explain how they achieved what they did.\"*\n\n*\"Developers are sometimes unaware they have to do code reviews. They aren't sure how to perform them and if they are effective. Sometimes they are skipped so the process can go through.\"*\n\n*\"Finding someone for code review can be hard (1-day average). 
After that, business tests take time to be completed (2-4 days on average).\"*\n\n[Code review is tricky](/blog/challenges-of-code-reviews/), but almost 60% of those surveyed said the reviews were \"very valuable\" in ensuring code quality and security. And it's not like teams aren't actually tackling code review: In 2021 close to 45% of respondents said they review code weekly, and 22% do it every other week – a 14% jump from 2020.\n\n**[Your organization needs a DevOps Platform team. [Here's why](/topics/devops/how-and-why-to-create-devops-platform-team/)]**\n\nBut anecdotal data tells a slightly different story, from developers saying their teams do no code review at all, to code reviews so comprehensive they include every merge request, ticket, or pull. Many developers said they review code daily, or even multiple times a day. Survey takers said code reviews were most likely conducted using online chat, with developers showing a strong preference for reviewing code in an IDE rather than a browser.\n\n## Better code reviews\n\nAt GitLab we pride ourselves in dogfooding our DevOps Platform, so of course we spend a lot of time thinking about how to [improve our code review process](/blog/better-code-reviews/). We've had a lot of success [using smaller merge requests](/blog/iteration-and-code-review/), as just one example.\n\nOur survey takers told us they were on the same continuous improvement journey – many spent the past year [evaluating how to make their code reviews and other DevOps stages more efficient](/blog/efficient-code-review-tips/). One respondent offered a detailed look:\n\n*\"We evaluated the team and did value stream mapping and finalized the desired state. In most of the cases we found the team needs an automated pipeline for faster delivery and immediate feedback so that they can act fast rather than later. We also moved security left so that developers can fix security issues fast. 
And we also made sure developers are doing code review in a collaborative way through pull requests.\"*\n\nAnother team focused exclusively on reducing its dependence on code review:\n\n*\"(We are no longer) relying on code review to have caught all the test scenarios. We now use a coverage scanning tool to tell us if we've got it all.\"*\n\n## More code reviews > less code reviews\n\nThe struggle is real, but so is the importance. Despite a lot of complaining about code review, developers remained adamant about its importance in DevOps. When we asked devs what they wish they could do more of, code review was at the top of the list, with more than 1000 survey takers indicating they wish they could do way more code reviews than they're doing at present.\n\nIn our next blog post, we'll outline five ways GitLab's DevOps Platform has made code reviews easier.\n\n_Our [2022 Global DevSecOps Survey](/developer-survey/) is out now! Learn the latest in DevOps insights from over 5,000 DevOps professionals._\n",[2266,1128,1589],{"slug":3626,"featured":6,"template":683},"the-code-review-struggle-is-real-heres-what-you-need-to-know","content:en-us:blog:the-code-review-struggle-is-real-heres-what-you-need-to-know.yml","The Code Review Struggle Is Real Heres What You Need To Know","en-us/blog/the-code-review-struggle-is-real-heres-what-you-need-to-know.yml","en-us/blog/the-code-review-struggle-is-real-heres-what-you-need-to-know",{"_path":3632,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":3633,"content":3638,"config":3643,"_id":3645,"_type":16,"title":3646,"_source":18,"_file":3647,"_stem":3648,"_extension":21},"/en-us/blog/the-journey-to-a-devops-platform",{"title":3634,"description":3635,"ogTitle":3634,"ogDescription":3635,"noIndex":6,"ogImage":1991,"ogUrl":3636,"ogSiteName":697,"ogType":698,"canonicalUrls":3636,"schema":3637},"The journey to a DevOps Platform","Understand the history of DevOps or be doomed to repeat it. 
Here's why the journey has been so painful and how a DevOps Platform will help.","https://about.gitlab.com/blog/the-journey-to-a-devops-platform","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"The journey to a DevOps Platform\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Cormac Foster\"}],\n        \"datePublished\": \"2021-09-02\",\n      }",{"title":3634,"description":3635,"authors":3639,"heroImage":1991,"date":3640,"body":3641,"category":14,"tags":3642},[3283],"2021-09-02","\n\nIn a recent blog post [about the importance of a DevOps Platform](/blog/welcome-to-the-devops-platform-era/), GitLab CEO Sid Sijbrandij outlined four phases through which organizations frequently travel as their practice matures. It’s a painful journey we see again and again when we meet new customers. It spans every industry and every company size, and it’s the most mature DevOps teams with the most at stake who’ve felt the most pain. \n\nHistorically, if you wanted DevOps to work, you had to be prepared to pay for it. Just managing the backbone of DevOps – the toolchain -– has been a grind. Your “Jenkins Team,” your “GitHub Team,” or even, as one of our customers described, your “Duct Tape Team” (designed to hold it all together and patch holes), added no end value beyond keeping everything from falling apart. That’s a lot of investment to keep the lights on.\n\nIt’s a hard commitment to swallow, and the truth of it is that you shouldn’t have had to. A big part of the problems behind many “low-performing DevOps teams” stems from a poor set of tools for the job. Broadly put, on behalf of the DevOps tool industry: It’s not you, it’s us. The industry created many of these problems because we were thinking small and building to match.\n\n\nAs a philosophy, DevOps is pretty new, and it’s evolved very quickly. 
That rapid evolution has meant tremendous transformational opportunity, but building for the present left many tools, and the processes behind them, obsolete as soon as they hit the market. \n\nDevOps toolmakers have long been focused on solving discrete, easily understood problems (“BYO DevOps” in Sid’s blog), while DevOps has always aimed at solving bigger problems and looked to a more collaborative, productive transformation. You knew that when you tried to calm the chaos by implementing standards (BIC DevOps). You knew that when you tried to Frankenstack those tools into a servant of your larger ambitions with DIY DevOps integrations. But in the end, tools were creating almost as much work as they automated.\n\nIt makes sense when requirements are evolving so quickly. In 2011, when GitLab offered just a repository and issues, we couldn’t have foreseen [Design Management](https://docs.gitlab.com/ee/user/project/issues/design_management.html) or [ML Ops](/handbook/engineering/incubation/mlops/), but ten years later, they’re a key components of a movement toward a DevOps Platform for everyone. And that’s the point of the DevOps Platform Era (Phase 4). We’ve iterated our way to a place where we can replace blockers with enablement, and \\*support\\* your efforts instead of increasing your burden.\n\n**[Stop paying the “DevOps tax” by moving to a DevOps Platform. [Here’s how](/topics/devops/use-devops-platform-to-avoid-devops-tax/)]**\n\nThis isn’t unexpected. Every technology reaches this inflection point as it evolves. In the not-too-distant-past, customer relationship management (CRM) required a portfolio of sales force automation and marketing automation tools, commerce engines, app servers, analytics engines, and huge amounts of data integration to make it work. 
Now we have SaaS-based CRM solutions with a single monthly fee.\n\nWhile GitLab has always focused on delivering a DevOps Platform as a single application, we're excited to see the industry as a whole shift to a platform mindset. Late last year, Gartner released its vision of the [DevOps Value Stream Delivery Platform](/solutions/value-stream-management/) in a new [Market Guide](https://page.gitlab.com/resources-report-gartner-market-guide-vsdp/), in which we’re happy to be a representative vendor, and we’re excited to watch their coverage grow.\n\n**[Make [the most out of your DevOps platform](/topics/devops/seven-tips-to-get-the-most-out-of-your-devops-platform/)]**\n\nWe’re also excited to hear how a DevOps Platform benefits our customers in concrete ways. In our [2021 DevSecOps Survey](/developer-survey/), respondents told us a DevOps Platform resulted in better DevOps, improved collaboration, easier automation and expanded visibility and traceability. Or, as one survey taker said, a DevOps Platform “gives us reduced mean time to recovery (MTTR), quicker time to market, reduced lead time for fixes, and fewer change failures.”\n\nDevOps hasn’t stopped evolving, and neither have we, but we’ve reached the point where we know how the pieces need to work together, and we’ve built a platform to support it. To see for yourself, [try GitLab Ultimate for free](/free-trial/)!\n\n## Read more about the DevOps Platform:\n\n- [How ten steps over ten years led to the DevOps Platform](/blog/how-ten-steps-over-ten-years-led-to-the-devops-platform/)\n\n- [Making the case for a DevOps platform: What data and customers say](/blog/making-the-case-for-a-devops-platform-what-data-and-customers-say/)\n\n- [Agile planning with a DevOps platform](/blog/agile-planning-with-a-devops-platform/)\n\n- [Welcome to the DevOps Platform era](/blog/welcome-to-the-devops-platform-era/)\n\n- [It's time to build more accessible software. 
A DevOps platform can help](/blog/how-the-devops-platform-makes-building-accessible-software-easier/)\n\n",[1128,1528,836],{"slug":3644,"featured":6,"template":683},"the-journey-to-a-devops-platform","content:en-us:blog:the-journey-to-a-devops-platform.yml","The Journey To A Devops Platform","en-us/blog/the-journey-to-a-devops-platform.yml","en-us/blog/the-journey-to-a-devops-platform",{"_path":3650,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":3651,"content":3657,"config":3662,"_id":3664,"_type":16,"title":3665,"_source":18,"_file":3666,"_stem":3667,"_extension":21},"/en-us/blog/welcome-to-the-devops-platform-era",{"title":3652,"description":3653,"ogTitle":3652,"ogDescription":3653,"noIndex":6,"ogImage":3654,"ogUrl":3655,"ogSiteName":697,"ogType":698,"canonicalUrls":3655,"schema":3656},"Welcome to the DevOps Platform era","GitLab CEO Sid Sijbrandij reflects on the evolution of DevOps and the emergence of the DevOps Platform as the solution for businesses wanting to deliver software faster, more securely, and at a lower cost.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749668101/Blog/Hero%20Images/dop_cover.png","https://about.gitlab.com/blog/welcome-to-the-devops-platform-era","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Welcome to the DevOps Platform era\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Sid Sijbrandij\"}],\n        \"datePublished\": \"2021-08-03\",\n      }",{"title":3652,"description":3653,"authors":3658,"heroImage":3654,"date":3659,"body":3660,"category":14,"tags":3661},[2379],"2021-08-03","\nDevOps has evolved since its infancy, over a decade ago. Swiss developmental psychologist Jean Piaget believed human cognitive development has [four stages](https://www.healthline.com/health/piaget-stages-of-development) (sensorimotor, preoperational, concrete operational, and formal operational). 
Through each of these stages, the human mind obtains new knowledge while building and modifying memories to inform one's understanding of the world around them.\n\nIn the same way that people go through stages as they grow, markets and industries also go through stages of development. Over the years, DevOps has grown into a mature, business-critical practice.\n\nAs the DevOps industry expanded, so did the number and complexity of tool-project integrations within an organization. This was the result of three developments in DevOps:\n\n1. Companies moved from monolithic architectures to [microservices architectures](/topics/microservices/). By doing so, applications could scale independently, allowing teams to move faster.\n2. The faster delivery of software also required companies to use more DevOps tools per project.\n3. The linear growth of both or more projects and more tools per project led to an exponential increase in the number of project-tool integrations.\n\nThis increase in project-tool integrations called for a change in the way organizations adopted DevOps tools. At GitLab, we identified four phases of evolution in the adoption of DevOps tools over time.\n\n## Phase 1 - Siloed DevOps\n\nIn this early phase, each department or team built or purchased their own tools in isolation, which they optimized for their own narrow objectives, without explicitly coordinating with others. This led to a \"Siloed DevOps\" environment that caused problems when teams tried to work together because they were not familiar with the tools of the other teams. It is common for organizations at this level of maturity to have multiple duplicative sets of tooling for common DevOps functions like planning, source code management, and CI/CD. The chaotic environment slows down collaboration and knowledge sharing or stops it altogether.\n\n## Phase 2 - Fragmented DevOps\n\nThe need for less chaos and more harmony drove organizations to the second phase, Fragmented DevOps. 
In this phase, organizations standardized on the same set of tools across the organization. Typically, there was one preferred tool for each stage of the DevOps lifecycle. Teams within the same function could collaborate better, but the tools were not connected between stages. As an example, planning was standardized and deployment was standardized, but each stage was still siloed from each other. It was hard to move through the DevOps lifecycle.\n\n## Phase 3 - DIY DevOps\n\nOrganizations that tried to remedy this by manually integrating their DevOps point solutions together reached the third phase, \"DIY DevOps\". Unfortunately – as many DIYers will know all too well – when you try to put together many different parts that were never designed to work with each other, the end results never fit quite right. In the same way, homegrown toolchains create complex workflows that slow down the development process — and overall cycle time. For many organizations, maintaining DIY DevOps toolchains requires significant effort, resulting in higher costs, slower cycle times, and opportunities for vulnerabilities to be targeted.\n\n## Phase 4 - The DevOps Platform era\n\nThe true potential of DevOps was not fully realized in the first three phases. That's why I am proud that GitLab is the leader in enabling the fourth phase, the DevOps Platform era. [The DevOps Platform](/topics/devops-platform/) is a single application with one user interface and a unified data store. It includes every stage of the DevOps lifecycle and brings together development, operations, and security teams. It allows these groups to collaboratively plan, build, secure, and deploy software. As a result, this improves businesses' velocity, efficiency, and security, allowing them to deliver software faster and at a lower cost.\n\n## The future of DevOps\n\nWhen I think about the future of DevOps, three things stand out. First, I believe that a platform solution with embedded security _ is_ the future. 
Security that is built-in, not bolt-on, is needed to secure a software supply chain from end-to-end without sacrificing speed for security.\n\nFor example, the world's most trusted hacker-powered security company, HackerOne, is using The DevOps Platform. With GitLab, they've been able to replace their DIY toolchain and shift security left. HackerOne is now catching security flaws early and getting immediate feedback since security is built into the developer's workflow.\n\nIn May, the U.S. government [issued a new policy](https://www.whitehouse.gov/briefing-room/presidential-actions/2021/05/12/executive-order-on-improving-the-nations-cybersecurity/) aimed at securing both the private and public sector software supply chains against malicious cyberattacks. Now is the time to make security a fundamental part of your DevOps journey. In today's landscape, you need to secure 100% of your applications every time they get updated. The only practical way to do that is to integrate security into the platform.\n\nSecond, I believe that machine learning will be critical in making the DevOps workflow faster. In the [GitLab 2021 DevSecOps survey](/developer-survey/), 75% of respondents reported that their DevOps teams are using or planning to use machine learning or AI for testing and code review. In June, [GitLab announced the acquisition](/press/releases/2021-06-02-gitlab-acquires-unreview-machine-learning-capabilities.html) of a machine learning-based solution called UnReview. This acquisition and continued machine learning integration will automate workflows and compress the DevOps cycle time. GitLab is focused on using machine learning to reduce friction in your work, so you can spend more time innovating.\n\nThird, I believe DevOps platform adoption will accelerate. 
[Gartner predicts that by 2023](/press/releases/2020-12-09-gitlab-cited-as-representative-vendor-in-gartner-market-guide.html), 40% of organizations will have switched from multiple point solutions to a platform in order to streamline application delivery. Gartner's prediction is an increase from the base of 10% or less using a DevOps Platform in 2020. GitLab customers often tell us that DIY toolchains are too complicated. If you're feeling that way too, it's time to choose a path to simplicity. The fastest way to get there is with the DevOps Platform.\n\nYou don't need to rip and replace to get started. Many customers began their GitLab journey with Source Code Management and CI. When they were ready, GitLab helped them to replace the rest of their DIY DevOps. When _you're_ ready, GitLab will work with you and GitLab's partner ecosystem to help you achieve your DevOps objectives on your schedule.\n\nJust like human cognitive development, DevOps has evolved thanks to combined experiences and new knowledge as it became available. I'm grateful to the innovators before us with the same goal: To make DevOps more efficient and collaborative.\n\n## Join us at GitLab Virtual Commit\n\nWant more DevOps? Tune in virtually at [GitLab Commit August 3-4, 2021](/events/commit/). 
Watch a video of the keynote address this blog post is based on:\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://player.vimeo.com/video/582282482\" width=\"640\" height=\"360\" frameborder=\"0\" allow=\"autoplay; fullscreen; picture-in-picture\" allowfullscreen>\u003C/iframe>\n\u003C!-- blank line -->\n\n## Read more about the DevOps Platform:\n\n- [The journey to a DevOps Platform](/blog/the-journey-to-a-devops-platform/)\n\n- [How ten steps over ten years led to the DevOps Platform](/blog/how-ten-steps-over-ten-years-led-to-the-devops-platform/)\n\n- [Making the case for a DevOps platform: What data and customers say](/blog/making-the-case-for-a-devops-platform-what-data-and-customers-say/)\n\n- [Agile planning with a DevOps platform](/blog/agile-planning-with-a-devops-platform/)\n\n- [It's time to build more accessible software. A DevOps platform can help](/blog/how-the-devops-platform-makes-building-accessible-software-easier/)\n",[1128,837,1210],{"slug":3663,"featured":6,"template":683},"welcome-to-the-devops-platform-era","content:en-us:blog:welcome-to-the-devops-platform-era.yml","Welcome To The Devops Platform Era","en-us/blog/welcome-to-the-devops-platform-era.yml","en-us/blog/welcome-to-the-devops-platform-era",{"_path":3669,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":3670,"content":3676,"config":3683,"_id":3685,"_type":16,"title":3686,"_source":18,"_file":3687,"_stem":3688,"_extension":21},"/en-us/blog/gitlab-14-modern-devops",{"title":3671,"description":3672,"ogTitle":3671,"ogDescription":3672,"noIndex":6,"ogImage":3673,"ogUrl":3674,"ogSiteName":697,"ogType":698,"canonicalUrls":3674,"schema":3675},"Modern DevOps shift in GitLab 14: Speed, trust & visibility","GitLab 14 accelerates modern DevOps, bringing velocity with confidence, built-in security, and visibility into DevOps 
success.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749668091/Blog/Hero%20Images/gitlab-version-14-wide.png","https://about.gitlab.com/blog/gitlab-14-modern-devops","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"GitLab 14 signals shift to modern DevOps: A DevOps platform with velocity, trust, and visibility\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Brian Glanz\"}],\n        \"datePublished\": \"2021-06-22\",\n      }",{"title":3677,"description":3672,"authors":3678,"heroImage":3673,"date":3680,"body":3681,"category":14,"tags":3682},"GitLab 14 signals shift to modern DevOps: A DevOps platform with velocity, trust, and visibility",[3679],"Brian Glanz","2021-06-22","\n\nThe DevOps era began with a big idea – dissolve silos to deliver better software, faster. In the transition from classic software paradigms, DIY DevOps toolchains were built with parts that were never designed to work together. That DIY DevOps era left many trapped in new silos, without visibility, and mired in maintenance. Business outcomes suffered as the potential of DevOps was never fully realized.\n\n\n## The next iteration of DevOps\nThere is a better way to build software. [GitLab 14](/gitlab-14/) delivers modern DevOps with a [complete DevOps platform](/topics/devops-platform/), for a streamlined experience that unleashes the power of DevOps. Over the past year, GitLab has shipped advanced DevOps platform capabilities that enable any team to build and deliver software with velocity, trust, and visibility – no matter their size, industry, or location.  
\n\nWith enhancements across the software development lifecycle, GitLab has placed strongly in several market reports across a broad range of areas from [Enterprise Agile Planning](/analysts/gartner-eapt21/) and [Application Security Testing](/analysts/gartner-ast21/) to [Continuous Delivery and Release Automation](/analysts/forrester-cdra20/). Tying it all together with a platform approach is a keystone of the next shift in the DevOps movement. GitLab was named a representative vendor in a market overview of [DevOps platforms](/analysts/gartner-vsdp21/).\n\nAs a “new normal” is taking shape after the pandemic, companies worldwide are coming to grips with what it means to work in hybrid and remote environments. A modern DevOps solution needs to meet the emerging demands for a more flexible workplace. GitLab has been a pioneer and champion of remote work for years and was recently [mentioned by Fast Company as a world-changing idea](https://www.fastcompany.com/90624506/world-changing-ideas-awards-2021-general-excellence-finalists-and-honorable-mentions). Having unlocked many of the secrets to remote work success, GitLab stepped up to help others out by shipping a [Remote Work Playbook](/company/culture/all-remote/) and a Coursera course on “[How to Manage a Remote Team](https://www.coursera.org/learn/remote-team-management).” Our all-remote know-how and experience went into the development of GitLab 14 to build capabilities that work wherever you do. \n\n\n## Velocity with confidence\nGitLab 14 enables you to increase development velocity and stay confident with a consistent and efficient developer and operator experience, yielding a more predictable DevOps lifecycle. By using one platform for source code management, continuous integration (CI), continuous delivery (CD), infrastructure as code, security, and beyond, teams are more efficient, collaborative, and productive. 
Our [2021 Global DevSecOps Survey](/blog/why-software-developer-job-satisfaction-matters-and-how-to-make-it-happen/) shows engineers are happier when they can focus on innovation and adding value than when maintaining integrations – and happy developers attract and retain talent.\n\nOrganizations with a mature DevOps culture know the value of managing configuration as code, IT infrastructure as code, and more, with the same platform and best practices used for application development. In GitLab 14, our [Pipeline Editor](/releases/2021/01/22/gitlab-13-8-released/#the-new-pipeline-editor-makes-cicd-easy-to-use) lowers the barrier to entry for CI/CD while also accelerating power users, with visual authoring and versioning, continuous validation, and pipeline visualization. GitLab 14’s [Kubernetes Agent](/blog/gitlab-kubernetes-agent-on-gitlab-com/) enables secure deployment to your cloud-native infrastructure. GitLab 14 also meets customers where they are by supporting GitOps with agent-based and agentless approaches and allows for deployments anywhere, regardless of whether infrastructure is cloud-native.\n\n\n## Visibility into DevOps success\nThe [DevOps Research and Assessment (DORA)](https://www.devops-research.com/research.html) firm’s industry-defining research shows how focused improvement of software delivery performance leads to positive business outcomes like happier customers, greater market share, and increased revenue. Focusing efforts requires measuring four metrics in particular that are highly correlated with business performance. These are deployment frequency, lead time for changes, time to restore service, and change failure rate. \n\nAs a complete DevOps platform, GitLab 14 is uniquely capable of delivering visibility into DevOps with out of the box measurement and visualization of operational metrics, including DORA metrics, that have come to define DevOps maturity. 
With that visibility comes confidence in the ability to drive both team performance and competitive advantage. \n\nGitLab 14 also takes the key next step toward actionability, with an array of customizable Value Stream Analytics to optimize workflows. Constituent analytics like mean time to merge can uncover bottlenecks such as dysfunction in code review, allowing management to identify the root causes of slowdowns in the DevOps lifecycle, and enabling IT leaders to align with business priorities.\n\n\n## Built-in security\nSecurity without sacrifice – the promise of [DevSecOps](/topics/devsecops/) – is realized with built-in security for platform-driven alignment that decreases exposure, while keeping projects on-time and on-budget. In a world where security is everyone’s responsibility, automating processes and policies gives developers and security pros the information they need to meet this responsibility.  \n\nEnforcing security on every commit is a matter of course in GitLab 14’s CI/CD, providing real-time feedback as development is happening. A Semgrep analyzer for application security testing offers access to a global rule registry and customization for policy requirements. Acquisitions of Fuzzit and Peach Tech, and GitLab’s new proprietary browser-based DAST crawler, test modern APIs and Single Page Applications (SPAs) demonstrating innovation to meet requirements of modern DevOps. New vulnerability management capabilities increase visibility, providing the controls and observability needed to protect the software factory and its deliverables.\n\n\n## Everyone can contribute\nGitLab 14 has been built by the company and the community together to advance global adoption of modern DevOps. \n\nThanks to GitLab’s open core model, more than 10,000 merge requests from the wider community have been merged into the product since January 2016. The wider community contributes alongside more than 1,300 GitLab team members, all working remotely from 68 countries. 
GitLab believes in a world where everyone can contribute.\n\nGitLab has more than 30 million estimated registered users, from startups to global enterprises, including Ticketmaster, Jaguar Land Rover, Nasdaq, Dish Network, Comcast, and [more who have shared their stories](/customers/), and who trust GitLab to deliver great software, faster.\n",[1128,1210],{"slug":3684,"featured":6,"template":683},"gitlab-14-modern-devops","content:en-us:blog:gitlab-14-modern-devops.yml","Gitlab 14 Modern Devops","en-us/blog/gitlab-14-modern-devops.yml","en-us/blog/gitlab-14-modern-devops",{"_path":3690,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":3691,"content":3697,"config":3703,"_id":3705,"_type":16,"title":3706,"_source":18,"_file":3707,"_stem":3708,"_extension":21},"/en-us/blog/devops-platform-supply-chain-attacks",{"title":3692,"description":3693,"ogTitle":3692,"ogDescription":3693,"noIndex":6,"ogImage":3694,"ogUrl":3695,"ogSiteName":697,"ogType":698,"canonicalUrls":3695,"schema":3696},"How a DevOps Platform helps protect against supply chain attacks","Built-in security features can simplify your software factory","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749665839/Blog/Hero%20Images/devops.png","https://about.gitlab.com/blog/devops-platform-supply-chain-attacks","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How a DevOps Platform helps protect against supply chain attacks\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Cindy Blake\"}],\n        \"datePublished\": \"2021-04-28\",\n      }",{"title":3692,"description":3693,"authors":3698,"heroImage":3694,"date":3700,"body":3701,"category":14,"tags":3702},[3699],"Cindy Blake","2021-04-28","The recent Solarwinds supply chain attack made us all question the security of our software development, deployment, and use, particularly in the era of [DevOps](/topics/devops/) and cloud-native applications. 
Security teams often struggle to ensure security is not an afterthought as software is developed faster, released more often, and uses tools that have been beyond the radar of the security team. In fact, when [NIST describes DevSecOps](https://csrc.nist.gov/Projects/devsecops) they say that DevOps is being embraced \"_often without a full understanding and consideration of security_,\" putting CISOs at a disadvantage right when they are being tasked with ensuring a secure software supply chain.\n\n## The problem with a traditional AppSec approach\n\nCISOs often struggle to bridge large investments in traditional [application security](/topics/devsecops/) (AppSec) tools with more modern approaches that embed security into the software factory itself. Traditional AppSec approaches lead to several challenges:\n\n*   **Cost:** One tool for each scan type can get expensive\n*   **Integration:** Integrating point solutions into CI toolchains requires ongoing maintenance\n*   **Trade-offs:** Emphasis on triaging vulnerabilities and prioritizing risk of CVE findings (signature-based) over remediation\n*   **Legacy:** Limited comprehension of modern infrastructure as code and misconfigurations with little thought for container security and API security\n*   **Lack of visibility and context** into the code and the build itself with Security teams not often involved in build process and controls\n*   **Compliance**: Difficult to apply/administer policies across multiple CI and security tools\n\nWhile traditional tools fall short, the importance of software supply chain security is in the spotlight, even as a point of national security. It is anticipated that the US government will release additional guidelines for software used by government agencies. 
In addition, [NIST's DevSecOps project](https://csrc.nist.gov/Projects/devsecops) is working to create a set of DevSecOps practices explaining that \"_DevSecOps helps ensure that security is addressed as part of all DevOps practices by integrating security practices and automatically generating security and compliance artifacts throughout the process_.\" Similarly, the Cloud Native Computing Foundation (CNCF) has [drafted recommended best practices](https://docs.google.com/document/d/1VURD9rdEhiuqPdixhEozkHw01Tk6e2AaJVjBK3pK6Zc/edit#heading=h.jzcan9eheioa) for DevSecOps. Together, these guidelines will provide a starting point and a way to identify the most critical efforts for compliance.\n\n## The role of a DevOps Platform\n\nAs one of the only true [end-to-end DevOps platforms](/solutions/devops-platform/), GitLab has a role to play. GitLab can help you meet the challenges of developing modern applications while enabling a higher level of security. GitLab is a recognized [leader in both SCM and CI](/analysts/forrester-cloudci19/), and more recently, an up-and-comer in the application security space. 
GitLab has built security features right into the DevOps platform (DevSecOps anyone?), and industry analysts have included GitLab in a wide [variety of reports](/analysts/) where the GitLab's security features are compared head-to-head against point security solutions.\n\nAn integrated platform like GitLab brings benefits that individual tools cannot, including things like:\n\n*   **End-to-end visibility and auditability**: Who changed what, where, and when.\n*   **Consistent application and administration of policies**: Both what policies are used where, and the actions taken for exceptions\n*   **More intelligent response** through greater end-to-end context\n*   **Reduced attack surface** of a simplified toolchain\n\n## Five steps to greater application security\n\nWith an estimated 30 million+ users from startups to global enterprises, GitLab has to take security seriously.  Here are five ways to combine our powerful DevSecOps platform with a holistic security program to help you quickly gain control and visibility of your software supply chain. These efforts will require a combination of people, processes and tools, along with cross-department collaboration.\n\n### Step 1: ASSESS your security hygiene, considering new attack surfaces\n\nEven the most damaging attacks tend to rely on complacency toward basic security hygiene (think patches and passwords) and use tried and true exploits that have been around for a long time. While this recommendation may not be anything new, the scope of the effort may be. Revisit your security policies, and consider new attack surfaces such as your software development toolchains, containers, orchestrators, and infrastructure as code. Are [secrets detected](https://docs.gitlab.com/ee/user/application_security/secret_detection/)? Is [multi-factor authentication](https://docs.gitlab.com/ee/user/profile/account/two_factor_authentication.html) used? 
Check your admin settings for [visibility and access controls](https://docs.gitlab.com/ee/administration/settings/visibility_and_access_controls.html#visibility-and-access-controls).\n\n### Step 2: AUTOMATE scanning, policies, and compliance\n\nDo you automate security scans within standardized CI pipelines? Most people use SAST and/or penetration testing and more are adding dependency scanning. Each type of scan will find different types of vulnerabilities, but applying comprehensive scans to your entire application portfolio can be prohibitively expensive with point solutions. If you try to integrate multiple scan types into a heterogeneous tool chain, the complexity and cost is compounded.\n\nGitLab's single platform includes [comprehensive app sec scanning](https://docs.gitlab.com/ee/user/application_security/) with SAST, DAST, dependency, container scanning, secrets detection, and fuzz testing including API fuzzing. That allows you to do three things:\n\n1. **Scan all of your code**, including third party code and code in containers. You can easily [configure security scans used](https://docs.gitlab.com/ee/user/application_security/configuration/#security-configuration) via GitLab Ultimate's AutoDevOps feature.\n2. **Scan every code change**. GitLab's built-in [app sec testing](https://docs.gitlab.com/ee/user/application_security/) scans every code change using multiple scan methods with [one common UI](https://docs.gitlab.com/ee/user/application_security/vulnerability_report/). Even [DAST](https://docs.gitlab.com/ee/user/application_security/dast/) can be run within the CI pipeline by leveraging the [review app](https://docs.gitlab.com/ee/ci/review_apps/) (within GitLab's CI capability). Because scans occur before the code is pushed into a main branch, it's possible to introduce fewer vulnerabilities into shared environments.\n3. **Utilize Fuzz Testing** to find insecure logic flaws that do not have a signature of a known CVE. 
GitLab's security scanning includes both [coverage-guided](https://docs.gitlab.com/ee/user/application_security/coverage_fuzzing/) and [behavioral testing for web APIs](https://docs.gitlab.com/ee/user/application_security/api_fuzzing/). Because fuzz testing is integrated into the CI pipeline alongside the other scanners, the results are more readily available and set up is easier than stand-alone fuzzing.\n\nAutomation is great, but you also must ensure that it is applied in a standardized, controlled CI process. As CNCF points out, \"_Automating as much of the software supply chain as possible can significantly reduce the possibility of human error and configuration drift_.\" Do you require a [standardized CI template to be](https://docs.gitlab.com/ee/development/cicd/templates.html#requirements-for-cicd-templates) used for all projects? Do you [automatically apply compliance](https://docs.gitlab.com/ee/user/project/settings/#compliance-pipeline-configuration) to an industry standard? When vulnerabilities are found, who can [approve MRs with policy exceptions](https://docs.gitlab.com/ee/user/project/merge_requests/approvals/index.html#required-approvals)?\n\nAutomating policies ensures more consistent compliance while also reducing the audit surface. 
The automation of CI/CD is one vehicle to apply [common controls](https://docs.gitlab.com/ee/administration/compliance.html)that include things like:\n\n*   [Segregation of incompatible duties](https://docs.gitlab.com/ee/user/project/merge_requests/approvals/rules.html#merge-request-approval-segregation-of-duties)\n*   [Identity and access approval controls](https://docs.gitlab.com/ee/user/project/merge_requests/approvals/rules.html)\n*   Configuration management and [change control](https://docs.gitlab.com/ee/user/project/repository/push_rules.html)\n*   [Access restrictions](https://docs.gitlab.com/ee/user/project/merge_requests/approvals/rules.html#editing--overriding-approval-rules-per-merge-request) for changes to configurations and pipelines\n*   [Protected branches](https://docs.gitlab.com/ee/user/project/protected_branches.html) and environments\n*   [Auditing](https://docs.gitlab.com/ee/administration/audit_events.html)\n*   [Licensed code usage](https://docs.gitlab.com/ee/user/compliance/license_compliance/#license-compliance)\n*   [Security testing](https://docs.gitlab.com/ee/user/application_security/)\n\nGitLab Ultimate offers many [compliance capabilities](/solutions/compliance/) within a single DevOps platform. Included are a [compliance dashboard](https://docs.gitlab.com/ee/user/compliance/compliance_report/index.html) along with a [host of compliance features](https://docs.gitlab.com/ee/administration/compliance.html), [compliance management](/direction/govern/compliance/compliance-management/), and audit reports. In short, apply automation wherever possible to make it more likely that policies are applied consistently.\n\n### Step 3: PROTECT the application's infrastructure\n\nModern applications rely on much more than the code itself. You have to consider your cloud-native infrastructure like Docker and Kubernetes environments. Apply container scanning and use SAST to scan Helm charts. 
Consider using GitLab [Container Host Security](https://docs.gitlab.com/ee/update/removals.html) and [Container Network Security](https://docs.gitlab.com/ee/update/removals.html). GitLab's integration with [Falco](https://docs.gitlab.com/ee/update/removals.html) and [AppArmor](https://docs.gitlab.com/ee/update/removals.html), when used in the CI environment, can alert and prevent build servers from doing unexpected things such as modifying scheduled tasks (OS configuration in general). Check more obscure things like the container registry. Who at your org has write access? A compromise of one person could potentially lead to a compromise of the container registry, which could lead (via pipelines) to compromises of numerous projects.\n\n### Step 4: SECURE the software factory itself\n\nGitLab's DevOps platform simplifies the effort required to secure the software factory itself with one place to manage access, software factory policies, and repeatable, measurable processes. GitLab's Security team has several blog articles on best practices and projects that may be helpful:\n\n*   Applying [Zero Trust](/blog/tags.html#zero-trust) principles (things like least privilege access). Even one of our vendors took notice: [GitLab Goes All In on Zero Trust to Secure a Fully Remote...](https://www.okta.com/blog/2020/06/gitlab-goes-all-in-on-zero-trust-to-secure-a-fully-remote-workforce/)\n*   Our continued integration of new technology has an emphasis on both productivity and security. For instance, our integration with [Hashicorp Vault](/partners/technology-partners/hashicorp/) can require all entities operating in the supply chain environment to mutually authenticate using hardened authentication mechanisms with regular key rotation.\n*   Consider hardening the GitLab instance. 
These [best practices](/blog/gitlab-instance-security-best-practices/) are a place to start while additional research is being done [openly to help improve the product and further enhance the hardening process](https://gitlab.com/gitlab-com/gl-security/security-research/gitlab-standalone-instance). Hardened UBI-based cloud native GitLab images should be checked and verified regularly.\n*    [CI/CD Variables](https://about.gitlab.com/blog/demystifying-ci-cd-variables/) can control the behavior of  pipelines. [Scoped environments](https://docs.gitlab.com/ee/ci/environments/#scoping-environments-with-specs) can limit the scope of a CI/CD variable by defining for which environments it can be available (production, for instance). \n\n### Step 5: ITERATE with continuous assessment and improvement\n\nSecuring the modern software supply chain will require you to revisit steps 1-4 above continuously. The more complex your toolchain or environment, the harder it is to stay current on securing your app and your supply chain. Modern application development processes demand a new way of thinking, tooling the software factory itself for security and controls, rather than inspecting code after it's built. This mindset can be challenging, especially when you are saddled with expensive, traditional tools.\n\nGitLab's DevOps platform with built-in security features makes this continuous improvement possible, but there are certainly no guarantees when it comes to security. If a nation state or individual is persistent enough with a very targeted attack, such as the one against Solarwinds, even the best defenses may be susceptible to malice. Neither GitLab, nor any other vendor, can claim to be able to protect customers from these attacks alone. 
A Defense-in-Depth strategy is always best and the simplicity of a single DevSecOps platform like GitLab is a powerful security enabler that can simplify your efforts and improve your visibility and control points.\n",[837,750],{"slug":3704,"featured":6,"template":683},"devops-platform-supply-chain-attacks","content:en-us:blog:devops-platform-supply-chain-attacks.yml","Devops Platform Supply Chain Attacks","en-us/blog/devops-platform-supply-chain-attacks.yml","en-us/blog/devops-platform-supply-chain-attacks",{"_path":3710,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":3711,"content":3717,"config":3724,"_id":3726,"_type":16,"title":3727,"_source":18,"_file":3728,"_stem":3729,"_extension":21},"/en-us/blog/custom-actions-rasa-gitlab-devops",{"title":3712,"description":3713,"ogTitle":3712,"ogDescription":3713,"noIndex":6,"ogImage":3714,"ogUrl":3715,"ogSiteName":697,"ogType":698,"canonicalUrls":3715,"schema":3716},"Creating custom action containers for Rasa X with GitLab","Using the GitLab DevOps Platform together with Rasa X can make it easier for stakeholders to deliver a virtual assistant by automating potentially time-consuming, error-prone steps. 
In this case, we’ve shown how you can build Rasa custom action servers and deploy them to Kubernetes.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749668410/Blog/Hero%20Images/vablog.jpg","https://about.gitlab.com/blog/custom-actions-rasa-gitlab-devops","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Create and Deploy Custom Actions Containers to Rasa X using Gitlab DevOps Platform\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"William Arias\"}],\n        \"datePublished\": \"2021-04-06\",\n      }",{"title":3718,"description":3713,"authors":3719,"heroImage":3714,"date":3721,"body":3722,"category":14,"tags":3723},"Create and Deploy Custom Actions Containers to Rasa X using Gitlab DevOps Platform",[3720],"William Arias","2021-04-06","\n**This blog post was a collaboration between William Arias, from Gitlab, and Vincent D. Warmerdam, from Rasa. You can find the same blog post on [Rasa's blog](https://blog.rasa.com/create-and-deploy-custom-actions-containers-to-rasa-x-using-gitlab-devops-platform/)**.  \n\n## Create and Deploy Custom Actions Containers to Rasa X using Gitlab DevOps Platform\nVirtual assistants do more than just carry on conversations. They can send emails, make updates to a calendar, or call an API endpoint. Essentially, they can do actions that add significant value and convenience to the user experience.\nIn assistants built with Rasa*, this type of functionality is executed by custom code called custom actions. As with any code you run in production, you’ll need to think about how you want to deploy updates to custom actions. In this blog post, we’ll show you how to set up GitLab to deploy custom action Docker containers to your Kubernetes cluster. 
If we follow [good DevOps practices](/stages-devops-lifecycle/) we can greatly speed up the development and quality of our  virtual assistants.\n* Rasa Open Source is a machine learning framework for building text and voice-based virtual assistants. It provides infrastructure for understanding messages, holding conversations, and connecting to many messaging channels and APIs. Rasa X is a toolset that runs on top of Rasa Open Source, extending its capabilities. Rasa X includes key features for sharing the assistant with test users, reviewing and annotating conversation data, and deploying the assistant. [Learn more about Rasa.](https://rasa.com/docs/)\n\n## Deployment high-level overview\nThe typical workflow for deploying a new version of custom actions is outlined below.  \n![actions-process](https://about.gitlab.com/images/blogimages/actions-process.png){: .shadow}\n\nEvery change to your custom actions code will require a new container image to be built and pulled by Rasa X. Gitlab CI/CD can save you from doing a lot of manual work and automate steps like the ones described in the workflow above. Let's see how to do it.  \n\n## Using Rasa with Gitlab DevOps Platform\nLet's create a pipeline that will automate manual steps.\n\n---\n**NOTE**\nThis article assumes you have your [Gitlab Project](https://gitlab.com/warias/gl-commit-2020) with your customs Actions Code created along with a [Google Kubernetes Cluster](https://cloud.google.com/kubernetes-engine/docs/quickstart).\n\n---\n\nIf you are a Gitlab user you are probably familiar with .gitlab-ci.yml file and its CI/CD capabilities. Every time you commit a change to your customs actions code you want Gitlab to run a script that will build and update your docker containers. 
\n![actions-process-2](https://about.gitlab.com/images/blogimages/process2.png){: .shadow}\n\nLet's breakdown the CI/CD pipeline by describing the gitlab-ci.yml file so you can use it and customize it to your needs\n## Variables\nWe make use of environment variables created in Gitlab at the moment of running the Jobs to define our actions Docker image  \n\n```\nvariables:\n    ACTIONS_CONTAINER_IMAGE: $CI_REGISTRY_IMAGE/$CI_COMMIT_REF_SLUG\n    TAG: $CI_COMMIT_SHA\n    K8S_SECRET: secret-gitlab-registry\n\n```\n\nThe snippet above does the following:\n- It defines the name of the Docker Image for custom actions using environment variables ```$CI_REGISTRY_IMAGE/$CI_COMMIT_REF_SLUG.``` This will make the name of the Docker image different for every commit\n- It creates a secret used to pull the Rasa Action Image from the Gitlab Private Registry to the Google Kubernetes Cluster. \n\n## Stages\nWe have two main stages in our pipeline, build and deploy:\n```\nstages:\n  - build\n  - deploy \n```\nEvery time there is a new commit with changes to our custom actions code, or when we decide to run the CI/CD Pipeline it will:\n- Build: Here, we automate the building of the Docker image using the variables defined above, and the Dockerfile. We also tag the image and push it to the GitLab container registry.\n- Deploy: Here we log-in to Kubernetes Engine on Google Cloud and deploy the newly created Actions image to Rasa X.\nLet's see it in more detail:  \n\n**Build**:\n```\nbuild-actions-image:\n image: docker:19.03.1\n services:\n   - docker:dind\n stage: build\n script:\n   - docker login -u ```$CI_REGISTRY_USER -p $CI_REGISTRY_PASSWORD $CI_REGISTRY```\n   - docker build -t $ACTIONS_CONTAINER_IMAGE:$TAG -f Dockerfile .\n   - docker push $ACTIONS_CONTAINER_IMAGE:$TAG\n```\nThe job build-actions-image executed on the build stage takes advantage of the CI/CD variables that are part of the environment where the pipelines run. 
It automates the usage of Docker commands to build the Actions image by reading its corresponding Dockerfile. The output of this stage is a new Custom Actions image per every commit with code changes.  \n\n**Deploy**:\n```\ndeploy-custom-action-x:\n  stage: deploy\n  image: crileroro/gcloud-kubectl-helm\n  variables:\n    GCP_PROJECT: gke-project-302411\n    GCP_REGION: europe-west1\n    CLUSTER_NAME: gke-python-demo\n    NAMESPACE_RASA: rasa-environment \n  before_script:\n    - gcloud auth activate-service-account --key-file $SERVICE_ACCOUNT_GCP\n    - gcloud config set project $GCP_PROJECT\n    - gcloud config set compute/region $GCP_REGION\n    - gcloud container clusters get-credentials $CLUSTER_NAME\n  script:\n    - kubectl create ns $NAMESPACE_RASA --dry-run=client -o yaml | kubectl apply -f -\n    - kubectl create secret docker-registry $K8S_SECRET\n              --docker-server=$CI_REGISTRY\n              --docker-username=$CI_DEPLOY_USER\n              --docker-password=$CI_DEPLOY_PASSWORD\n              --namespace $NAMESPACE_RASA\n              -o yaml --dry-run=client | kubectl apply -f -\n    - helm repo add rasa-x https://rasahq.github.io/rasa-x-helm\n    - helm upgrade -i --reuse-values \n                      --namespace $NAMESPACE_RASA\n                      --set app.name=$ACTIONS_CONTAINER_IMAGE\n                      --set app.tag=$TAG \n                      --set images.imagePullSecrets[0].name=$K8S_SECRET rasa-x rasa-x/rasa-x\n```\n\nNotice the variables in ```before_script```, these ones are needed to authenticate to GCP where we have our Kubernetes cluster. This step is optional and could be skipped in cases where you have [Gitlab pre-integrated](https://docs.gitlab.com/ee/user/project/clusters/add_remove_clusters.html) with your Kubernetes cluster running on Google Cloud.  
\n\nThe main and most interesting part of the script is:  \n```\nscript:\n    - kubectl create ns $NAMESPACE_RASA --dry-run=client -o yaml | kubectl apply -f -\n    - kubectl create secret docker-registry $K8S_SECRET\n              --docker-server=$CI_REGISTRY\n              --docker-username=$CI_DEPLOY_USER\n              --docker-password=$CI_DEPLOY_PASSWORD\n              --namespace $NAMESPACE_RASA\n              -o yaml --dry-run=client | kubectl apply -f -\n    - helm repo add rasa-x https://rasahq.github.io/rasa-x-helm\n    - helm upgrade -i --reuse-values \n                      --namespace $NAMESPACE_RASA\n                      --set app.name=$ACTIONS_CONTAINER_IMAGE\n                      --set app.tag=$TAG \n                      --set images.imagePullSecrets[0].name=$K8S_SECRET rasa-x rasa-x/rasa-x\n\n```\n\nWe start by creating the *namespace* for our custom actions code, and if it already exists, then we proceed to apply Kubernetes commands using kubectl and helm.  \n```\nhelm repo add rasa-x https://rasahq.github.io/rasa-x-helm\n    - helm upgrade -i --reuse-values \n                      --namespace $NAMESPACE_RASA\n                      --set app.name=$ACTIONS_CONTAINER_IMAGE\n                      --set app.tag=$TAG \n                      --set images.imagePullSecrets[0].name=$K8S_SECRET rasa-x rasa-x/rasa-x\n```\nThe snippet above adds a rasa-x Helm chart and upgrades or changes the values corresponding to the new **Custom Action Image** by assigning to it the ```$ACTIONS_CONTAINER_IMAGE``` created in the build stage.\nNote that the pipeline described above focuses only on creating and deploying the ACTIONS_CONTAINER_IMAGE. It could be extended by adding more stages, for example, code quality, security testing, and unit testing among others.  \n\n## Summary\nUsing the GitLab DevOps Platform together with Rasa X can make it easier for stakeholders to deliver a virtual assistant by automating potentially time-consuming, error-prone steps. 
In this case, we’ve shown how you can build Rasa custom action servers and deploy them to Kubernetes.\nPushing new custom action containers to Kubernetes only scratches the surface of what you can automate with GitLab. You could also add steps for code quality, security audits and unit tests. The main goal is to automate the manual parts of deployment so that you can focus on what is important. In the case of Rasa X, that means that more time can be spent learning from your users and making a better assistant in the process.\n\nDo you want to learn more? Watch this video of Gitlab DevOps Platform and Rasa [Deploy your Rasa Chatbots like a boss with DevOps](https://youtu.be/ko9-zPDuhQo)\n\nHappy hacking!\n\nCover image by [Eric Krull](https://unsplash.com/@ekrull?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText) on [Unsplash](https://unsplash.com)\n{: .note}\n",[793,1368,1128],{"slug":3725,"featured":6,"template":683},"custom-actions-rasa-gitlab-devops","content:en-us:blog:custom-actions-rasa-gitlab-devops.yml","Custom Actions Rasa Gitlab Devops","en-us/blog/custom-actions-rasa-gitlab-devops.yml","en-us/blog/custom-actions-rasa-gitlab-devops",{"_path":3731,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":3732,"content":3738,"config":3744,"_id":3746,"_type":16,"title":3747,"_source":18,"_file":3748,"_stem":3749,"_extension":21},"/en-us/blog/how-gitlab-can-help-you-secure-your-cloud-native-applications",{"title":3733,"description":3734,"ogTitle":3733,"ogDescription":3734,"noIndex":6,"ogImage":3735,"ogUrl":3736,"ogSiteName":697,"ogType":698,"canonicalUrls":3736,"schema":3737},"How GitLab improves cloud native application security and protection","In this article, we will show you how GitLab can help you streamline your cloud native application security from a code and operations point of view by providing you with real-world 
examples.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749664102/Blog/Hero%20Images/gitlab-values-cover.png","https://about.gitlab.com/blog/how-gitlab-can-help-you-secure-your-cloud-native-applications","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How GitLab improves cloud native application security and protection\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Nico Meisenzahl\"}],\n        \"datePublished\": \"2020-08-18\",\n      }",{"title":3733,"description":3734,"authors":3739,"heroImage":3735,"date":3741,"body":3742,"category":14,"tags":3743},[3740],"Nico Meisenzahl","2020-08-18","\n{::options parse_block_html=\"true\" /}\n\nIn the [cloud-native](/topics/cloud-native/) ecosystem, decisions and changes are made on a rapid basis. Applications get adapted and deployed multiple times a week or even day. Microservices get developed decentralized with different peoples and teams involved. In such an environment, it is crucial to ensure that applications are developed and operated safely. This can be done by shifting security left into the developer lifecycle but also by using DevSecOps to empower operations with enhanced monitoring and protection for the application runtime.\n\nIn this article, I would like to show you how GitLab can help you streamline your application security from a code and operations point of view by providing you with real-world examples. Before we deep dive into the example, let me first introduce you to the [GitLab Secure](https://about.gitlab.com/stages-devops-lifecycle/secure/) and [GitLab Protect](https://about.gitlab.com/stages-devops-lifecycle/govern/) product portfolio which are the foundation for this. GitLab Secure helps developers to enable accurate, automated, and continuous assessment of their applications by proactively identifying vulnerabilities and weaknesses and therefore minimizing security risk. 
GitLab Protect, on the other hand, supports operations by proactively protecting environments and cloud-native applications by providing context-aware technologies to reduce overall security risk. Both are backed by leading open-source projects that have been fully integrated into developer and operation processes and the GitLab user interface (UI).\n\n## Cloud Native Application Security: The attack\n\nLet’s assume we have an application hosting a web interface that allows a user to provide some input. The application is written in [Golang](https://golang.org/) and executes the input as part of an external operating system command ([os/exec](https://golang.org/pkg/os/exec/)). The application does not contain any validation or security features to validate the input, which allows us to inject additional commands that are also executed in the application environment.\n\nThe application is running as containerized microservices in a Kubernetes cluster. The Kubernetes Cluster is shared across multiple teams and projects, allowing us to inject and read data in another application running next to ours. In our example, we will connect an unsecured Redis instance in a different Namespace and read/write data.\n\nNow let us take a closer look at how GitLab can help us detect the attack, permit its execution, and finally help us find and fix the root cause in our code.\n\n## Container Host Security\n\n[Container Host Security](/stages-devops-lifecycle/govern/) helps us to detect an attack in real-time by monitoring the pod for any unusual activity. It can then alert operations with detailed information on the attack itself.\n\nContainer Host Security is powered by [Falco](https://falco.org/), an open-source runtime security tool that listens to the Linux kernel using eBPF. Falco parses system calls and asserts the stream against a configurable rules engine in real-time. 
The Falco deployment used by Container Host Security can be deployed and fully managed using [GitLab Managed Apps](https://docs.gitlab.com/ee/update/removals.html).\n\nIn our example, Falco detects the injected redis-cli command, which is used to read/write data into the unsecured Redis instance. \n\n![Container Host Security](https://about.gitlab.com/images/blogimages/2020-08-18-How-GitLab-Can-Help-You-Secure-Your-Cloud-Native-Applications/falco.png)\n\nFalco can now alert operations who can use those valuable insights to define and execute further steps. \n\n## Container Network Security\n\nA first step to permit access to the unsecured Redis instance would be to permit traffic between the application in our Kubernetes cluster. This can be done by using [Container Network Security](/stages-devops-lifecycle/govern/). Container Network Security is again fully managed by [GitLab Managed Apps](https://docs.gitlab.com/ee/update/removals.html) and can also be configured within the GitLab project user interface.\n\nContainer Network Security is powered by [Cilium](https://cilium.io/), an open-source networking plugin for Kubernetes that can be used to implement support for NetworkPolicy resources. [Network Policies](https://kubernetes.io/docs/concepts/services-networking/network-policies/) can be used to detect and block unauthorized network traffic between pods and to/from the Internet.\n\nImplementing Network Policies for our application will block the underlying network traffic generated by the attack. The policies can be enabled within the GitLab project UI:\n\n![Network Policies](https://about.gitlab.com/images/blogimages/2020-08-18-How-GitLab-Can-Help-You-Secure-Your-Cloud-Native-Applications/network-polices.png)\n\n## Web Application Firewall\n\nWith Container Network Security in place, our attack isn’t able to talk to the Redis instance anymore, but it is still possible to execute other network unrelated attacks using the command injection. 
[Web Application Firewall (WAF)](/stages-devops-lifecycle/govern/) can now help us to increase the security and detect and block the attack at the [Kubernetes Ingress](https://kubernetes.io/docs/concepts/services-networking/ingress/) level. \n\nThe Web Application firewall is also powered by open-source. It is based on the [ModSecurity](https://kubernetes.github.io/ingress-nginx/user-guide/third-party-addons/modsecurity/) module, a toolkit for real-time web application monitoring, logging, and access control. It is preconfigured to use the [OWASP’s Core Rule Set](https://www.modsecurity.org/CRS/Documentation/), which provides generic attack detection capabilities. Like the other integrations, Web Application Firewall is also fully managed by GitLab using [GitLab Managed Apps](https://docs.gitlab.com/ee/update/removals.html).\n\nIn our example, the Web Application Firewall detects the attack and is also able to block it:\n\n![Web Application Firewall logs](https://about.gitlab.com/images/blogimages/2020-08-18-How-GitLab-Can-Help-You-Secure-Your-Cloud-Native-Applications/waf-log.png)\n\nBlocking the attack at the Ingress level will help us to deny the traffic before it hits our application. 
To do so, we can enable the Web Application Firewall blocking mode directly from the GitLab UI:\n\n![WAF settings](https://about.gitlab.com/images/blogimages/2020-08-18-How-GitLab-Can-Help-You-Secure-Your-Cloud-Native-Applications/waf-settings.png)\n\nIn addition to Container Host Security, we could have used the Web Application Firewall to detect the attack using the Threat Monitoring dashboard within our GitLab project:\n\n![Threat Monitoring](https://about.gitlab.com/images/blogimages/2020-08-18-How-GitLab-Can-Help-You-Secure-Your-Cloud-Native-Applications/thread-monitoring.png)\n\nThe Threat Monitoring dashboard also provides us with useful insights and metrics of our enforced Container Network Policy.\n\n## Static Application Security Testing\n\nWe have now successfully protected our application runtime and ensured that no additional attacks can be executed. But we should also find and fix the root cause to ensure that such incidents are not recurring in the future. This is where [Static Application Security Testing (SAST)](/stages-devops-lifecycle/secure/) can help us. Static Application Security Testing can be easily integrated into our project using [GitLab CI/CD](https://docs.gitlab.com/ee/ci/) and then allows us to analyze our [source code](/solutions/source-code-management/) for known vulnerabilities.\n\nIn our case (a Golang application) the code scanning is executed using the open-source project [Golang Security Checker](https://github.com/securego/gosec). 
The results are displayed in the Security dashboard of our GitLab project for easy access:\n\n![Security Dashboard](https://about.gitlab.com/images/blogimages/2020-08-18-How-GitLab-Can-Help-You-Secure-Your-Cloud-Native-Applications/sec-dashboard.png)\n\nIn our example, the code scan has identified the root cause and provides us with detailed information about the vulnerability, the line of code that needs to be fixed, and the ability to easily create an issue to fix it.\n\n![SAST](https://about.gitlab.com/images/blogimages/2020-08-18-How-GitLab-Can-Help-You-Secure-Your-Cloud-Native-Applications/sast.png)\n\nFinally, of course, we should also talk to the team running the other application to make sure that their Redis instance gets secured too. We should also verify how the other [GitLab Secure](https://about.gitlab.com/stages-devops-lifecycle/secure/) features can help to further improve the overall security of the application.\n\n## GitLab Protect and Secure in action\n\nIf you would like to get more insights on GitLab Secure and Protect and want to see it in action, you are welcome to join [Wayne](https://gitlab.com/whaber), [Philippe](https://gitlab.com/plafoucriere) and myself in our session [“Your Attackers Won't Be Happy! How GitLab Can Help You Secure Your Cloud-Native Applications!”](https://gitlabcommitvirtual2020.sched.com/event/dUWw/your-attackers-wont-be-happy-how-gitlab-can-help-you-secure-your-cloud-native-applications) at GitLab Commit where you can gain further insights on Container Host Security, Container Network Security, Web Application Firewall (WAF), and Static Application Security Testing (SAST).\n\nRegister today and join me and others at [GitLab Commit](https://about.gitlab.com/events/commit/) on August 26. 
GitLab Commit 2020 is a free 24-hour virtual experience filled with practical DevOps strategies shared by leaders in development, operations, and security.\n",[1368,1128,2572,1488,750],{"slug":3745,"featured":6,"template":683},"how-gitlab-can-help-you-secure-your-cloud-native-applications","content:en-us:blog:how-gitlab-can-help-you-secure-your-cloud-native-applications.yml","How Gitlab Can Help You Secure Your Cloud Native Applications","en-us/blog/how-gitlab-can-help-you-secure-your-cloud-native-applications.yml","en-us/blog/how-gitlab-can-help-you-secure-your-cloud-native-applications",18,[690,712,736,757,779,800,823,844,864],1754424494171]